Dataset columns:
  hip_filename    string, lengths 5 to 84
  hip_content     string, lengths 79 to 9.69M
  cuda_filename   string, lengths 4 to 83
  cuda_content    string, lengths 19 to 9.69M
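A minimal sketch (the helper name and the use of std::ofstream are my own, not part of the dataset) of materialising one row's four string fields back into a HIP/CUDA file pair on disk, so the two sources can be diffed or compiled:

    #include <fstream>
    #include <string>

    // Writes the four string fields of one dataset row out as two source files.
    void write_pair(const std::string& hip_filename,  const std::string& hip_content,
                    const std::string& cuda_filename, const std::string& cuda_content)
    {
        std::ofstream(hip_filename)  << hip_content;
        std::ofstream(cuda_filename) << cuda_content;
    }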
fd2ac270e818d87b9fd66751c56623cb83a341d4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <cudf/utilities/error.hpp> #include <rmm/rmm.h> #include <cudf/types.h> #include <cudf/legacy/column.hpp> #include <cuspatial/soa_readers.hpp> #include <utility/utility.hpp> namespace cuspatial { /** * @brief read uint32_t (unsigned integer with 32 bit fixed length) data from file as column * see soa_readers.hpp */ gdf_column read_uint32_soa(const char *filename) { gdf_column values; memset(&values,0,sizeof(gdf_column)); uint32_t *data=nullptr; size_t num_l=read_field<uint32_t>(filename,data); if(data==nullptr) return values; uint32_t* temp_val{nullptr}; RMM_TRY( RMM_ALLOC(&temp_val, num_l * sizeof(uint32_t), 0) ); hipStream_t stream{0}; CUDA_TRY( hipMemcpyAsync(temp_val, data, num_l * sizeof(uint32_t) , hipMemcpyHostToDevice,stream) ); gdf_column_view_augmented(&values, temp_val, nullptr, num_l, GDF_INT32, 0, gdf_dtype_extra_info{TIME_UNIT_NONE}, "id"); return values; }//read_uint32_soa }//cuspatial
fd2ac270e818d87b9fd66751c56623cb83a341d4.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include <string.h> #include <math.h> #include <cuda_runtime.h> #include <cudf/utilities/error.hpp> #include <rmm/rmm.h> #include <cudf/types.h> #include <cudf/legacy/column.hpp> #include <cuspatial/soa_readers.hpp> #include <utility/utility.hpp> namespace cuspatial { /** * @brief read uint32_t (unsigned integer with 32 bit fixed length) data from file as column * see soa_readers.hpp */ gdf_column read_uint32_soa(const char *filename) { gdf_column values; memset(&values,0,sizeof(gdf_column)); uint32_t *data=nullptr; size_t num_l=read_field<uint32_t>(filename,data); if(data==nullptr) return values; uint32_t* temp_val{nullptr}; RMM_TRY( RMM_ALLOC(&temp_val, num_l * sizeof(uint32_t), 0) ); cudaStream_t stream{0}; CUDA_TRY( cudaMemcpyAsync(temp_val, data, num_l * sizeof(uint32_t) , cudaMemcpyHostToDevice,stream) ); gdf_column_view_augmented(&values, temp_val, nullptr, num_l, GDF_INT32, 0, gdf_dtype_extra_info{TIME_UNIT_NONE}, "id"); return values; }//read_uint32_soa }//cuspatial
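The only differences between the two sources in this pair are the include path and the runtime API prefixes. A minimal sketch of the copy step from read_uint32_soa in the HIP dialect (the function and buffer names here are illustrative, not taken from the file); each call's CUDA counterpart, which hipify replaced, is noted in a trailing comment:

    #include <cstdint>
    #include <cstddef>
    #include <hip/hip_runtime.h>                        // was: <cuda_runtime.h>

    // Asynchronously copies n uint32_t values to the device on the default stream,
    // then waits for the copy to finish (the sync is added here for safety only).
    void copy_ids_to_device(uint32_t* d_vals, const uint32_t* h_vals, size_t n)
    {
        hipStream_t stream{0};                          // was: cudaStream_t
        hipMemcpyAsync(d_vals, h_vals, n * sizeof(uint32_t),
                       hipMemcpyHostToDevice, stream);  // was: cudaMemcpyAsync, cudaMemcpyHostToDevice
        hipStreamSynchronize(stream);                   // was: cudaStreamSynchronize
    }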
a89b600883b396d30e587a5677e3ed421299e5f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ #include <assert.h> #include "model.h" #include <math.h> #include <gloop/benchmark.h> #define WARP_SIZE 32 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #define BLOCK_SIZE 256 #define NUM_WARPS (BLOCK_SIZE/WARP_SIZE) #define HISTS_PER_WARP 16 #define NUM_HISTOGRAMS (NUM_WARPS*HISTS_PER_WARP) #define THREADS_PER_HIST (WARP_SIZE/HISTS_PER_WARP) REAL** g_scanBlockSums; unsigned int g_numEltsAllocated = 0; unsigned int g_numLevelsAllocated = 0; __constant__ REAL dev_binb[NUM_BINS+1]; unsigned int NUM_SETS; unsigned int NUM_ELEMENTS; // create the bin boundaries void initBinB( struct pb_TimerSet *timers ) { REAL *binb = (REAL*)malloc((NUM_BINS+1)*sizeof(REAL)); for (int k = 0; k < NUM_BINS+1; k++) { binb[k] = cos(pow(10.0, (log10(min_arcmin) + k*1.0/bins_per_dec)) / 60.0*D2R); } gloop::Statistics::instance().switchTo<gloop::Statistics::Type::Copy>(); hipMemcpyToSymbol(dev_binb, binb, (NUM_BINS+1)*sizeof(REAL)); gloop::Statistics::instance().switchTo<gloop::Statistics::Type::Kernel>(); free(binb); } __global__ void gen_hists( hist_t* histograms, REAL* all_x_data, REAL* all_y_data, REAL* all_z_data, int NUM_SETS, int NUM_ELEMENTS ) { unsigned int bx = blockIdx.x; unsigned int tid = threadIdx.x; bool do_self = (bx < (NUM_SETS + 1)); REAL* data_x; REAL* data_y; REAL* data_z; REAL* random_x; REAL* random_y; REAL* random_z; __shared__ struct cartesian data_s[BLOCK_SIZE]; __shared__ unsigned int warp_hists[NUM_BINS][NUM_HISTOGRAMS]; // 640B <1k for(unsigned int w = 0; w < NUM_BINS*NUM_HISTOGRAMS; w += BLOCK_SIZE ) { if(w+tid < NUM_BINS*NUM_HISTOGRAMS) { warp_hists[(w+tid)/NUM_HISTOGRAMS][(w+tid)%NUM_HISTOGRAMS] = 0; } } // Get stuff into shared memory to kick off the loop. if( !do_self) { data_x = all_x_data; data_y = all_y_data; data_z = all_z_data; random_x = all_x_data + NUM_ELEMENTS * (bx - NUM_SETS); random_y = all_y_data + NUM_ELEMENTS * (bx - NUM_SETS); random_z = all_z_data + NUM_ELEMENTS * (bx - NUM_SETS); } else { random_x = all_x_data + NUM_ELEMENTS * (bx); random_y = all_y_data + NUM_ELEMENTS * (bx); random_z = all_z_data + NUM_ELEMENTS * (bx); data_x = random_x; data_y = random_y; data_z = random_z; } // Iterate over all data points for(unsigned int i = 0; i < NUM_ELEMENTS; i += BLOCK_SIZE ) { // load current set of data into shared memory // (total of BLOCK_SIZE points loaded) if( tid + i < NUM_ELEMENTS ) { // reading outside of bounds is a-okay data_s[tid] = (struct cartesian) {data_x[tid + i], data_y[tid + i], data_z[tid + i]}; } __syncthreads(); // Iterate over all random points for(unsigned int j = (do_self ? 
i+1 : 0); j < NUM_ELEMENTS; j += BLOCK_SIZE) { // load current random point values REAL random_x_s; REAL random_y_s; REAL random_z_s; if(tid + j < NUM_ELEMENTS) { random_x_s = random_x[tid + j]; random_y_s = random_y[tid + j]; random_z_s = random_z[tid + j]; } // Iterate for all elements of current set of data points // (BLOCK_SIZE iterations per thread) // Each thread calcs against 1 random point within cur set of random // (so BLOCK_SIZE threads covers all random points within cur set) for(unsigned int k = 0; (k < BLOCK_SIZE) && (k+i < NUM_ELEMENTS); k += 1) { // do actual calculations on the values: REAL distance = data_s[k].x * random_x_s + data_s[k].y * random_y_s + data_s[k].z * random_z_s; unsigned int bin_index; // run binary search to find bin_index unsigned int min = 0; unsigned int max = NUM_BINS; { unsigned int k2; while (max > min+1) { k2 = (min + max) / 2; if (distance >= dev_binb[k2]) max = k2; else min = k2; } bin_index = max - 1; } unsigned int warpnum = tid / (WARP_SIZE/HISTS_PER_WARP); if((distance < dev_binb[min]) && (distance >= dev_binb[max]) && (!do_self || (tid + j > i + k)) && (tid + j < NUM_ELEMENTS)) { atomicAdd(&warp_hists[bin_index][warpnum], 1U); } } } } // coalesce the histograms in a block unsigned int warp_index = tid & ( (NUM_HISTOGRAMS>>1) - 1); unsigned int bin_index = tid / (NUM_HISTOGRAMS>>1); for(unsigned int offset = NUM_HISTOGRAMS >> 1; offset > 0; offset >>= 1) { for(unsigned int bin_base = 0; bin_base < NUM_BINS; bin_base += BLOCK_SIZE/ (NUM_HISTOGRAMS>>1)) { __syncthreads(); if(warp_index < offset && bin_base+bin_index < NUM_BINS ) { unsigned long sum = warp_hists[bin_base + bin_index][warp_index] + warp_hists[bin_base + bin_index][warp_index+offset]; warp_hists[bin_base + bin_index][warp_index] = sum; } } } __syncthreads(); // Put the results back in the real histogram // warp_hists[x][0] holds sum of all locations of bin x hist_t* hist_base = histograms + NUM_BINS * bx; if(tid < NUM_BINS) { hist_base[tid] = warp_hists[tid][0]; } } void TPACF(hist_t * histograms, REAL* d_x_data, REAL* d_y_data, REAL* d_z_data) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(NUM_SETS*2 + 1); // gloop::Benchmark benchmark; // benchmark.begin(); hipLaunchKernelGGL(( gen_hists) , dim3(dimGrid), dim3(dimBlock) , 0, 0, histograms, d_x_data, d_y_data, d_z_data, NUM_SETS, NUM_ELEMENTS); hipDeviceSynchronize(); // benchmark.end(); // benchmark.report(); } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
a89b600883b396d30e587a5677e3ed421299e5f9.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ #include <assert.h> #include "model.h" #include <math.h> #include <gloop/benchmark.h> #define WARP_SIZE 32 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #define BLOCK_SIZE 256 #define NUM_WARPS (BLOCK_SIZE/WARP_SIZE) #define HISTS_PER_WARP 16 #define NUM_HISTOGRAMS (NUM_WARPS*HISTS_PER_WARP) #define THREADS_PER_HIST (WARP_SIZE/HISTS_PER_WARP) REAL** g_scanBlockSums; unsigned int g_numEltsAllocated = 0; unsigned int g_numLevelsAllocated = 0; __constant__ REAL dev_binb[NUM_BINS+1]; unsigned int NUM_SETS; unsigned int NUM_ELEMENTS; // create the bin boundaries void initBinB( struct pb_TimerSet *timers ) { REAL *binb = (REAL*)malloc((NUM_BINS+1)*sizeof(REAL)); for (int k = 0; k < NUM_BINS+1; k++) { binb[k] = cos(pow(10.0, (log10(min_arcmin) + k*1.0/bins_per_dec)) / 60.0*D2R); } gloop::Statistics::instance().switchTo<gloop::Statistics::Type::Copy>(); cudaMemcpyToSymbol(dev_binb, binb, (NUM_BINS+1)*sizeof(REAL)); gloop::Statistics::instance().switchTo<gloop::Statistics::Type::Kernel>(); free(binb); } __global__ void gen_hists( hist_t* histograms, REAL* all_x_data, REAL* all_y_data, REAL* all_z_data, int NUM_SETS, int NUM_ELEMENTS ) { unsigned int bx = blockIdx.x; unsigned int tid = threadIdx.x; bool do_self = (bx < (NUM_SETS + 1)); REAL* data_x; REAL* data_y; REAL* data_z; REAL* random_x; REAL* random_y; REAL* random_z; __shared__ struct cartesian data_s[BLOCK_SIZE]; __shared__ unsigned int warp_hists[NUM_BINS][NUM_HISTOGRAMS]; // 640B <1k for(unsigned int w = 0; w < NUM_BINS*NUM_HISTOGRAMS; w += BLOCK_SIZE ) { if(w+tid < NUM_BINS*NUM_HISTOGRAMS) { warp_hists[(w+tid)/NUM_HISTOGRAMS][(w+tid)%NUM_HISTOGRAMS] = 0; } } // Get stuff into shared memory to kick off the loop. if( !do_self) { data_x = all_x_data; data_y = all_y_data; data_z = all_z_data; random_x = all_x_data + NUM_ELEMENTS * (bx - NUM_SETS); random_y = all_y_data + NUM_ELEMENTS * (bx - NUM_SETS); random_z = all_z_data + NUM_ELEMENTS * (bx - NUM_SETS); } else { random_x = all_x_data + NUM_ELEMENTS * (bx); random_y = all_y_data + NUM_ELEMENTS * (bx); random_z = all_z_data + NUM_ELEMENTS * (bx); data_x = random_x; data_y = random_y; data_z = random_z; } // Iterate over all data points for(unsigned int i = 0; i < NUM_ELEMENTS; i += BLOCK_SIZE ) { // load current set of data into shared memory // (total of BLOCK_SIZE points loaded) if( tid + i < NUM_ELEMENTS ) { // reading outside of bounds is a-okay data_s[tid] = (struct cartesian) {data_x[tid + i], data_y[tid + i], data_z[tid + i]}; } __syncthreads(); // Iterate over all random points for(unsigned int j = (do_self ? 
i+1 : 0); j < NUM_ELEMENTS; j += BLOCK_SIZE) { // load current random point values REAL random_x_s; REAL random_y_s; REAL random_z_s; if(tid + j < NUM_ELEMENTS) { random_x_s = random_x[tid + j]; random_y_s = random_y[tid + j]; random_z_s = random_z[tid + j]; } // Iterate for all elements of current set of data points // (BLOCK_SIZE iterations per thread) // Each thread calcs against 1 random point within cur set of random // (so BLOCK_SIZE threads covers all random points within cur set) for(unsigned int k = 0; (k < BLOCK_SIZE) && (k+i < NUM_ELEMENTS); k += 1) { // do actual calculations on the values: REAL distance = data_s[k].x * random_x_s + data_s[k].y * random_y_s + data_s[k].z * random_z_s; unsigned int bin_index; // run binary search to find bin_index unsigned int min = 0; unsigned int max = NUM_BINS; { unsigned int k2; while (max > min+1) { k2 = (min + max) / 2; if (distance >= dev_binb[k2]) max = k2; else min = k2; } bin_index = max - 1; } unsigned int warpnum = tid / (WARP_SIZE/HISTS_PER_WARP); if((distance < dev_binb[min]) && (distance >= dev_binb[max]) && (!do_self || (tid + j > i + k)) && (tid + j < NUM_ELEMENTS)) { atomicAdd(&warp_hists[bin_index][warpnum], 1U); } } } } // coalesce the histograms in a block unsigned int warp_index = tid & ( (NUM_HISTOGRAMS>>1) - 1); unsigned int bin_index = tid / (NUM_HISTOGRAMS>>1); for(unsigned int offset = NUM_HISTOGRAMS >> 1; offset > 0; offset >>= 1) { for(unsigned int bin_base = 0; bin_base < NUM_BINS; bin_base += BLOCK_SIZE/ (NUM_HISTOGRAMS>>1)) { __syncthreads(); if(warp_index < offset && bin_base+bin_index < NUM_BINS ) { unsigned long sum = warp_hists[bin_base + bin_index][warp_index] + warp_hists[bin_base + bin_index][warp_index+offset]; warp_hists[bin_base + bin_index][warp_index] = sum; } } } __syncthreads(); // Put the results back in the real histogram // warp_hists[x][0] holds sum of all locations of bin x hist_t* hist_base = histograms + NUM_BINS * bx; if(tid < NUM_BINS) { hist_base[tid] = warp_hists[tid][0]; } } void TPACF(hist_t * histograms, REAL* d_x_data, REAL* d_y_data, REAL* d_z_data) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(NUM_SETS*2 + 1); // gloop::Benchmark benchmark; // benchmark.begin(); gen_hists <<< dimGrid, dimBlock >>> ( histograms, d_x_data, d_y_data, d_z_data, NUM_SETS, NUM_ELEMENTS); cudaDeviceSynchronize(); // benchmark.end(); // benchmark.report(); } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
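The pair above shows the other common rewrite: hipify replaces the triple-chevron kernel launch with hipLaunchKernelGGL, whose two extra arguments (before the kernel arguments) are the dynamic shared-memory size and the stream. A minimal sketch with a toy kernel, not taken from the file:

    #include <hip/hip_runtime.h>

    // Toy kernel: scales each of the n elements of x by a.
    __global__ void scale(float* x, float a, int n)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) x[i] *= a;
    }

    void launch_scale(float* d_x, float a, int n)
    {
        dim3 block(256);
        dim3 grid((n + block.x - 1) / block.x);
        // CUDA source:   scale<<<grid, block>>>(d_x, a, n);
        // hipify output (0 bytes of dynamic shared memory, default stream):
        hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, a, n);
        hipDeviceSynchronize();
    }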
7b91b0d064f8000d500b1727c850320051ccef71.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <string> #include "timer.hpp" //for (; p<=3; p++); // cout << *p; // double option1() { // }; int main(void) { int option = 1; int N = 1000; int num_tests = 3; int i = 0; Timer timer, timer2; double total_time = 0.0; double runtime = 0.0, max_runtime = 0.0, min_runtime = 100.0, hosttime = -1.; double *x_check, *y_check; x_check = (double*)malloc(N*sizeof(double)); y_check = (double*)malloc(N*sizeof(double)); for (; i<num_tests; i++) { double *x, *y, *d_x, *d_y; x = (double*)malloc(N*sizeof(double)); y = (double*)malloc(N*sizeof(double)); hipMalloc(&d_x, N*sizeof(double)); hipMalloc(&d_y, N*sizeof(double)); timer.reset(); if (option == 1){ timer2.reset(); int j = 0; for (double *p = x; p <= x + sizeof(x); p++) { *p = j++; } for (double *p = y; p <= y + sizeof(y); p++) { *p = j--; } hosttime = timer2.get(); hipMemcpy(d_x, x, N*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N*sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); } runtime = timer.get(); std::cout << "(" << i+1 << ") Elapsed (total/host): " << runtime << " s/ " << hosttime << " s" << std::endl; total_time += runtime; if (i==0){ int cnt = 0; for (double *p = x_check; p <= x_check + sizeof(x_check); p++) { *p = *(x + cnt++); } cnt = 0; for (double *p = y_check; p <= y_check + sizeof(y_check); p++) { *p = *(y + cnt++); } } free(x); free(y); hipFree(d_x); hipFree(d_y); if (runtime > max_runtime) { max_runtime = runtime; } if (runtime < min_runtime) { min_runtime = runtime; } if (total_time > 1.) { break; } } std::cout << std::endl << "Results after " << i << " tests:" << std::endl; std::cout << "Total runtime: " << total_time << std::endl; std::cout << "Average runtime; " << total_time/i << std::endl; std::cout << "Maximum runtime: " << max_runtime << std::endl; std::cout << "Minimum runtime: " << min_runtime << std::endl; std::cout << "\n--- Checking arrays ---" << std::endl; std::cout << " i | x | y " << std::endl; for (i=0; i < 3; i++){ std::cout << " " << i << " | " << *(x_check+i) << " | " << *(y_check+i) << std::endl; } free(x_check); free(y_check); return EXIT_SUCCESS; }
7b91b0d064f8000d500b1727c850320051ccef71.cu
#include <iostream> #include <string> #include "timer.hpp" //for (; p<=3; p++); // cout << *p; // double option1() { // }; int main(void) { int option = 1; int N = 1000; int num_tests = 3; int i = 0; Timer timer, timer2; double total_time = 0.0; double runtime = 0.0, max_runtime = 0.0, min_runtime = 100.0, hosttime = -1.; double *x_check, *y_check; x_check = (double*)malloc(N*sizeof(double)); y_check = (double*)malloc(N*sizeof(double)); for (; i<num_tests; i++) { double *x, *y, *d_x, *d_y; x = (double*)malloc(N*sizeof(double)); y = (double*)malloc(N*sizeof(double)); cudaMalloc(&d_x, N*sizeof(double)); cudaMalloc(&d_y, N*sizeof(double)); timer.reset(); if (option == 1){ timer2.reset(); int j = 0; for (double *p = x; p <= x + sizeof(x); p++) { *p = j++; } for (double *p = y; p <= y + sizeof(y); p++) { *p = j--; } hosttime = timer2.get(); cudaMemcpy(d_x, x, N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); } runtime = timer.get(); std::cout << "(" << i+1 << ") Elapsed (total/host): " << runtime << " s/ " << hosttime << " s" << std::endl; total_time += runtime; if (i==0){ int cnt = 0; for (double *p = x_check; p <= x_check + sizeof(x_check); p++) { *p = *(x + cnt++); } cnt = 0; for (double *p = y_check; p <= y_check + sizeof(y_check); p++) { *p = *(y + cnt++); } } free(x); free(y); cudaFree(d_x); cudaFree(d_y); if (runtime > max_runtime) { max_runtime = runtime; } if (runtime < min_runtime) { min_runtime = runtime; } if (total_time > 1.) { break; } } std::cout << std::endl << "Results after " << i << " tests:" << std::endl; std::cout << "Total runtime: " << total_time << std::endl; std::cout << "Average runtime; " << total_time/i << std::endl; std::cout << "Maximum runtime: " << max_runtime << std::endl; std::cout << "Minimum runtime: " << min_runtime << std::endl; std::cout << "\n--- Checking arrays ---" << std::endl; std::cout << " i | x | y " << std::endl; for (i=0; i < 3; i++){ std::cout << " " << i << " | " << *(x_check+i) << " | " << *(y_check+i) << std::endl; } free(x_check); free(y_check); return EXIT_SUCCESS; }
e39f58e1e63581c2544cc43fbbc27229c792e072.hip
// !!! This is a file automatically generated by hipify!!! // // include files // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> //#include <cutil_inline.h> #include "hip/hip_runtime.h" //#include "MyFirst_kernel.cu" __global__ void my_first_kernel(float *x) { // Uncomment line below and define integer "tid" as global index to vector "x" int tid = blockIdx.x * blockDim.x + threadIdx.x; // Uncomment line below and define x[tid] to be equal to the thread index x[tid] = (float)threadIdx.x; } // // main host code // int main(int argc, char **argv) { float *h_x, *d_x; int nblocks, nthreads, nsize, n; // set number of blocks, and threads per block nblocks = 2; nthreads = 16; nsize = nblocks*nthreads; // allocate memory for array h_x = (float *)malloc(nsize*sizeof(float)); hipMalloc((void **)&d_x, nsize*sizeof(float)); // execute kernel hipLaunchKernelGGL(( my_first_kernel), dim3(nblocks),dim3(nthreads), 0, 0, d_x); // copy results from device to host hipMemcpy(h_x,d_x,nsize*sizeof(float),hipMemcpyDeviceToHost); // print results for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]); // check results float sumcheck = 0.; float sumcheckcorrect = 0.; for (int i = 0; i < nblocks * nthreads; ++i) { sumcheck += h_x[i]; } for (int j=0; j<nthreads; ++j) { sumcheckcorrect += j; } sumcheckcorrect *= 2; if (fabs(sumcheck-sumcheckcorrect)<1e-6) { printf("PASSED!\n"); } else { printf("FAILED!\n"); } // free memory hipFree(d_x); free(h_x); return 0; }
e39f58e1e63581c2544cc43fbbc27229c792e072.cu
// // include files // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> //#include <cutil_inline.h> #include "cuda_runtime.h" //#include "MyFirst_kernel.cu" __global__ void my_first_kernel(float *x) { // Uncomment line below and define integer "tid" as global index to vector "x" int tid = blockIdx.x * blockDim.x + threadIdx.x; // Uncomment line below and define x[tid] to be equal to the thread index x[tid] = (float)threadIdx.x; } // // main host code // int main(int argc, char **argv) { float *h_x, *d_x; int nblocks, nthreads, nsize, n; // set number of blocks, and threads per block nblocks = 2; nthreads = 16; nsize = nblocks*nthreads; // allocate memory for array h_x = (float *)malloc(nsize*sizeof(float)); cudaMalloc((void **)&d_x, nsize*sizeof(float)); // execute kernel my_first_kernel<<<nblocks,nthreads>>>(d_x); // copy results from device to host cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost); // print results for (n=0; n<nsize; n++) printf(" n, x = %d %f \n",n,h_x[n]); // check results float sumcheck = 0.; float sumcheckcorrect = 0.; for (int i = 0; i < nblocks * nthreads; ++i) { sumcheck += h_x[i]; } for (int j=0; j<nthreads; ++j) { sumcheckcorrect += j; } sumcheckcorrect *= 2; if (fabs(sumcheck-sumcheckcorrect)<1e-6) { printf("PASSED!\n"); } else { printf("FAILED!\n"); } // free memory cudaFree(d_x); free(h_x); return 0; }
9d5c7f12c1a9383c5497810d0432a8a3e5f0ed57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "emptyKernel.h" /* __global__ void foo(int *x, int *y) { int a[2] = {1,2}; #ifndef __CUDA_ARCH__ #warning Host mode auto [*x,*y] = a; #else *x = a[0]; *y=a[1]; #endif } */ void a() {}
9d5c7f12c1a9383c5497810d0432a8a3e5f0ed57.cu
#include "emptyKernel.h" /* __global__ void foo(int *x, int *y) { int a[2] = {1,2}; #ifndef __CUDA_ARCH__ #warning Host mode auto [*x,*y] = a; #else *x = a[0]; *y=a[1]; #endif } */ void a() {}
d8a4d1ca6cc23016ce2312c7a11098c5ad41c077.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <sys/time.h> #include "Cube_Unity.h" #include "bmp.h" #define PI 3.1415 __global__ void render(Cube_Unity *Cube, Cube_Unity *Cube_Perspective){ int Size = 100; int width = 2000; int OffSet = 3000; double startAngulo = 1; double offset = startAngulo; auto k = blockIdx.x; int i = int(threadIdx.x/10); for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ Cube[i + Size * (j + Size * k)].z += offset; } double angulo = startAngulo * PI/180; double matrix_x[4][4] = { cos(angulo), 0, sin(angulo), 0, 0, 1, 0, 0, -sin(angulo), 0, cos(angulo), 0, 0, 0, 0, 1}; double new_x, new_y, new_z; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ new_x = Cube[i + Size * (j + Size * k)].x * matrix_x[0][0] + Cube[i + Size * (j + Size * k)].y * matrix_x[0][1] + Cube[i + Size * (j + Size * k)].z * matrix_x[0][2]; new_y = Cube[i + Size * (j + Size * k)].x * matrix_x[1][0] + Cube[i + Size * (j + Size * k)].y * matrix_x[1][1] + Cube[i + Size * (j + Size * k)].z * matrix_x[1][2]; new_z = Cube[i + Size * (j + Size * k)].x * matrix_x[2][0] + Cube[i + Size * (j + Size * k)].y * matrix_x[2][1] + Cube[i + Size * (j + Size * k)].z * matrix_x[2][2]; Cube[i + Size * (j + Size * k)].x = new_x; Cube[i + Size * (j + Size * k)].y = new_y; Cube[i + Size * (j + Size * k)].z = new_z; } angulo = startAngulo * PI/180; double matrix_y[4][4] = {1, 0, 0, 0, 0, cos(angulo), -sin(angulo), 0, 0, sin(angulo), cos(angulo), 0, 0, 0, 0, 1}; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ new_x = Cube[i + Size * (j + Size * k)].x * matrix_y[0][0] + Cube[i + Size * (j + Size * k)].y * matrix_y[0][1] + Cube[i + Size * (j + Size * k)].z * matrix_y[0][2]; new_y = Cube[i + Size * (j + Size * k)].x * matrix_y[1][0] + Cube[i + Size * (j + Size * k)].y * matrix_y[1][1] + Cube[i + Size * (j + Size * k)].z * matrix_y[1][2]; new_z = Cube[i + Size * (j + Size * k)].x * matrix_y[2][0] + Cube[i + Size * (j + Size * k)].y * matrix_y[2][1] + Cube[i + Size * (j + Size * k)].z * matrix_y[2][2]; Cube[i + Size * (j + Size * k)].x = new_x; Cube[i + Size * (j + Size * k)].y = new_y; Cube[i + Size * (j + Size * k)].z = new_z; } offset = OffSet; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ Cube[i + Size * (j + Size * k)].z += offset; } double dist_to_screen = 300; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ new_x = Cube[i + Size * (j + Size * k)].x*dist_to_screen/Cube[i + Size * (j + Size * k)].z+width/2; new_y = Cube[i + Size * (j + Size * k)].y*dist_to_screen/Cube[i + Size * (j + Size * k)].z+width/2; Cube_Perspective[i + Size * (j + Size * k)].x = new_x; Cube_Perspective[i + Size * (j + Size * k)].y = new_y; Cube_Perspective[i + Size * (j + Size * k)].z = 0; Cube_Perspective[i + Size * (j + Size * k)].cor = Cube[i + Size * (j + Size * k)].cor; } offset = -OffSet; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ Cube[i + Size * (j + Size * k)].z += offset; } } int main(void) { Cube_Unity *Cube; Cube_Unity *Cube_Perspective; int Size = 100; int width = 2000; int frames = 1; double spacing_factor = 40; double inix, iniy, iniz; double atualx, atualy, atualz; double spacing; inix = iniy = iniz = atualx = atualy = atualz = -double(Size)/2 * spacing_factor; spacing = -inix*2 / double(Size); hipMallocManaged(&Cube, Size*Size*Size*sizeof(Cube_Unity)); 
hipMallocManaged(&Cube_Perspective, Size*Size*Size*sizeof(Cube_Unity)); for(auto i = 0; i < Size; i++){ for(auto j = 0; j < Size; j++){ for(auto k = 0; k < Size; k++){ Cube[i + Size * (j + Size * k)].set_pos(atualx,atualy,atualz, 255); atualz += spacing; } atualz = iniz; atualy += spacing; } atualy = iniy; atualx += spacing; } // allocate picture array unsigned char* pic = new unsigned char[frames * width * width]; for (int frame = 0; frame < frames; frame++) { for (int row = 0; row < width; row++) { for (int col = 0; col < width; col++) { unsigned char color = (unsigned char) 255; pic[frame * width * width + row * width + col] = (unsigned char) color; } } } int max_threads, max_blocks; max_threads = 1000; max_blocks = Size; // start time timeval start, end; gettimeofday(&start, NULL); for(int frame = 0; frame < frames; frame++){ hipLaunchKernelGGL(( render), dim3(max_blocks), dim3(max_threads), 0, 0, Cube, Cube_Perspective); //render(); hipDeviceSynchronize(); for(auto i = 0; i < Size; i++){ for(auto j = 0; j < Size; j++){ for(auto k = 0; k < Size; k++){ int row = (int)Cube_Perspective[i + Size * (j + Size * k)].x; int col = (int)Cube_Perspective[i + Size * (j + Size * k)].y; if(row >= 0 && row < width && col >= 0 && col < width){ unsigned char color = (unsigned char) 0; pic[frame * width * width + row * width + col] = (unsigned char) color; } } } } } gettimeofday(&end, NULL); double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0; std::cout << "compute time: " << runtime << " s\n"; for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "cube%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } }
d8a4d1ca6cc23016ce2312c7a11098c5ad41c077.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <sys/time.h> #include "Cube_Unity.h" #include "bmp.h" #define PI 3.1415 __global__ void render(Cube_Unity *Cube, Cube_Unity *Cube_Perspective){ int Size = 100; int width = 2000; int OffSet = 3000; double startAngulo = 1; double offset = startAngulo; auto k = blockIdx.x; int i = int(threadIdx.x/10); for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ Cube[i + Size * (j + Size * k)].z += offset; } double angulo = startAngulo * PI/180; double matrix_x[4][4] = { cos(angulo), 0, sin(angulo), 0, 0, 1, 0, 0, -sin(angulo), 0, cos(angulo), 0, 0, 0, 0, 1}; double new_x, new_y, new_z; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ new_x = Cube[i + Size * (j + Size * k)].x * matrix_x[0][0] + Cube[i + Size * (j + Size * k)].y * matrix_x[0][1] + Cube[i + Size * (j + Size * k)].z * matrix_x[0][2]; new_y = Cube[i + Size * (j + Size * k)].x * matrix_x[1][0] + Cube[i + Size * (j + Size * k)].y * matrix_x[1][1] + Cube[i + Size * (j + Size * k)].z * matrix_x[1][2]; new_z = Cube[i + Size * (j + Size * k)].x * matrix_x[2][0] + Cube[i + Size * (j + Size * k)].y * matrix_x[2][1] + Cube[i + Size * (j + Size * k)].z * matrix_x[2][2]; Cube[i + Size * (j + Size * k)].x = new_x; Cube[i + Size * (j + Size * k)].y = new_y; Cube[i + Size * (j + Size * k)].z = new_z; } angulo = startAngulo * PI/180; double matrix_y[4][4] = {1, 0, 0, 0, 0, cos(angulo), -sin(angulo), 0, 0, sin(angulo), cos(angulo), 0, 0, 0, 0, 1}; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ new_x = Cube[i + Size * (j + Size * k)].x * matrix_y[0][0] + Cube[i + Size * (j + Size * k)].y * matrix_y[0][1] + Cube[i + Size * (j + Size * k)].z * matrix_y[0][2]; new_y = Cube[i + Size * (j + Size * k)].x * matrix_y[1][0] + Cube[i + Size * (j + Size * k)].y * matrix_y[1][1] + Cube[i + Size * (j + Size * k)].z * matrix_y[1][2]; new_z = Cube[i + Size * (j + Size * k)].x * matrix_y[2][0] + Cube[i + Size * (j + Size * k)].y * matrix_y[2][1] + Cube[i + Size * (j + Size * k)].z * matrix_y[2][2]; Cube[i + Size * (j + Size * k)].x = new_x; Cube[i + Size * (j + Size * k)].y = new_y; Cube[i + Size * (j + Size * k)].z = new_z; } offset = OffSet; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ Cube[i + Size * (j + Size * k)].z += offset; } double dist_to_screen = 300; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ new_x = Cube[i + Size * (j + Size * k)].x*dist_to_screen/Cube[i + Size * (j + Size * k)].z+width/2; new_y = Cube[i + Size * (j + Size * k)].y*dist_to_screen/Cube[i + Size * (j + Size * k)].z+width/2; Cube_Perspective[i + Size * (j + Size * k)].x = new_x; Cube_Perspective[i + Size * (j + Size * k)].y = new_y; Cube_Perspective[i + Size * (j + Size * k)].z = 0; Cube_Perspective[i + Size * (j + Size * k)].cor = Cube[i + Size * (j + Size * k)].cor; } offset = -OffSet; for(auto j = (threadIdx.x*10)%100; j < ((threadIdx.x*10)%100)+10; j++){ Cube[i + Size * (j + Size * k)].z += offset; } } int main(void) { Cube_Unity *Cube; Cube_Unity *Cube_Perspective; int Size = 100; int width = 2000; int frames = 1; double spacing_factor = 40; double inix, iniy, iniz; double atualx, atualy, atualz; double spacing; inix = iniy = iniz = atualx = atualy = atualz = -double(Size)/2 * spacing_factor; spacing = -inix*2 / double(Size); cudaMallocManaged(&Cube, Size*Size*Size*sizeof(Cube_Unity)); cudaMallocManaged(&Cube_Perspective, Size*Size*Size*sizeof(Cube_Unity)); for(auto i = 0; i < Size; 
i++){ for(auto j = 0; j < Size; j++){ for(auto k = 0; k < Size; k++){ Cube[i + Size * (j + Size * k)].set_pos(atualx,atualy,atualz, 255); atualz += spacing; } atualz = iniz; atualy += spacing; } atualy = iniy; atualx += spacing; } // allocate picture array unsigned char* pic = new unsigned char[frames * width * width]; for (int frame = 0; frame < frames; frame++) { for (int row = 0; row < width; row++) { for (int col = 0; col < width; col++) { unsigned char color = (unsigned char) 255; pic[frame * width * width + row * width + col] = (unsigned char) color; } } } int max_threads, max_blocks; max_threads = 1000; max_blocks = Size; // start time timeval start, end; gettimeofday(&start, NULL); for(int frame = 0; frame < frames; frame++){ render<<<max_blocks, max_threads>>>(Cube, Cube_Perspective); //render(); cudaDeviceSynchronize(); for(auto i = 0; i < Size; i++){ for(auto j = 0; j < Size; j++){ for(auto k = 0; k < Size; k++){ int row = (int)Cube_Perspective[i + Size * (j + Size * k)].x; int col = (int)Cube_Perspective[i + Size * (j + Size * k)].y; if(row >= 0 && row < width && col >= 0 && col < width){ unsigned char color = (unsigned char) 0; pic[frame * width * width + row * width + col] = (unsigned char) color; } } } } } gettimeofday(&end, NULL); double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0; std::cout << "compute time: " << runtime << " s\n"; for (int frame = 0; frame < frames; frame++) { char name[32]; sprintf(name, "cube%d.bmp", frame + 1000); writeBMP(width, width, &pic[frame * width * width], name); } }
94839b42ec2d1b6b0f602cbbe26c06947aa9f753.hip
// !!! This is a file automatically generated by hipify!!! #include "synchronoussgd.h" void crossbowKernelOptimiserSynchronousSGD (crossbowStreamP s) { float minusone = -1; /* Number of model (and gradient) parameters */ int elements = s->model->elements; /* Get replica model data buffer */ crossbowDataBufferP model = s->model->data; /* Get replica's gradient data buffer */ crossbowDataBufferP gradient = s->model->gradient; /* Get base model's gradient data buffer */ crossbowDataBufferP theGradient = s->theModel->gradient; /* Apply weight decay to gradient, if set */ if (s->model->conf->weightDecay > 0) { checkCublasStatus(hipblasSaxpy (s->cublasHandle[s->op->branch], elements, &(s->model->conf->weightDecay), (float *) (model->dev), 1, (float *) (gradient->dev), 1)); } /* For debugging purposes * * checkCudaErrors(hipDeviceSynchronize()); * * float checksum = crossbowDataBufferComputeCheckSum(gradient, 0, s->model->bytes); * info("Gradient checksum of task %d is %.5f\n", s->task, checksum); */ /* Record event that gradient is ready to be used */ checkCudaErrors(hipEventRecord (s->model->client, s->stream[s->op->branch])); /* Accumulate replica's gradient to base model's gradient, applying learning rate */ checkCudaErrors(hipStreamWaitEvent(s->modelSynchronisationStream, s->model->client, 0)); if (s->model->conf->momentumMethod == NESTEROV) { err("Nesterov's momentum has been disabled\n"); } float rate = minusone * crossbowSolverConfGetLearningRate (s->model->conf, s->task); checkCublasStatus(hipblasSaxpy (s->modelSynchronisationHandle, elements, &(rate), (float *) (gradient->dev), 1, (float *) (theGradient->dev), 1)); /* Record event that replica model gradient is no longer required */ checkCudaErrors(hipEventRecord(s->model->server, s->modelSynchronisationStream)); return; }
94839b42ec2d1b6b0f602cbbe26c06947aa9f753.cu
#include "synchronoussgd.h" void crossbowKernelOptimiserSynchronousSGD (crossbowStreamP s) { float minusone = -1; /* Number of model (and gradient) parameters */ int elements = s->model->elements; /* Get replica model data buffer */ crossbowDataBufferP model = s->model->data; /* Get replica's gradient data buffer */ crossbowDataBufferP gradient = s->model->gradient; /* Get base model's gradient data buffer */ crossbowDataBufferP theGradient = s->theModel->gradient; /* Apply weight decay to gradient, if set */ if (s->model->conf->weightDecay > 0) { checkCublasStatus(cublasSaxpy (s->cublasHandle[s->op->branch], elements, &(s->model->conf->weightDecay), (float *) (model->dev), 1, (float *) (gradient->dev), 1)); } /* For debugging purposes * * checkCudaErrors(cudaDeviceSynchronize()); * * float checksum = crossbowDataBufferComputeCheckSum(gradient, 0, s->model->bytes); * info("Gradient checksum of task %d is %.5f\n", s->task, checksum); */ /* Record event that gradient is ready to be used */ checkCudaErrors(cudaEventRecord (s->model->client, s->stream[s->op->branch])); /* Accumulate replica's gradient to base model's gradient, applying learning rate */ checkCudaErrors(cudaStreamWaitEvent(s->modelSynchronisationStream, s->model->client, 0)); if (s->model->conf->momentumMethod == NESTEROV) { err("Nesterov's momentum has been disabled\n"); } float rate = minusone * crossbowSolverConfGetLearningRate (s->model->conf, s->task); checkCublasStatus(cublasSaxpy (s->modelSynchronisationHandle, elements, &(rate), (float *) (gradient->dev), 1, (float *) (theGradient->dev), 1)); /* Record event that replica model gradient is no longer required */ checkCudaErrors(cudaEventRecord(s->model->server, s->modelSynchronisationStream)); return; }
e414c579aeca4d6b9c2f1ec8d04714eb458b1313.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "utils/utils.h" #define NMAX (1 << 20) // ~TODO 3~ // Modify the kernel below such as each element of the // array will be now equal to 0 if it is an even number // or 1, if it is an odd number __global__ void kernel_parity_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < N) { a[i] %= 2; } } // ~TODO 4~ // Modify the kernel below such as each element will // be equal to the BLOCK ID this computation takes // place. __global__ void kernel_block_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < N) { a[i] = blockIdx.x; } } // ~TODO 5~ // Modify the kernel below such as each element will // be equal to the THREAD ID this computation takes // place. __global__ void kernel_thread_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < N) { a[i] = threadIdx.x; } } int main(void) { int nDevices; hipError_t err; int i; int* host_a; int* device_a; // Get the number of CUDA-capable GPU(s) hipGetDeviceCount(&nDevices); // ~TODO 1~ // For each device, show some details in the format below, // then set as active device the first one (assuming there // is at least CUDA-capable device). Pay attention to the // type of the fields in the hipDeviceProp_t structure. // // Device number: <i> // Device name: <name> // Total memory: <mem> // Memory Clock Rate (KHz): <mcr> // Memory Bus Width (bits): <mbw> // // Hint: look for hipGetDeviceProperties and hipSetDevice in // the Cuda Toolkit Documentation. hipDeviceProp_t prop; for (i = 0; i < nDevices; ++i) { hipGetDeviceProperties(&prop, i); printf("Device number: %d\n", i); printf("\tDevice name: %s\n", prop.name); printf("\tTotal memory (GB): %.2lf\n", (double)(prop.totalGlobalMem) / (double)(1000 * 1000 * 1000 )); printf("\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate / 1000); printf("\tMemory Bus Width (bits): %d\n", prop.memoryBusWidth); } // ~TODO 2~ // With information from example_2.cu, allocate an array with // integers (where a[i] = i). Then, modify the three kernels // above and execute them using 4 blocks, each with 4 threads. // Hint: num_elements = block_size * block_no (see example_2) // // You can use the fill_array_int(int *a, int n) function (from utils) // to fill your array as many times you want. host_a = (int*)malloc(NMAX * sizeof(*host_a)); DIE(host_a == NULL, "malloc"); err = hipMalloc(&device_a, NMAX * sizeof(*device_a)); DIE(err != hipSuccess || device_a == NULL, "hipMalloc"); fill_array_int(host_a, NMAX); err = hipMemcpy(device_a, host_a, NMAX * sizeof(*host_a), hipMemcpyHostToDevice); DIE(err != hipSuccess, "hipMemcpy"); // ~TODO 3~ // Execute kernel_parity_id kernel and then copy from // the device to the host; call hipDeviceSynchronize() // after a kernel execution for safety purposes. 
// // Uncomment the line below to check your results hipLaunchKernelGGL(( kernel_parity_id), dim3(NMAX / 4), dim3(4), 0, 0, device_a, NMAX); err = hipDeviceSynchronize(); DIE(err != hipSuccess, "hipDeviceSynchronize"); err = hipMemcpy(host_a, device_a, NMAX * sizeof(*host_a), hipMemcpyDeviceToHost); DIE(err != hipSuccess, "hipMemcpy"); check_task_1(3, host_a); // ~TODO 4~ // Execute kernel_block_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results hipLaunchKernelGGL(( kernel_block_id), dim3(NMAX / 4), dim3(4), 0, 0, device_a, NMAX); err = hipDeviceSynchronize(); DIE(err != hipSuccess, "hipDeviceSynchronize"); err = hipMemcpy(host_a, device_a, NMAX * sizeof(*host_a), hipMemcpyDeviceToHost); DIE(err != hipSuccess, "hipMemcpy"); check_task_1(4, host_a); // ~TODO 5~ // Execute kernel_thread_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results hipLaunchKernelGGL(( kernel_thread_id), dim3(NMAX / 4), dim3(4), 0, 0, device_a, NMAX); err = hipDeviceSynchronize(); DIE(err != hipSuccess, "hipDeviceSynchronize"); err = hipMemcpy(host_a, device_a, NMAX * sizeof(*host_a), hipMemcpyDeviceToHost); DIE(err != hipSuccess, "hipMemcpy"); check_task_1(5, host_a); // TODO 6: Free the memory free(host_a); err = hipFree(device_a); DIE(err != hipSuccess, "hipFree"); return 0; }
e414c579aeca4d6b9c2f1ec8d04714eb458b1313.cu
#include <stdio.h> #include "utils/utils.h" #define NMAX (1 << 20) // ~TODO 3~ // Modify the kernel below such as each element of the // array will be now equal to 0 if it is an even number // or 1, if it is an odd number __global__ void kernel_parity_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < N) { a[i] %= 2; } } // ~TODO 4~ // Modify the kernel below such as each element will // be equal to the BLOCK ID this computation takes // place. __global__ void kernel_block_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < N) { a[i] = blockIdx.x; } } // ~TODO 5~ // Modify the kernel below such as each element will // be equal to the THREAD ID this computation takes // place. __global__ void kernel_thread_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < N) { a[i] = threadIdx.x; } } int main(void) { int nDevices; cudaError_t err; int i; int* host_a; int* device_a; // Get the number of CUDA-capable GPU(s) cudaGetDeviceCount(&nDevices); // ~TODO 1~ // For each device, show some details in the format below, // then set as active device the first one (assuming there // is at least CUDA-capable device). Pay attention to the // type of the fields in the cudaDeviceProp structure. // // Device number: <i> // Device name: <name> // Total memory: <mem> // Memory Clock Rate (KHz): <mcr> // Memory Bus Width (bits): <mbw> // // Hint: look for cudaGetDeviceProperties and cudaSetDevice in // the Cuda Toolkit Documentation. cudaDeviceProp prop; for (i = 0; i < nDevices; ++i) { cudaGetDeviceProperties(&prop, i); printf("Device number: %d\n", i); printf("\tDevice name: %s\n", prop.name); printf("\tTotal memory (GB): %.2lf\n", (double)(prop.totalGlobalMem) / (double)(1000 * 1000 * 1000 )); printf("\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate / 1000); printf("\tMemory Bus Width (bits): %d\n", prop.memoryBusWidth); } // ~TODO 2~ // With information from example_2.cu, allocate an array with // integers (where a[i] = i). Then, modify the three kernels // above and execute them using 4 blocks, each with 4 threads. // Hint: num_elements = block_size * block_no (see example_2) // // You can use the fill_array_int(int *a, int n) function (from utils) // to fill your array as many times you want. host_a = (int*)malloc(NMAX * sizeof(*host_a)); DIE(host_a == NULL, "malloc"); err = cudaMalloc(&device_a, NMAX * sizeof(*device_a)); DIE(err != cudaSuccess || device_a == NULL, "cudaMalloc"); fill_array_int(host_a, NMAX); err = cudaMemcpy(device_a, host_a, NMAX * sizeof(*host_a), cudaMemcpyHostToDevice); DIE(err != cudaSuccess, "cudaMemcpy"); // ~TODO 3~ // Execute kernel_parity_id kernel and then copy from // the device to the host; call cudaDeviceSynchronize() // after a kernel execution for safety purposes. 
// // Uncomment the line below to check your results kernel_parity_id<<<NMAX / 4, 4>>>(device_a, NMAX); err = cudaDeviceSynchronize(); DIE(err != cudaSuccess, "cudaDeviceSynchronize"); err = cudaMemcpy(host_a, device_a, NMAX * sizeof(*host_a), cudaMemcpyDeviceToHost); DIE(err != cudaSuccess, "cudaMemcpy"); check_task_1(3, host_a); // ~TODO 4~ // Execute kernel_block_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results kernel_block_id<<<NMAX / 4, 4>>>(device_a, NMAX); err = cudaDeviceSynchronize(); DIE(err != cudaSuccess, "cudaDeviceSynchronize"); err = cudaMemcpy(host_a, device_a, NMAX * sizeof(*host_a), cudaMemcpyDeviceToHost); DIE(err != cudaSuccess, "cudaMemcpy"); check_task_1(4, host_a); // ~TODO 5~ // Execute kernel_thread_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results kernel_thread_id<<<NMAX / 4, 4>>>(device_a, NMAX); err = cudaDeviceSynchronize(); DIE(err != cudaSuccess, "cudaDeviceSynchronize"); err = cudaMemcpy(host_a, device_a, NMAX * sizeof(*host_a), cudaMemcpyDeviceToHost); DIE(err != cudaSuccess, "cudaMemcpy"); check_task_1(5, host_a); // TODO 6: Free the memory free(host_a); err = cudaFree(device_a); DIE(err != cudaSuccess, "cudaFree"); return 0; }
e500ff0dd2e0ce23a7c9c8117d2f89db200aa832.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgemm_fermi.cu normal z -> c, Tue Sep 2 12:38:17 2014 @author Jakub Kurzak @author Stan Tomov @author Mark Gates [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. gemm_stencil.cu defines the GPU kernel. It gets included multiple times, once for each transpose version. */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c /////////////////////////////////////////////////////////////////////////////////////////////////// #include "cgemm_fermi_kernels.h" /////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- CGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] TRANSA CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = 'N': op( A ) = A. - = 'T': op( A ) = A**T. - = 'C': op( A ) = A**H. @param[in] TRANSB CHARACTER*1. On entry, TRANSB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = 'N': op( B ) = B. - = 'T': op( B ) = B**T. - = 'C': op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( d_A ) and of the matrix d_C. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( d_B ) and the number of columns of the matrix d_C. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( d_A ) and the number of rows of the matrix op( d_B ). K must be at least zero. @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] d_A COMPLEX array of DIMENSION ( LDA, ka ), where ka is k when TRANSA = MagmaNoTrans, and is m otherwise. Before entry with TRANSA = MagmaNoTrans, the leading m by k part of the array d_A must contain the matrix d_A, otherwise the leading k by m part of the array d_A must contain the matrix d_A. @param[in] lda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANSA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] d_B COMPLEX array of DIMENSION ( LDB, kb ), where kb is n when TRANSB = MagmaNoTrans, and is k otherwise. Before entry with TRANSB = MagmaNoTrans, the leading k by n part of the array d_B must contain the matrix d_B, otherwise the leading n by k part of the array d_B must contain the matrix d_B. @param[in] ldb INTEGER. On entry, LDB specifies the first dimension of d_B as declared in the calling (sub) program. When TRANSB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta COMPLEX. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then d_C need not be set on input. @param[in,out] d_C COMPLEX array of DIMENSION ( LDC, n ). 
Before entry, the leading m by n part of the array d_C must contain the matrix d_C, except when beta is zero, in which case d_C need not be set on entry. On exit, the array d_C is overwritten by the m by n matrix ( alpha*op( d_A )*op( d_B ) + beta*d_C ). @param[in] ldc INTEGER. On entry, LDC specifies the first dimension of d_C as declared in the calling (sub) program. LDC must be at least max( 1, m ). @ingroup magma_cblas3 ********************************************************************/ extern "C" void magmablas_cgemm( magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k, magmaFloatComplex alpha, const magmaFloatComplex *d_A, magma_int_t lda, const magmaFloatComplex *d_B, magma_int_t ldb, magmaFloatComplex beta, magmaFloatComplex *d_C, magma_int_t ldc ) { magma_int_t info = 0; if ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != MagmaConjTrans ) info = -1; else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k ) info = -8; else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n ) info = -10; else if ( ldc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemm( TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); #else magmablas_cgemm_tesla( TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( TRANSA == MagmaTrans ) TransA = 1; else if ( TRANSA == MagmaNoTrans ) TransA = 0; if ( TRANSB == MagmaTrans ) TransB = 1; else if ( TRANSB == MagmaNoTrans ) TransB = 0; size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m); size_t sizeB = (size_t) ldb * (size_t) (!TransB ? 
n : k); size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { magma_cgemm( TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); return; } #ifdef TEXTURE_1D // Set textures parameters tex_ref_A.normalized = false; tex_ref_A.filterMode = hipFilterModePoint; tex_ref_A.addressMode[0] = hipAddressModeClamp; tex_ref_B.normalized = false; tex_ref_B.filterMode = hipFilterModePoint; tex_ref_B.addressMode[0] = hipAddressModeClamp; // Bind A and B to texture references hipError_t err; err = hipBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(magmaFloatComplex)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err ); return; } err = hipBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(magmaFloatComplex)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err ); hipUnbindTexture( tex_ref_A ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(d_A[0]); offsetB = offsetB/sizeof(d_B[0]); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_nn + 1, (n - 1)/BLK_N_nn + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_nt + 1, (n - 1)/BLK_N_nt + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_nc + 1, (n - 1)/BLK_N_nc + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_tn + 1, (n - 1)/BLK_N_tn + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_tt + 1, (n - 1)/BLK_N_tt + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_tc + 1, (n - 1)/BLK_N_tc + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_cn + 1, (n - 1)/BLK_N_cn + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_ct + 1, (n - 1)/BLK_N_ct + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_ct), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_cc + 1, (n - 1)/BLK_N_cc + 1 ); hipLaunchKernelGGL(( cgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , m, n, 
k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } hipUnbindTexture( tex_ref_A ); hipUnbindTexture( tex_ref_B ); } ///////////////////////////////////////////////////////////////////////////////////////////////////
e500ff0dd2e0ce23a7c9c8117d2f89db200aa832.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgemm_fermi.cu normal z -> c, Tue Sep 2 12:38:17 2014 @author Jakub Kurzak @author Stan Tomov @author Mark Gates [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. gemm_stencil.cu defines the GPU kernel. It gets included multiple times, once for each transpose version. */ #include "common_magma.h" #include "commonblas_c.h" #define PRECISION_c /////////////////////////////////////////////////////////////////////////////////////////////////// #include "cgemm_fermi_kernels.h" /////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- CGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] TRANSA CHARACTER*1. On entry, TRANSA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = 'N': op( A ) = A. - = 'T': op( A ) = A**T. - = 'C': op( A ) = A**H. @param[in] TRANSB CHARACTER*1. On entry, TRANSB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = 'N': op( B ) = B. - = 'T': op( B ) = B**T. - = 'C': op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( d_A ) and of the matrix d_C. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( d_B ) and the number of columns of the matrix d_C. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( d_A ) and the number of rows of the matrix op( d_B ). K must be at least zero. @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] d_A COMPLEX array of DIMENSION ( LDA, ka ), where ka is k when TRANSA = MagmaNoTrans, and is m otherwise. Before entry with TRANSA = MagmaNoTrans, the leading m by k part of the array d_A must contain the matrix d_A, otherwise the leading k by m part of the array d_A must contain the matrix d_A. @param[in] lda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When TRANSA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] d_B COMPLEX array of DIMENSION ( LDB, kb ), where kb is n when TRANSB = MagmaNoTrans, and is k otherwise. Before entry with TRANSB = MagmaNoTrans, the leading k by n part of the array d_B must contain the matrix d_B, otherwise the leading n by k part of the array d_B must contain the matrix d_B. @param[in] ldb INTEGER. On entry, LDB specifies the first dimension of d_B as declared in the calling (sub) program. When TRANSB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta COMPLEX. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then d_C need not be set on input. @param[in,out] d_C COMPLEX array of DIMENSION ( LDC, n ). 
Before entry, the leading m by n part of the array d_C must contain the matrix d_C, except when beta is zero, in which case d_C need not be set on entry. On exit, the array d_C is overwritten by the m by n matrix ( alpha*op( d_A )*op( d_B ) + beta*d_C ). @param[in] ldc INTEGER. On entry, LDC specifies the first dimension of d_C as declared in the calling (sub) program. LDC must be at least max( 1, m ). @ingroup magma_cblas3 ********************************************************************/ extern "C" void magmablas_cgemm( magma_trans_t TRANSA, magma_trans_t TRANSB, magma_int_t m, magma_int_t n, magma_int_t k, magmaFloatComplex alpha, const magmaFloatComplex *d_A, magma_int_t lda, const magmaFloatComplex *d_B, magma_int_t ldb, magmaFloatComplex beta, magmaFloatComplex *d_C, magma_int_t ldc ) { magma_int_t info = 0; if ( TRANSA != MagmaNoTrans && TRANSA != MagmaTrans && TRANSA != MagmaConjTrans ) info = -1; else if ( TRANSB != MagmaNoTrans && TRANSB != MagmaTrans && TRANSB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( TRANSA == MagmaNoTrans ? lda < m : lda < k ) info = -8; else if ( TRANSB == MagmaNoTrans ? ldb < k : ldb < n ) info = -10; else if ( ldc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemm( TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); #else magmablas_cgemm_tesla( TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( TRANSA == MagmaTrans ) TransA = 1; else if ( TRANSA == MagmaNoTrans ) TransA = 0; if ( TRANSB == MagmaTrans ) TransB = 1; else if ( TRANSB == MagmaNoTrans ) TransB = 0; size_t sizeA = (size_t) lda * (size_t) (!TransA ? k : m); size_t sizeB = (size_t) ldb * (size_t) (!TransB ? 
n : k); size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { magma_cgemm( TRANSA, TRANSB, m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc ); return; } #ifdef TEXTURE_1D // Set textures parameters tex_ref_A.normalized = false; tex_ref_A.filterMode = cudaFilterModePoint; tex_ref_A.addressMode[0] = cudaAddressModeClamp; tex_ref_B.normalized = false; tex_ref_B.filterMode = cudaFilterModePoint; tex_ref_B.addressMode[0] = cudaAddressModeClamp; // Bind A and B to texture references cudaError_t err; err = cudaBindTexture(&offsetA, tex_ref_A, d_A, sizeA*sizeof(magmaFloatComplex)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err ); return; } err = cudaBindTexture(&offsetB, tex_ref_B, d_B, sizeB*sizeof(magmaFloatComplex)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err ); cudaUnbindTexture( tex_ref_A ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(d_A[0]); offsetB = offsetB/sizeof(d_B[0]); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_nn + 1, (n - 1)/BLK_N_nn + 1 ); cgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_nt + 1, (n - 1)/BLK_N_nt + 1 ); cgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_nc + 1, (n - 1)/BLK_N_nc + 1 ); cgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_tn + 1, (n - 1)/BLK_N_tn + 1 ); cgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_tt + 1, (n - 1)/BLK_N_tt + 1 ); cgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_tc + 1, (n - 1)/BLK_N_tc + 1 ); cgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( (m - 1)/BLK_M_cn + 1, (n - 1)/BLK_N_cn + 1 ); cgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( (m - 1)/BLK_M_ct + 1, (n - 1)/BLK_N_ct + 1 ); cgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( (m - 1)/BLK_M_cc + 1, (n - 1)/BLK_N_cc + 1 ); cgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, magma_stream >>>( m, n, k, d_A, lda, d_B, ldb, d_C, ldc, alpha, beta, (int)offsetA, (int)offsetB ); } cudaUnbindTexture( tex_ref_A ); cudaUnbindTexture( tex_ref_B ); } ///////////////////////////////////////////////////////////////////////////////////////////////////
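The cgemm_fermi driver above documents the full CGEMM contract (operand shapes, leading dimensions, texture binding, transpose dispatch) but not how it is called. Below is a minimal host-side sketch of one possible call; it is not taken from MAGMA's own examples: the buffer setup uses plain CUDA runtime calls, the MAGMA_C_MAKE macro and the "magma.h" header are assumed to be available, and error checking is omitted.

// Hedged usage sketch (not from MAGMA's examples): C = alpha*A*B + beta*C in single complex.
#include <vector>
#include <cuda_runtime.h>
#include "magma.h"   // assumed to declare magma_init, magmablas_cgemm, MAGMA_C_MAKE

int main() {
    magma_init();                                   // MAGMA must be initialized before magmablas_* calls
    const magma_int_t m = 256, n = 256, k = 256;
    const size_t bytesA = (size_t)m * k * sizeof(magmaFloatComplex);
    const size_t bytesB = (size_t)k * n * sizeof(magmaFloatComplex);
    const size_t bytesC = (size_t)m * n * sizeof(magmaFloatComplex);

    // Column-major host matrices, matching the LDA/LDB/LDC conventions documented above.
    std::vector<magmaFloatComplex> hA(m * k, MAGMA_C_MAKE(1.0f, 0.0f));
    std::vector<magmaFloatComplex> hB(k * n, MAGMA_C_MAKE(2.0f, 0.0f));
    std::vector<magmaFloatComplex> hC(m * n, MAGMA_C_MAKE(0.0f, 0.0f));

    magmaFloatComplex *dA, *dB, *dC;
    cudaMalloc((void**)&dA, bytesA);
    cudaMalloc((void**)&dB, bytesB);
    cudaMalloc((void**)&dC, bytesC);
    cudaMemcpy(dA, hA.data(), bytesA, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB.data(), bytesB, cudaMemcpyHostToDevice);
    cudaMemcpy(dC, hC.data(), bytesC, cudaMemcpyHostToDevice);

    // No transposition, so LDA = m, LDB = k, LDC = m (each at least the number of rows).
    magmablas_cgemm(MagmaNoTrans, MagmaNoTrans, m, n, k,
                    MAGMA_C_MAKE(1.0f, 0.0f), dA, m, dB, k,
                    MAGMA_C_MAKE(0.0f, 0.0f), dC, m);

    cudaMemcpy(hC.data(), dC, bytesC, cudaMemcpyDeviceToHost);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    magma_finalize();
    return 0;
}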
73302b2506166d2d551a61138ac4d7cd61e2ee17.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "reluBackward.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *X = NULL; hipMalloc(&X, XSIZE*YSIZE); double *dout = NULL; hipMalloc(&dout, XSIZE*YSIZE); double *ret = NULL; hipMalloc(&ret, XSIZE*YSIZE); int rlen = 1; int clen = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( reluBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, X,dout,ret,rlen,clen); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( reluBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, X,dout,ret,rlen,clen); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( reluBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, X,dout,ret,rlen,clen); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
73302b2506166d2d551a61138ac4d7cd61e2ee17.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "reluBackward.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *X = NULL; cudaMalloc(&X, XSIZE*YSIZE); double *dout = NULL; cudaMalloc(&dout, XSIZE*YSIZE); double *ret = NULL; cudaMalloc(&ret, XSIZE*YSIZE); int rlen = 1; int clen = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); reluBackward<<<gridBlock,threadBlock>>>(X,dout,ret,rlen,clen); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { reluBackward<<<gridBlock,threadBlock>>>(X,dout,ret,rlen,clen); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { reluBackward<<<gridBlock,threadBlock>>>(X,dout,ret,rlen,clen); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
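The generated benchmark above rounds XSIZE and YSIZE up with while loops until they divide evenly by the block dimensions before forming the grid. The short host-only sketch below (a hypothetical helper, not part of the benchmark) shows that this padding is equivalent to the usual ceiling-division grid computation.

// Hypothetical helper: blocks needed to cover `extent` elements with `block` threads per block.
#include <cassert>

static inline int ceil_div(int extent, int block) {
    return (extent + block - 1) / block;
}

int main() {
    const int XSIZE = 1016, YSIZE = 1016, BLOCKX = 24, BLOCKY = 24;

    // The benchmark's padding loops...
    int iXSIZE = XSIZE, iYSIZE = YSIZE;
    while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
    while (iYSIZE % BLOCKY != 0) { iYSIZE++; }

    // ...yield the same grid as a single ceiling division.
    assert(iXSIZE / BLOCKX == ceil_div(XSIZE, BLOCKX));
    assert(iYSIZE / BLOCKY == ceil_div(YSIZE, BLOCKY));
    return 0;
}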
ef05ed04dd68c739d6d93cdb271c5fcde00dfe77.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cmath>
#include<cstdio>
//#define BLOCKSIZE 1
__global__ void dotproduct(int* A,int*B,int*C,int M,int N,int K)
{
  printf("%d %d\n", A[0],A[1]);
  printf("%d %d\n", B[0],B[1]);
  printf("%d %d\n", C[0],C[1]);
  int I=blockIdx.x*blockDim.x+threadIdx.x;
  int J=blockIdx.y*blockDim.y+threadIdx.y;
  int temp =0;
  // both indices must be in range (&&, not ||), and the write has to stay inside the guard
  if( I < M && J < N){
    //what is Bkj=B[k*N+J];
    //int temp =0;
    for(int k=0;k<K;k++){
      temp +=A[I*K+k]*B[k*N+J];
    }
    C[I*N+J]=temp;
    printf("%d\n", C[I*N+J]);
  }
}
int main(){
  //int *A=(iny )
  int A[2]={1,2};
  int B[2]={1,1};
  int C[2]={0,0};
  int* d_A;int* d_B;int* d_C;
  //int* A;int* B;int* C;
  // A is treated as a 1 x K row vector and B as a K x 1 column vector, so C holds the 1 x 1 dot product
  int M=1;
  int N=1;
  int K=2;
  //allocating space for variables on device
  hipMalloc(&d_A, K *sizeof(int));//K elements of A
  hipMalloc(&d_B, K *sizeof(int));//K elements of B
  hipMalloc(&d_C, sizeof(C));//sized like the host array so the kernel's debug printf stays in bounds
  /*
  //allocate space for variables on the host
  hipMalloc(&A, M *N*sizeof(int));//let memory store that m*n space for you of size ints
  hipMalloc(&B, M *N*sizeof(int));
  hipMalloc(&C, M *N*sizeof(int));
  */
  //copy A and B FROM HOST TO DEVICE
  hipMemcpy(d_A, &A[0],K *sizeof(int) , hipMemcpyHostToDevice);
  hipMemcpy(d_B, &B[0],K *sizeof(int) , hipMemcpyHostToDevice);
  hipMemcpy(d_C, &C[0],sizeof(C) , hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( dotproduct), dim3(1),dim3(1), 0, 0, d_A,d_B,d_C,M,N,K );
  //COPY RESULT BACK TO HOST
  hipMemcpy(&C[0], d_C, sizeof(int), hipMemcpyDeviceToHost);
  //printf("%d", C[0]);
  hipFree(d_A);//TO FREE MEMORY (the device pointers, not the host arrays)
  hipFree(d_B);
  hipFree(d_C);
}
ef05ed04dd68c739d6d93cdb271c5fcde00dfe77.cu
#include<cmath>
#include<cstdio>
//#define BLOCKSIZE 1
__global__ void dotproduct(int* A,int*B,int*C,int M,int N,int K)
{
  printf("%d %d\n", A[0],A[1]);
  printf("%d %d\n", B[0],B[1]);
  printf("%d %d\n", C[0],C[1]);
  int I=blockIdx.x*blockDim.x+threadIdx.x;
  int J=blockIdx.y*blockDim.y+threadIdx.y;
  int temp =0;
  // both indices must be in range (&&, not ||), and the write has to stay inside the guard
  if( I < M && J < N){
    //what is Bkj=B[k*N+J];
    //int temp =0;
    for(int k=0;k<K;k++){
      temp +=A[I*K+k]*B[k*N+J];
    }
    C[I*N+J]=temp;
    printf("%d\n", C[I*N+J]);
  }
}
int main(){
  //int *A=(iny )
  int A[2]={1,2};
  int B[2]={1,1};
  int C[2]={0,0};
  int* d_A;int* d_B;int* d_C;
  //int* A;int* B;int* C;
  // A is treated as a 1 x K row vector and B as a K x 1 column vector, so C holds the 1 x 1 dot product
  int M=1;
  int N=1;
  int K=2;
  //allocating space for variables on device
  cudaMalloc(&d_A, K *sizeof(int));//K elements of A
  cudaMalloc(&d_B, K *sizeof(int));//K elements of B
  cudaMalloc(&d_C, sizeof(C));//sized like the host array so the kernel's debug printf stays in bounds
  /*
  //allocate space for variables on the host
  cudaMalloc(&A, M *N*sizeof(int));//let memory store that m*n space for you of size ints
  cudaMalloc(&B, M *N*sizeof(int));
  cudaMalloc(&C, M *N*sizeof(int));
  */
  //copy A and B FROM HOST TO DEVICE
  cudaMemcpy(d_A, &A[0],K *sizeof(int) , cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, &B[0],K *sizeof(int) , cudaMemcpyHostToDevice);
  cudaMemcpy(d_C, &C[0],sizeof(C) , cudaMemcpyHostToDevice);
  dotproduct<<<1,1>>>(d_A,d_B,d_C,M,N,K );
  //COPY RESULT BACK TO HOST
  cudaMemcpy(&C[0], d_C, sizeof(int), cudaMemcpyDeviceToHost);
  //printf("%d", C[0]);
  cudaFree(d_A);//TO FREE MEMORY (the device pointers, not the host arrays)
  cudaFree(d_B);
  cudaFree(d_C);
}
25fa5f382f3e8e37f80308a698856402b1fae0ae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void tanh(float *inout, float *bias, int rows, int cols) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (j >= cols || i >= rows) return; inout[i * cols + j] = tanhf(inout[i * cols + j]) + bias[i]; }
25fa5f382f3e8e37f80308a698856402b1fae0ae.cu
#include "includes.h" __global__ void tanh(float *inout, float *bias, int rows, int cols) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (j >= cols || i >= rows) return; inout[i * cols + j] = tanhf(inout[i * cols + j]) + bias[i]; }
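The tanh kernel above expects a two-dimensional launch that covers a rows-by-cols matrix and adds a per-row bias, but the file contains no launch code. A plausible host-side launch could look like the sketch below; the buffer names and block shape are illustrative assumptions, not taken from the original project.

// Illustrative launch for the tanh+bias kernel above; d_inout and d_bias are hypothetical device buffers.
void launch_tanh_bias(float* d_inout, float* d_bias, int rows, int cols, cudaStream_t stream) {
    dim3 block(16, 16);                                 // 256 threads per block
    dim3 grid((cols + block.x - 1) / block.x,           // x covers columns (the kernel's j index)
              (rows + block.y - 1) / block.y);          // y covers rows (the kernel's i index)
    tanh<<<grid, block, 0, stream>>>(d_inout, d_bias, rows, cols);
}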
heat_trans.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <common/book.h> #include <common/cpu_anim.h> #ifdef USE_2DTEXTURE_MEMO #define USE_TEXTURE_MEMO #define TEXTURE_DIM 2 #else #define TEXTURE_DIM 1 #endif #ifndef NTHREADS #define NTHREADS (int) 16 #endif #ifndef DIM #define DIM (int) 1024 #endif #ifndef NSTEPS #define NSTEPS (int) 90 #endif #ifndef SPEED #define SPEED 0.25f #endif #ifndef MAX_TEMP #define MAX_TEMP 1.f #endif #ifndef MIN_TEMP #define MIN_TEMP 0.0001f #endif // Declarations struct DataBlock; #ifndef USE_TEXTURE_MEMO __global__ void copy_const_kernel( float*, const float* ); __global__ void step_run_kernel( float*, const float* ); #else __global__ void copy_const_kernel( float* ); __global__ void step_run_kernel( float*, bool); #endif __global__ void my_float_to_color( unsigned char*, const float* ); // float to color defined in book.h, line 80 void anim_exit_callback( DataBlock* ); void anim_gpu( DataBlock*, int ); void my_swap( float**, float** ); struct DataBlock { CPUAnimBitmap *bitmap; unsigned char *dev_bitmap; float *dev_constSrc; float *dev_inSrc; float *dev_outSrc; hipEvent_t start; hipEvent_t stop; float totalElapsedTime; float frames; }; /* Texture references that resides on GPU */ #ifdef USE_TEXTURE_MEMO texture <float, TEXTURE_DIM> texRefConstSrc; texture <float, TEXTURE_DIM> texRefIn; texture <float, TEXTURE_DIM> texRefOut; #endif int main (void) { DataBlock data; CPUAnimBitmap bitmap( DIM, DIM, &data ); data.bitmap = &bitmap; data.frames = 0.f; data.totalElapsedTime = 0.f; HANDLE_ERROR( hipEventCreate( &data.start ) ); HANDLE_ERROR( hipEventCreate( &data.stop ) ); // Note that one char takes 1 byte, one float takes 4 bytes, // so the following allocation is correct HANDLE_ERROR( hipMalloc( (void**)&(data.dev_bitmap) , bitmap.image_size() ) ); HANDLE_ERROR( hipMalloc( (void**)&(data.dev_constSrc), bitmap.image_size() ) ); HANDLE_ERROR( hipMalloc( (void**)&(data.dev_inSrc) , bitmap.image_size() ) ); HANDLE_ERROR( hipMalloc( (void**)&(data.dev_outSrc) , bitmap.image_size() ) ); // Binding texture reference to device memory #ifdef USE_TEXTURE_MEMO #ifndef USE_2DTEXTURE_MEMO /*__host__ hipError_t hipBindTexture ( size_t* offset, * const textureReference* texref, * const void* devPtr, * const hipChannelFormatDesc* desc, * size_t size = UINT_MAX ) */ HANDLE_ERROR( hipBindTexture( NULL, texRefConstSrc, data.dev_constSrc, bitmap.image_size() ) ); HANDLE_ERROR( hipBindTexture( NULL, texRefIn, data.dev_inSrc, bitmap.image_size() ) ); HANDLE_ERROR( hipBindTexture( NULL, texRefOut, data.dev_outSrc, bitmap.image_size() ) ); #else hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); HANDLE_ERROR( hipBindTexture2D( NULL, texRefConstSrc, data.dev_constSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( hipBindTexture2D( NULL, texRefIn, data.dev_inSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( hipBindTexture2D( NULL, texRefOut, data.dev_outSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); #endif // <-- #ifdef USE_2DTEXTURE_MEMO #endif // <-- #ifdef USE_TEXTURE_MEMO /* Initial Condition */ float *cond_init = (float*) malloc( bitmap.image_size() ); for (int i=0; i<DIM*DIM; i++) { cond_init[i] = 0; int x = i % DIM; int y = i / DIM; if ((x>300) && (x<600) && (y>310) && (y<601)) cond_init[i] = MAX_TEMP; } cond_init[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2; cond_init[DIM*700+100] = MIN_TEMP; cond_init[DIM*300+300] = MIN_TEMP; cond_init[DIM*200+700] = MIN_TEMP; for (int y=800; y<900; y++) { for (int 
x=400; x<500; x++) { cond_init[x+y*DIM] = MIN_TEMP; } } HANDLE_ERROR( hipMemcpy(data.dev_constSrc, cond_init, bitmap.image_size(), hipMemcpyHostToDevice) ); for (int y=800; y<DIM; ++y) { for (int x=0; x<200; ++x) { cond_init[x+y*DIM] = MAX_TEMP; } } HANDLE_ERROR( hipMemcpy(data.dev_inSrc, cond_init, bitmap.image_size(), hipMemcpyHostToDevice) ); free( cond_init ); bitmap.anim_and_exit( (void (*)(void*, int))anim_gpu, (void (*)(void*))anim_exit_callback ); } void anim_exit_callback ( DataBlock* d ) { /* Unbind texture reference if neccessary */ #ifdef USE_TEXTURE_MEMO hipUnbindTexture( texRefIn ); hipUnbindTexture( texRefOut ); hipUnbindTexture( texRefConstSrc ); #endif hipFree( d->dev_bitmap ); hipFree( d->dev_inSrc ); hipFree( d->dev_outSrc ); hipFree( d->dev_constSrc ); HANDLE_ERROR( hipEventDestroy( d->start ) ); HANDLE_ERROR( hipEventDestroy( d->stop ) ); } #ifndef USE_TEXTURE_MEMO /* Use Global Memory */ void anim_gpu ( DataBlock *d, int ticks ) { HANDLE_ERROR( hipEventRecord( d->start, 0 ) ); dim3 dimGrid( DIM/NTHREADS, DIM/NTHREADS ); dim3 dimBlocks( NTHREADS, NTHREADS ); for (int i=0; i<NSTEPS; ++i) { hipLaunchKernelGGL(( copy_const_kernel) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, d->dev_inSrc, d->dev_constSrc ); hipLaunchKernelGGL(( step_run_kernel) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, d->dev_outSrc, d->dev_inSrc ); my_swap( &d->dev_inSrc, &d->dev_outSrc ); } hipLaunchKernelGGL(( float_to_color) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, d->dev_bitmap, d->dev_inSrc ); hipDeviceSynchronize(); HANDLE_ERROR( hipMemcpy( d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipEventRecord( d->stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( d->stop ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, d->start, d->stop ) ); d->totalElapsedTime += elapsedTime; ++(d->frames); printf( "Mean computation time per frame: %3.2f ms\n", d->totalElapsedTime/d->frames ); } __global__ void copy_const_kernel( float *inSrc, const float *constSrc ) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; if ( constSrc[offset] != 0 ) { inSrc[offset] = constSrc[offset]; } } __global__ void step_run_kernel( float *outSrc, const float *inSrc ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; /* AND THAT, IS WHAT PPL CALLED STUPID */ // int x1 = (x!=(DIM-1)) ? (x+1) : x; // Right // int x4 = x; // Bottom // int x16 = x ? (x-1) : x; // Left // int x64 = x; // Top // int y1 = y; // int y4 = y ? (y-1) : y; // int y16 = y; // int y64 = (y!=(DIM-1)) ? (y+1) : y; // int offset1 = x1 + y1 * blockDim.x * gridDim.x; // int offset4 = x4 + y4 * blockDim.x * gridDim.x; // int offset16 = x16 + y16 * blockDim.x * gridDim.x; // int offset64 = x64 + y64 * blockDim.x * gridDim.x; // outSrc[offset] = ( 1.f - 4.f * SPEED ) * inSrc[offset] + // SPEED * ( inSrc[offset1] + inSrc[offset4] + // inSrc[offset16] + inSrc[offset64] ); /* END OF STUPIDITY */ int top = y ? (offset-DIM) : offset; int right = (x != DIM-1) ? (offset + 1) : offset; int bottom = (y != DIM-1) ? (offset+DIM) : offset; int left = x ? 
(offset - 1) : offset; outSrc[offset] = ( 1.f - 4.f * SPEED ) * inSrc[offset] + SPEED * ( inSrc[top] + inSrc[right] + inSrc[bottom] + inSrc[left] ); } #else /* Use Texture Memory */ void anim_gpu ( DataBlock *d, int ticks ) { HANDLE_ERROR( hipEventRecord( d->start, 0 ) ); dim3 dimGrid( DIM/NTHREADS, DIM/NTHREADS ); dim3 dimBlocks( NTHREADS, NTHREADS ); volatile bool dstIsOut = true; // Ues keyword volatile to prevent caching for (size_t i=0; i<NSTEPS; ++i) { hipLaunchKernelGGL(( copy_const_kernel) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, d->dev_inSrc ); hipLaunchKernelGGL(( step_run_kernel) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, d->dev_outSrc, dstIsOut ); my_swap( &d->dev_inSrc, &d->dev_outSrc ); dstIsOut = !dstIsOut; } hipLaunchKernelGGL(( float_to_color) , dim3(dimGrid), dim3(dimBlocks) , 0, 0, d->dev_bitmap, d->dev_inSrc ); hipDeviceSynchronize(); HANDLE_ERROR( hipMemcpy( d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipEventRecord( d->stop, 0 ) ); HANDLE_ERROR( hipEventSynchronize( d->stop ) ); float elapsedTime; HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, d->start, d->stop ) ); d->totalElapsedTime += elapsedTime; ++(d->frames); printf( "Mean computation time per frame: %3.2f ms\n", d->totalElapsedTime/d->frames ); } #ifndef USE_2DTEXTURE_MEMO __global__ void copy_const_kernel( float *inSrc ) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; float constSrc = tex1Dfetch(texRefConstSrc, offset); if ( constSrc != 0 ) { inSrc[offset] = constSrc; } } __global__ void step_run_kernel( float *outSrc, bool dstIsOut ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; int top = y ? (offset-DIM) : offset; int right = (x != DIM-1) ? (offset + 1) : offset; int bottom = (y != DIM-1) ? (offset+DIM) : offset; int left = x ? 
(offset - 1) : offset; float v_top, v_right, v_bottom, v_left, v_old; if (dstIsOut) { v_top = tex1Dfetch( texRefIn, top ); v_right = tex1Dfetch( texRefIn, right ); v_bottom = tex1Dfetch( texRefIn, bottom ); v_left = tex1Dfetch( texRefIn, left ); v_old = tex1Dfetch( texRefIn, offset ); } else { v_top = tex1Dfetch( texRefOut, top ); v_right = tex1Dfetch( texRefOut, right ); v_bottom = tex1Dfetch( texRefOut, bottom ); v_left = tex1Dfetch( texRefOut, left ); v_old = tex1Dfetch( texRefOut, offset ); } outSrc[offset] = ( 1.f - 4.f * SPEED ) * v_old + SPEED * ( v_top + v_right + v_bottom + v_left); } #else __global__ void copy_const_kernel( float *inSrc ) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; float constSrc = tex2D(texRefConstSrc, x, y); if ( constSrc != 0 ) { inSrc[offset] = constSrc; } } __global__ void step_run_kernel( float *outSrc, bool dstIsOut ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float v_top, v_right, v_bottom, v_left, v_old; if (dstIsOut) { v_top = tex2D( texRefIn, x, y-1 ); v_right = tex2D( texRefIn, x+1, y ); v_bottom = tex2D( texRefIn, x, y+1 ); v_left = tex2D( texRefIn, x-1, y ); v_old = tex2D( texRefIn, x , y ); } else { v_top = tex2D( texRefOut, x, y-1 ); v_right = tex2D( texRefOut, x+1, y ); v_bottom = tex2D( texRefOut, x, y+1 ); v_left = tex2D( texRefOut, x-1, y ); v_old = tex2D( texRefOut, x , y ); } outSrc[offset] = ( 1.f - 4.f * SPEED ) * v_old + SPEED * ( v_top + v_right + v_bottom + v_left); } #endif // <-- #ifndef USE_TEXTURE2D_MEMO #endif // <-- #ifndef USE_TEXTURE_MEMO __global__ void my_float_to_color(unsigned char *ptr, const float *inSrc) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; ptr[offset*4 + 0] = (int)( 255 * inSrc[offset] ); ptr[offset*4 + 1] = 0; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 0; } void my_swap (float **in, float **out) { float *dummy = *out; *out = *in; *in = dummy; }
heat_trans.cu
#include <iostream> #include <cuda.h> #include <common/book.h> #include <common/cpu_anim.h> #ifdef USE_2DTEXTURE_MEMO #define USE_TEXTURE_MEMO #define TEXTURE_DIM 2 #else #define TEXTURE_DIM 1 #endif #ifndef NTHREADS #define NTHREADS (int) 16 #endif #ifndef DIM #define DIM (int) 1024 #endif #ifndef NSTEPS #define NSTEPS (int) 90 #endif #ifndef SPEED #define SPEED 0.25f #endif #ifndef MAX_TEMP #define MAX_TEMP 1.f #endif #ifndef MIN_TEMP #define MIN_TEMP 0.0001f #endif // Declarations struct DataBlock; #ifndef USE_TEXTURE_MEMO __global__ void copy_const_kernel( float*, const float* ); __global__ void step_run_kernel( float*, const float* ); #else __global__ void copy_const_kernel( float* ); __global__ void step_run_kernel( float*, bool); #endif __global__ void my_float_to_color( unsigned char*, const float* ); // float to color defined in book.h, line 80 void anim_exit_callback( DataBlock* ); void anim_gpu( DataBlock*, int ); void my_swap( float**, float** ); struct DataBlock { CPUAnimBitmap *bitmap; unsigned char *dev_bitmap; float *dev_constSrc; float *dev_inSrc; float *dev_outSrc; cudaEvent_t start; cudaEvent_t stop; float totalElapsedTime; float frames; }; /* Texture references that resides on GPU */ #ifdef USE_TEXTURE_MEMO texture <float, TEXTURE_DIM> texRefConstSrc; texture <float, TEXTURE_DIM> texRefIn; texture <float, TEXTURE_DIM> texRefOut; #endif int main (void) { DataBlock data; CPUAnimBitmap bitmap( DIM, DIM, &data ); data.bitmap = &bitmap; data.frames = 0.f; data.totalElapsedTime = 0.f; HANDLE_ERROR( cudaEventCreate( &data.start ) ); HANDLE_ERROR( cudaEventCreate( &data.stop ) ); // Note that one char takes 1 byte, one float takes 4 bytes, // so the following allocation is correct HANDLE_ERROR( cudaMalloc( (void**)&(data.dev_bitmap) , bitmap.image_size() ) ); HANDLE_ERROR( cudaMalloc( (void**)&(data.dev_constSrc), bitmap.image_size() ) ); HANDLE_ERROR( cudaMalloc( (void**)&(data.dev_inSrc) , bitmap.image_size() ) ); HANDLE_ERROR( cudaMalloc( (void**)&(data.dev_outSrc) , bitmap.image_size() ) ); // Binding texture reference to device memory #ifdef USE_TEXTURE_MEMO #ifndef USE_2DTEXTURE_MEMO /*__host__ ​cudaError_t cudaBindTexture ( size_t* offset, * const textureReference* texref, * const void* devPtr, * const cudaChannelFormatDesc* desc, * size_t size = UINT_MAX ) */ HANDLE_ERROR( cudaBindTexture( NULL, texRefConstSrc, data.dev_constSrc, bitmap.image_size() ) ); HANDLE_ERROR( cudaBindTexture( NULL, texRefIn, data.dev_inSrc, bitmap.image_size() ) ); HANDLE_ERROR( cudaBindTexture( NULL, texRefOut, data.dev_outSrc, bitmap.image_size() ) ); #else cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); HANDLE_ERROR( cudaBindTexture2D( NULL, texRefConstSrc, data.dev_constSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( cudaBindTexture2D( NULL, texRefIn, data.dev_inSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); HANDLE_ERROR( cudaBindTexture2D( NULL, texRefOut, data.dev_outSrc, desc, DIM, DIM, sizeof(float) * DIM ) ); #endif // <-- #ifdef USE_2DTEXTURE_MEMO #endif // <-- #ifdef USE_TEXTURE_MEMO /* Initial Condition */ float *cond_init = (float*) malloc( bitmap.image_size() ); for (int i=0; i<DIM*DIM; i++) { cond_init[i] = 0; int x = i % DIM; int y = i / DIM; if ((x>300) && (x<600) && (y>310) && (y<601)) cond_init[i] = MAX_TEMP; } cond_init[DIM*100+100] = (MAX_TEMP + MIN_TEMP)/2; cond_init[DIM*700+100] = MIN_TEMP; cond_init[DIM*300+300] = MIN_TEMP; cond_init[DIM*200+700] = MIN_TEMP; for (int y=800; y<900; y++) { for (int x=400; x<500; x++) { cond_init[x+y*DIM] = MIN_TEMP; 
} } HANDLE_ERROR( cudaMemcpy(data.dev_constSrc, cond_init, bitmap.image_size(), cudaMemcpyHostToDevice) ); for (int y=800; y<DIM; ++y) { for (int x=0; x<200; ++x) { cond_init[x+y*DIM] = MAX_TEMP; } } HANDLE_ERROR( cudaMemcpy(data.dev_inSrc, cond_init, bitmap.image_size(), cudaMemcpyHostToDevice) ); free( cond_init ); bitmap.anim_and_exit( (void (*)(void*, int))anim_gpu, (void (*)(void*))anim_exit_callback ); } void anim_exit_callback ( DataBlock* d ) { /* Unbind texture reference if neccessary */ #ifdef USE_TEXTURE_MEMO cudaUnbindTexture( texRefIn ); cudaUnbindTexture( texRefOut ); cudaUnbindTexture( texRefConstSrc ); #endif cudaFree( d->dev_bitmap ); cudaFree( d->dev_inSrc ); cudaFree( d->dev_outSrc ); cudaFree( d->dev_constSrc ); HANDLE_ERROR( cudaEventDestroy( d->start ) ); HANDLE_ERROR( cudaEventDestroy( d->stop ) ); } #ifndef USE_TEXTURE_MEMO /* Use Global Memory */ void anim_gpu ( DataBlock *d, int ticks ) { HANDLE_ERROR( cudaEventRecord( d->start, 0 ) ); dim3 dimGrid( DIM/NTHREADS, DIM/NTHREADS ); dim3 dimBlocks( NTHREADS, NTHREADS ); for (int i=0; i<NSTEPS; ++i) { copy_const_kernel <<< dimGrid, dimBlocks >>> ( d->dev_inSrc, d->dev_constSrc ); step_run_kernel <<< dimGrid, dimBlocks >>> ( d->dev_outSrc, d->dev_inSrc ); my_swap( &d->dev_inSrc, &d->dev_outSrc ); } float_to_color <<< dimGrid, dimBlocks >>> ( d->dev_bitmap, d->dev_inSrc ); cudaDeviceSynchronize(); HANDLE_ERROR( cudaMemcpy( d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaEventRecord( d->stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( d->stop ) ); float elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, d->start, d->stop ) ); d->totalElapsedTime += elapsedTime; ++(d->frames); printf( "Mean computation time per frame: %3.2f ms\n", d->totalElapsedTime/d->frames ); } __global__ void copy_const_kernel( float *inSrc, const float *constSrc ) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; if ( constSrc[offset] != 0 ) { inSrc[offset] = constSrc[offset]; } } __global__ void step_run_kernel( float *outSrc, const float *inSrc ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; /* AND THAT, IS WHAT PPL CALLED STUPID */ // int x1 = (x!=(DIM-1)) ? (x+1) : x; // Right // int x4 = x; // Bottom // int x16 = x ? (x-1) : x; // Left // int x64 = x; // Top // int y1 = y; // int y4 = y ? (y-1) : y; // int y16 = y; // int y64 = (y!=(DIM-1)) ? (y+1) : y; // int offset1 = x1 + y1 * blockDim.x * gridDim.x; // int offset4 = x4 + y4 * blockDim.x * gridDim.x; // int offset16 = x16 + y16 * blockDim.x * gridDim.x; // int offset64 = x64 + y64 * blockDim.x * gridDim.x; // outSrc[offset] = ( 1.f - 4.f * SPEED ) * inSrc[offset] + // SPEED * ( inSrc[offset1] + inSrc[offset4] + // inSrc[offset16] + inSrc[offset64] ); /* END OF STUPIDITY */ int top = y ? (offset-DIM) : offset; int right = (x != DIM-1) ? (offset + 1) : offset; int bottom = (y != DIM-1) ? (offset+DIM) : offset; int left = x ? 
(offset - 1) : offset; outSrc[offset] = ( 1.f - 4.f * SPEED ) * inSrc[offset] + SPEED * ( inSrc[top] + inSrc[right] + inSrc[bottom] + inSrc[left] ); } #else /* Use Texture Memory */ void anim_gpu ( DataBlock *d, int ticks ) { HANDLE_ERROR( cudaEventRecord( d->start, 0 ) ); dim3 dimGrid( DIM/NTHREADS, DIM/NTHREADS ); dim3 dimBlocks( NTHREADS, NTHREADS ); volatile bool dstIsOut = true; // Ues keyword volatile to prevent caching for (size_t i=0; i<NSTEPS; ++i) { copy_const_kernel <<< dimGrid, dimBlocks >>> ( d->dev_inSrc ); step_run_kernel <<< dimGrid, dimBlocks >>> ( d->dev_outSrc, dstIsOut ); my_swap( &d->dev_inSrc, &d->dev_outSrc ); dstIsOut = !dstIsOut; } float_to_color <<< dimGrid, dimBlocks >>> ( d->dev_bitmap, d->dev_inSrc ); cudaDeviceSynchronize(); HANDLE_ERROR( cudaMemcpy( d->bitmap->get_ptr(), d->dev_bitmap, d->bitmap->image_size(), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaEventRecord( d->stop, 0 ) ); HANDLE_ERROR( cudaEventSynchronize( d->stop ) ); float elapsedTime; HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, d->start, d->stop ) ); d->totalElapsedTime += elapsedTime; ++(d->frames); printf( "Mean computation time per frame: %3.2f ms\n", d->totalElapsedTime/d->frames ); } #ifndef USE_2DTEXTURE_MEMO __global__ void copy_const_kernel( float *inSrc ) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; float constSrc = tex1Dfetch(texRefConstSrc, offset); if ( constSrc != 0 ) { inSrc[offset] = constSrc; } } __global__ void step_run_kernel( float *outSrc, bool dstIsOut ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; int top = y ? (offset-DIM) : offset; int right = (x != DIM-1) ? (offset + 1) : offset; int bottom = (y != DIM-1) ? (offset+DIM) : offset; int left = x ? 
(offset - 1) : offset; float v_top, v_right, v_bottom, v_left, v_old; if (dstIsOut) { v_top = tex1Dfetch( texRefIn, top ); v_right = tex1Dfetch( texRefIn, right ); v_bottom = tex1Dfetch( texRefIn, bottom ); v_left = tex1Dfetch( texRefIn, left ); v_old = tex1Dfetch( texRefIn, offset ); } else { v_top = tex1Dfetch( texRefOut, top ); v_right = tex1Dfetch( texRefOut, right ); v_bottom = tex1Dfetch( texRefOut, bottom ); v_left = tex1Dfetch( texRefOut, left ); v_old = tex1Dfetch( texRefOut, offset ); } outSrc[offset] = ( 1.f - 4.f * SPEED ) * v_old + SPEED * ( v_top + v_right + v_bottom + v_left); } #else __global__ void copy_const_kernel( float *inSrc ) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; float constSrc = tex2D(texRefConstSrc, x, y); if ( constSrc != 0 ) { inSrc[offset] = constSrc; } } __global__ void step_run_kernel( float *outSrc, bool dstIsOut ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; float v_top, v_right, v_bottom, v_left, v_old; if (dstIsOut) { v_top = tex2D( texRefIn, x, y-1 ); v_right = tex2D( texRefIn, x+1, y ); v_bottom = tex2D( texRefIn, x, y+1 ); v_left = tex2D( texRefIn, x-1, y ); v_old = tex2D( texRefIn, x , y ); } else { v_top = tex2D( texRefOut, x, y-1 ); v_right = tex2D( texRefOut, x+1, y ); v_bottom = tex2D( texRefOut, x, y+1 ); v_left = tex2D( texRefOut, x-1, y ); v_old = tex2D( texRefOut, x , y ); } outSrc[offset] = ( 1.f - 4.f * SPEED ) * v_old + SPEED * ( v_top + v_right + v_bottom + v_left); } #endif // <-- #ifndef USE_TEXTURE2D_MEMO #endif // <-- #ifndef USE_TEXTURE_MEMO __global__ void my_float_to_color(unsigned char *ptr, const float *inSrc) { unsigned int x = threadIdx.x + blockIdx.x * blockDim.x; unsigned int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int offset = x + y * blockDim.x * gridDim.x; ptr[offset*4 + 0] = (int)( 255 * inSrc[offset] ); ptr[offset*4 + 1] = 0; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 0; } void my_swap (float **in, float **out) { float *dummy = *out; *out = *in; *in = dummy; }
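Both the global-memory and the texture paths of heat_trans apply the same four-neighbour update, out = (1 - 4*SPEED)*old + SPEED*(top + right + bottom + left), with edge cells clamped to themselves. For cross-checking a single time step on the CPU, a reference implementation might look like this hypothetical helper.

// Hypothetical CPU reference for one step of step_run_kernel on a dim x dim grid with clamped edges.
void step_reference(const float* in, float* out, int dim, float speed) {
    for (int y = 0; y < dim; ++y) {
        for (int x = 0; x < dim; ++x) {
            const int offset = x + y * dim;
            const int top    = (y > 0)       ? offset - dim : offset;
            const int bottom = (y < dim - 1) ? offset + dim : offset;
            const int left   = (x > 0)       ? offset - 1   : offset;
            const int right  = (x < dim - 1) ? offset + 1   : offset;
            out[offset] = (1.0f - 4.0f * speed) * in[offset]
                        + speed * (in[top] + in[right] + in[bottom] + in[left]);
        }
    }
}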
f9d79726fa59616f3138080c44871e42e7bbef45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16) { comp = +1.3931E-37f * +1.9547E-1f * (+1.0944E14f - logf(+1.8493E36f)); comp = (var_2 - -1.3692E7f + (var_3 - -1.9508E35f + -1.6681E36f / var_4)); float tmp_1 = (var_5 / +1.9037E34f); comp = tmp_1 * -1.4246E-18f * var_6 - fabsf(-1.5294E-42f / (var_7 * (var_8 * (var_9 + +1.0429E24f - var_10)))); for (int i=0; i < var_1; ++i) { var_11[i] = tanhf(var_12 - +1.0808E36f * var_13 - -0.0f - var_14); float tmp_2 = sinhf(var_15 - -1.0151E-35f); comp = tmp_2 + var_11[i] * var_16 - (-0.0f / -1.3075E-44f); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17); hipDeviceSynchronize(); return 0; }
f9d79726fa59616f3138080c44871e42e7bbef45.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16) { comp = +1.3931E-37f * +1.9547E-1f * (+1.0944E14f - logf(+1.8493E36f)); comp = (var_2 - -1.3692E7f + (var_3 - -1.9508E35f + -1.6681E36f / var_4)); float tmp_1 = (var_5 / +1.9037E34f); comp = tmp_1 * -1.4246E-18f * var_6 - fabsf(-1.5294E-42f / (var_7 * (var_8 * (var_9 + +1.0429E24f - var_10)))); for (int i=0; i < var_1; ++i) { var_11[i] = tanhf(var_12 - +1.0808E36f * var_13 - -0.0f - var_14); float tmp_2 = sinhf(var_15 - -1.0151E-35f); comp = tmp_2 + var_11[i] * var_16 - (-0.0f / -1.3075E-44f); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17); cudaDeviceSynchronize(); return 0; }
4c2ec7351ffe0d37a92362d7ba5a42bbba0853be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Builds a quad tree for computing Barnes-Hut approximation of t-SNE repulsive forces. */ #include "include/kernels/tree_builder.h" /******************************************************************************/ /*** build tree ***************************************************************/ /******************************************************************************/ __global__ void tsnecuda::bh::ClearKernel1(volatile int * __restrict__ children, const int num_nodes, const int num_points) { register int k, inc, top, bottom; top = 4 * num_nodes; bottom = 4 * num_points; inc = blockDim.x * gridDim.x; k = (bottom & (-32)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; // iterate over all cells assigned to thread while (k < top) { children[k] = -1; k += inc; } } __global__ void tsnecuda::bh::TreeBuildingKernel(volatile int * __restrict__ errd, volatile int * __restrict__ children, volatile float * __restrict__ x_pos_device, volatile float * __restrict__ y_pos_device, const int num_nodes, const int num_points) { register int i, j, depth, localmaxdepth, skip, inc; register float x, y, r; register float px, py; register float dx, dy; register int ch, n, cell, locked, patch; register float radius, rootx, rooty; // cache root data radius = radiusd; rootx = x_pos_device[num_nodes]; rooty = y_pos_device[num_nodes]; localmaxdepth = 1; skip = 1; inc = blockDim.x * gridDim.x; i = threadIdx.x + blockIdx.x * blockDim.x; // iterate over all bodies assigned to thread while (i < num_points) { if (skip != 0) { // new body, so start traversing at root skip = 0; px = x_pos_device[i]; py = y_pos_device[i]; n = num_nodes; depth = 1; r = radius * 0.5f; dx = dy = -r; j = 0; // determine which child to follow if (rootx < px) {j = 1; dx = r;} if (rooty < py) {j |= 2; dy = r;} x = rootx + dx; y = rooty + dy; } // follow path to leaf cell ch = children[n*4+j]; while (ch >= num_points) { n = ch; depth++; r *= 0.5f; dx = dy = -r; j = 0; // determine which child to follow if (x < px) {j = 1; dx = r;} if (y < py) {j |= 2; dy = r;} x += dx; y += dy; ch = children[n*4+j]; } if (ch != -2) { // skip if child pointer is locked and try again later locked = n*4+j; if (ch == -1) { if (-1 == atomicCAS((int *)&children[locked], -1, i)) { // if null, just insert the new body localmaxdepth = max(depth, localmaxdepth); i += inc; // move on to next body skip = 1; } } else { // there already is a body in this position if (ch == atomicCAS((int *)&children[locked], ch, -2)) { // try to lock patch = -1; // create new cell(s) and insert the old and new body do { depth++; cell = atomicSub((int *)&bottomd, 1) - 1; if (cell <= num_points) { *errd = 1; bottomd = num_nodes; } if (patch != -1) { children[n*4+j] = cell; } patch = max(patch, cell); j = 0; if (x < x_pos_device[ch]) j = 1; if (y < y_pos_device[ch]) j |= 2; children[cell*4+j] = ch; n = cell; r *= 0.5f; dx = dy = -r; j = 0; if (x < px) {j = 1; dx = r;} if (y < py) {j |= 2; dy = r;} x += dx; y += dy; ch = children[n*4+j]; // repeat until the two bodies are different children } while (ch >= 0 && r > 1e-10); // add radius check because bodies that are very close together can cause this to fail... 
if points are too close together it will exceed the max depth of the tree children[n*4+j] = i; localmaxdepth = max(depth, localmaxdepth); i += inc; // move on to next body skip = 2; } } } __threadfence(); if (skip == 2) { children[locked] = patch; } } // record maximum tree depth atomicMax((int *)&maxdepthd, localmaxdepth); } __global__ void tsnecuda::bh::ClearKernel2(volatile int * __restrict__ cell_starts, volatile float * __restrict__ cell_mass, const int num_nodes) { register int k, inc, bottom; bottom = bottomd; inc = blockDim.x * gridDim.x; k = (bottom & (-32)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; // iterate over all cells assigned to thread while (k < num_nodes) { cell_mass[k] = -1.0f; cell_starts[k] = -1; k += inc; } } void tsnecuda::bh::BuildTree(tsnecuda::GpuOptions &gpu_opt, thrust::device_vector<int> &errd, thrust::device_vector<int> &children, thrust::device_vector<int> &cell_starts, thrust::device_vector<float> &cell_mass, thrust::device_vector<float> &points, const int num_nodes, const int num_points, const int num_blocks) { hipLaunchKernelGGL(( tsnecuda::bh::ClearKernel1), dim3(num_blocks), dim3(1024), 0, 0, thrust::raw_pointer_cast(children.data()), num_nodes, num_points); hipLaunchKernelGGL(( tsnecuda::bh::TreeBuildingKernel), dim3(num_blocks * gpu_opt.tree_kernel_factor), dim3(gpu_opt.tree_kernel_threads), 0, 0, thrust::raw_pointer_cast(errd.data()), thrust::raw_pointer_cast(children.data()), thrust::raw_pointer_cast(points.data()), thrust::raw_pointer_cast(points.data() + num_nodes + 1), num_nodes, num_points); hipLaunchKernelGGL(( tsnecuda::bh::ClearKernel2), dim3(num_blocks), dim3(1024), 0, 0, thrust::raw_pointer_cast(cell_starts.data()), thrust::raw_pointer_cast(cell_mass.data()), num_nodes); GpuErrorCheck(hipDeviceSynchronize()); }
4c2ec7351ffe0d37a92362d7ba5a42bbba0853be.cu
/* Builds a quad tree for computing Barnes-Hut approximation of t-SNE repulsive forces. */ #include "include/kernels/tree_builder.h" /******************************************************************************/ /*** build tree ***************************************************************/ /******************************************************************************/ __global__ void tsnecuda::bh::ClearKernel1(volatile int * __restrict__ children, const int num_nodes, const int num_points) { register int k, inc, top, bottom; top = 4 * num_nodes; bottom = 4 * num_points; inc = blockDim.x * gridDim.x; k = (bottom & (-32)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; // iterate over all cells assigned to thread while (k < top) { children[k] = -1; k += inc; } } __global__ void tsnecuda::bh::TreeBuildingKernel(volatile int * __restrict__ errd, volatile int * __restrict__ children, volatile float * __restrict__ x_pos_device, volatile float * __restrict__ y_pos_device, const int num_nodes, const int num_points) { register int i, j, depth, localmaxdepth, skip, inc; register float x, y, r; register float px, py; register float dx, dy; register int ch, n, cell, locked, patch; register float radius, rootx, rooty; // cache root data radius = radiusd; rootx = x_pos_device[num_nodes]; rooty = y_pos_device[num_nodes]; localmaxdepth = 1; skip = 1; inc = blockDim.x * gridDim.x; i = threadIdx.x + blockIdx.x * blockDim.x; // iterate over all bodies assigned to thread while (i < num_points) { if (skip != 0) { // new body, so start traversing at root skip = 0; px = x_pos_device[i]; py = y_pos_device[i]; n = num_nodes; depth = 1; r = radius * 0.5f; dx = dy = -r; j = 0; // determine which child to follow if (rootx < px) {j = 1; dx = r;} if (rooty < py) {j |= 2; dy = r;} x = rootx + dx; y = rooty + dy; } // follow path to leaf cell ch = children[n*4+j]; while (ch >= num_points) { n = ch; depth++; r *= 0.5f; dx = dy = -r; j = 0; // determine which child to follow if (x < px) {j = 1; dx = r;} if (y < py) {j |= 2; dy = r;} x += dx; y += dy; ch = children[n*4+j]; } if (ch != -2) { // skip if child pointer is locked and try again later locked = n*4+j; if (ch == -1) { if (-1 == atomicCAS((int *)&children[locked], -1, i)) { // if null, just insert the new body localmaxdepth = max(depth, localmaxdepth); i += inc; // move on to next body skip = 1; } } else { // there already is a body in this position if (ch == atomicCAS((int *)&children[locked], ch, -2)) { // try to lock patch = -1; // create new cell(s) and insert the old and new body do { depth++; cell = atomicSub((int *)&bottomd, 1) - 1; if (cell <= num_points) { *errd = 1; bottomd = num_nodes; } if (patch != -1) { children[n*4+j] = cell; } patch = max(patch, cell); j = 0; if (x < x_pos_device[ch]) j = 1; if (y < y_pos_device[ch]) j |= 2; children[cell*4+j] = ch; n = cell; r *= 0.5f; dx = dy = -r; j = 0; if (x < px) {j = 1; dx = r;} if (y < py) {j |= 2; dy = r;} x += dx; y += dy; ch = children[n*4+j]; // repeat until the two bodies are different children } while (ch >= 0 && r > 1e-10); // add radius check because bodies that are very close together can cause this to fail... 
if points are too close together it will exceed the max depth of the tree children[n*4+j] = i; localmaxdepth = max(depth, localmaxdepth); i += inc; // move on to next body skip = 2; } } } __threadfence(); if (skip == 2) { children[locked] = patch; } } // record maximum tree depth atomicMax((int *)&maxdepthd, localmaxdepth); } __global__ void tsnecuda::bh::ClearKernel2(volatile int * __restrict__ cell_starts, volatile float * __restrict__ cell_mass, const int num_nodes) { register int k, inc, bottom; bottom = bottomd; inc = blockDim.x * gridDim.x; k = (bottom & (-32)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size if (k < bottom) k += inc; // iterate over all cells assigned to thread while (k < num_nodes) { cell_mass[k] = -1.0f; cell_starts[k] = -1; k += inc; } } void tsnecuda::bh::BuildTree(tsnecuda::GpuOptions &gpu_opt, thrust::device_vector<int> &errd, thrust::device_vector<int> &children, thrust::device_vector<int> &cell_starts, thrust::device_vector<float> &cell_mass, thrust::device_vector<float> &points, const int num_nodes, const int num_points, const int num_blocks) { tsnecuda::bh::ClearKernel1<<<num_blocks, 1024>>>(thrust::raw_pointer_cast(children.data()), num_nodes, num_points); tsnecuda::bh::TreeBuildingKernel<<<num_blocks * gpu_opt.tree_kernel_factor, gpu_opt.tree_kernel_threads>>>( thrust::raw_pointer_cast(errd.data()), thrust::raw_pointer_cast(children.data()), thrust::raw_pointer_cast(points.data()), thrust::raw_pointer_cast(points.data() + num_nodes + 1), num_nodes, num_points); tsnecuda::bh::ClearKernel2<<<num_blocks, 1024>>>(thrust::raw_pointer_cast(cell_starts.data()), thrust::raw_pointer_cast(cell_mass.data()), num_nodes); GpuErrorCheck(cudaDeviceSynchronize()); }
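TreeBuildingKernel selects a child slot by setting bit 0 when the body lies to the right of the cell centre and bit 1 when it lies above it. Pulled out of the traversal loop, the quadrant choice reduces to the small helper sketched below (hypothetical, host-side, for illustration only).

// Hypothetical helper: quadrant index used to address children[node * 4 + j] in the quad tree.
// Bit 0 is set when the body lies right of the cell centre, bit 1 when it lies above it.
int child_quadrant(float cell_x, float cell_y, float body_x, float body_y) {
    int j = 0;
    if (cell_x < body_x) j = 1;
    if (cell_y < body_y) j |= 2;
    return j;   // 0..3
}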
ff72e2aa412e639cb857a25809dd8c3b1c2adf7d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) Microsoft Corporation. Licensed under the MIT License. */ /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "core/providers/cuda/cu_inc/common.cuh" #include "contrib_ops/cuda/bert/relative_attn_bias_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template<typename T> __global__ void buildRelativeAttentionBias(T* relative_attention_bias, const T* relative_attention_bias_table, const int head_num, const int seq_len, const int num_bucket, const bool is_bidirectional, const int max_distance) { const int head_id = blockIdx.x; for (int seq_id = threadIdx.x; seq_id < seq_len * seq_len; seq_id += blockDim.x * gridDim.y) { int row_id = seq_id / seq_len; int col_id = seq_id % seq_len; int relative_position = col_id - row_id; int relative_buckets = 0; int tmp_num_bucket = num_bucket; if (is_bidirectional) { tmp_num_bucket /= 2; if (relative_position > 0) { relative_buckets += tmp_num_bucket; } else { relative_position *= -1; } } else { if (relative_position > 0) { relative_position = 0; } else { relative_position *= -1; } } int max_exact = tmp_num_bucket / 2; bool is_small = relative_position < max_exact; int relative_position_if_large = max_exact + (int)(logf(relative_position * 1.0f / max_exact) / logf((float)max_distance / max_exact) * (tmp_num_bucket - max_exact)); relative_position_if_large = min(relative_position_if_large, tmp_num_bucket - 1); relative_buckets += is_small ? 
relative_position : relative_position_if_large; relative_attention_bias[head_id * seq_len * seq_len + seq_id] = relative_attention_bias_table[head_id * num_bucket + relative_buckets]; } } template <typename T> Status LaunchRelPosAttnBiasKernel( hipStream_t stream, T* output, const T* bias_table, const int num_heads, const int seq_len, const int num_bucket, const int max_distance, const bool is_bidirectional, const int max_threads_per_block) { const int squared_sq_len = seq_len * seq_len; if (squared_sq_len <= max_threads_per_block) { dim3 grid(num_heads); dim3 block(squared_sq_len); hipLaunchKernelGGL(( buildRelativeAttentionBias), dim3(grid), dim3(block), 0, stream, output, bias_table, num_heads, seq_len, num_bucket, is_bidirectional, max_distance); return CUDA_CALL(hipGetLastError()); } else if (seq_len >= 128 && seq_len <= 384) { dim3 grid(num_heads, seq_len); dim3 block(seq_len); hipLaunchKernelGGL(( buildRelativeAttentionBias), dim3(grid), dim3(block), 0, stream, output, bias_table, num_heads, seq_len, num_bucket, is_bidirectional, max_distance); return CUDA_CALL(hipGetLastError()); } else { int blockSize = max_threads_per_block; const int grid_y_Size = (squared_sq_len + blockSize - 1) / blockSize; dim3 grid(num_heads, grid_y_Size); dim3 block(blockSize); hipLaunchKernelGGL(( buildRelativeAttentionBias), dim3(grid), dim3(block), 0, stream, output, bias_table, num_heads, seq_len, num_bucket, is_bidirectional, max_distance); return CUDA_CALL(hipGetLastError()); } } template Status LaunchRelPosAttnBiasKernel<float>(hipStream_t stream, float* output, const float* bias_table, const int num_heads, const int seq_len, const int num_bucket, const int max_distance, const bool is_bidirectional, const int max_threads_per_block); template Status LaunchRelPosAttnBiasKernel<half>(hipStream_t stream, half* output, const half* bias_table, const int num_heads, const int seq_len, const int num_bucket, const int max_distance, const bool is_bidirectional, const int max_threads_per_block); } // namespace cuda } // namespace contrib } // namespace onnxruntime
ff72e2aa412e639cb857a25809dd8c3b1c2adf7d.cu
/* Copyright (c) Microsoft Corporation. Licensed under the MIT License. */ /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "core/providers/cuda/cu_inc/common.cuh" #include "contrib_ops/cuda/bert/relative_attn_bias_impl.h" namespace onnxruntime { namespace contrib { namespace cuda { using namespace onnxruntime::cuda; template<typename T> __global__ void buildRelativeAttentionBias(T* relative_attention_bias, const T* relative_attention_bias_table, const int head_num, const int seq_len, const int num_bucket, const bool is_bidirectional, const int max_distance) { const int head_id = blockIdx.x; for (int seq_id = threadIdx.x; seq_id < seq_len * seq_len; seq_id += blockDim.x * gridDim.y) { int row_id = seq_id / seq_len; int col_id = seq_id % seq_len; int relative_position = col_id - row_id; int relative_buckets = 0; int tmp_num_bucket = num_bucket; if (is_bidirectional) { tmp_num_bucket /= 2; if (relative_position > 0) { relative_buckets += tmp_num_bucket; } else { relative_position *= -1; } } else { if (relative_position > 0) { relative_position = 0; } else { relative_position *= -1; } } int max_exact = tmp_num_bucket / 2; bool is_small = relative_position < max_exact; int relative_position_if_large = max_exact + (int)(logf(relative_position * 1.0f / max_exact) / logf((float)max_distance / max_exact) * (tmp_num_bucket - max_exact)); relative_position_if_large = min(relative_position_if_large, tmp_num_bucket - 1); relative_buckets += is_small ? 
relative_position : relative_position_if_large; relative_attention_bias[head_id * seq_len * seq_len + seq_id] = relative_attention_bias_table[head_id * num_bucket + relative_buckets]; } } template <typename T> Status LaunchRelPosAttnBiasKernel( cudaStream_t stream, T* output, const T* bias_table, const int num_heads, const int seq_len, const int num_bucket, const int max_distance, const bool is_bidirectional, const int max_threads_per_block) { const int squared_sq_len = seq_len * seq_len; if (squared_sq_len <= max_threads_per_block) { dim3 grid(num_heads); dim3 block(squared_sq_len); buildRelativeAttentionBias<<<grid, block, 0, stream>>>(output, bias_table, num_heads, seq_len, num_bucket, is_bidirectional, max_distance); return CUDA_CALL(cudaGetLastError()); } else if (seq_len >= 128 && seq_len <= 384) { dim3 grid(num_heads, seq_len); dim3 block(seq_len); buildRelativeAttentionBias<<<grid, block, 0, stream>>>(output, bias_table, num_heads, seq_len, num_bucket, is_bidirectional, max_distance); return CUDA_CALL(cudaGetLastError()); } else { int blockSize = max_threads_per_block; const int grid_y_Size = (squared_sq_len + blockSize - 1) / blockSize; dim3 grid(num_heads, grid_y_Size); dim3 block(blockSize); buildRelativeAttentionBias<<<grid, block, 0, stream>>>(output, bias_table, num_heads, seq_len, num_bucket, is_bidirectional, max_distance); return CUDA_CALL(cudaGetLastError()); } } template Status LaunchRelPosAttnBiasKernel<float>(cudaStream_t stream, float* output, const float* bias_table, const int num_heads, const int seq_len, const int num_bucket, const int max_distance, const bool is_bidirectional, const int max_threads_per_block); template Status LaunchRelPosAttnBiasKernel<half>(cudaStream_t stream, half* output, const half* bias_table, const int num_heads, const int seq_len, const int num_bucket, const int max_distance, const bool is_bidirectional, const int max_threads_per_block); } // namespace cuda } // namespace contrib } // namespace onnxruntime
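buildRelativeAttentionBias implements T5-style relative-position bucketing: half of the buckets (per direction in the bidirectional case) cover small offsets exactly, and the rest are spaced logarithmically up to max_distance. A host-side restatement of the bucket computation, useful as a reference when validating the GPU output, is sketched below; the helper name is hypothetical.

// Hypothetical host-side mirror of the bucket computation in buildRelativeAttentionBias.
#include <algorithm>
#include <cmath>

int relative_position_bucket(int relative_position, bool is_bidirectional,
                             int num_buckets, int max_distance) {
    int bucket = 0;
    if (is_bidirectional) {
        num_buckets /= 2;
        if (relative_position > 0) {
            bucket += num_buckets;              // positive offsets use the upper half of the table
        } else {
            relative_position = -relative_position;
        }
    } else {
        relative_position = relative_position > 0 ? 0 : -relative_position;
    }
    const int max_exact = num_buckets / 2;      // small offsets get one bucket each
    if (relative_position < max_exact) {
        return bucket + relative_position;
    }
    // Larger offsets are mapped logarithmically up to max_distance.
    const int large = max_exact + (int)(std::log(relative_position * 1.0f / max_exact)
                                        / std::log((float)max_distance / max_exact)
                                        * (num_buckets - max_exact));
    return bucket + std::min(large, num_buckets - 1);
}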
9b67ad7fed3e7a5941cb547dc1cd46970ee4dc9c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;

long int array[100];
int N;

__device__
/* Newton method to calculate the zero point of y=x*x-a */
double mysqrt(int s){
    //double s=1.0;
    double before=1.0,after;
    while(1){
        after=(before*before+s)/(2.0*before);
        if(fabs(before-after)<0.0000001)
            break;
        before=after;
    }
    return before;
}

__device__
double power1(int n)
{
    double temp=1.0;
    double a5=mysqrt(5);
    for(int i=0; i<n; ++i)
        temp=temp*(1.0+a5)/2.0;//sqrt
    return temp;
}

__device__
double power2(int n)
{
    double temp=1.0;
    double a5=mysqrt(5);
    for(int i=0; i<n; ++i)
        temp=temp*(1.0-a5)/2.0;//sqrt
    return temp;
}

__global__ void fib(long int *C, int N)
{
    // index along the x direction of the grid
    //int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    // index along the y direction of the grid
    //int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    //cout<<"threadIdx.x"=<<threadIdx.x<<" xIndex="<<xIndex<<endl;
    int i = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
    if (i < N)
    {
        C[i] = (long int)((power1(i+1)-power2(i+1))/mysqrt(5));//sqrt
    }
}

int main(int argc, char **agrv)
{
    long int *device_C;
    scanf("%d",&N);
    // N=atoi(agrv[1]);
    struct timeval tpstart,tpend;
    struct timeval kernel_begin,kernel_end;
    // start timing
    gettimeofday(&tpstart,NULL);
    // allocate memory on the GPU
    hipMalloc((void**)&device_C, N * sizeof(long int));
    // define the distribution of threads within a block
    dim3 threadsPerBlock(4, 4);
    // define the block count according to the input image's width and height
    dim3 blocksPerGrid(1,(N + 15) / 16,1);
    gettimeofday(&kernel_begin,NULL);
    hipLaunchKernelGGL(( fib) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, device_C,N);
    gettimeofday(&kernel_end,NULL);
    double kenerl_time=1000000*(kernel_end.tv_sec-kernel_begin.tv_sec)+kernel_end.tv_usec-kernel_begin.tv_usec;
    cout<<"this kernel operation consumes "<<kenerl_time<<"us\n";
    // copy the result back to the CPU
    hipMemcpy(array, device_C, N * sizeof(long int), hipMemcpyDeviceToHost);
    for(int i = 0 ; i<N ; i++)
    {
        if(i==0)
            printf("%ld",array[i]);
        else
            printf(" %ld",array[i]);
    }
    putchar('\n');
    // stop timing
    gettimeofday(&tpend,NULL);
    double timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+tpend.tv_usec-tpstart.tv_usec;
    cout<<"this operation consumes "<<timeuse/1000<<"ms\n";
    return 0;
}
/***** End *****/
9b67ad7fed3e7a5941cb547dc1cd46970ee4dc9c.cu
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;

long int array[100];
int N;

__device__
/* Newton method to calculate the zero point of y=x*x-a */
double mysqrt(int s){
    //double s=1.0;
    double before=1.0,after;
    while(1){
        after=(before*before+s)/(2.0*before);
        if(fabs(before-after)<0.0000001)
            break;
        before=after;
    }
    return before;
}

__device__
double power1(int n)
{
    double temp=1.0;
    double a5=mysqrt(5);
    for(int i=0; i<n; ++i)
        temp=temp*(1.0+a5)/2.0;//sqrt
    return temp;
}

__device__
double power2(int n)
{
    double temp=1.0;
    double a5=mysqrt(5);
    for(int i=0; i<n; ++i)
        temp=temp*(1.0-a5)/2.0;//sqrt
    return temp;
}

__global__ void fib(long int *C, int N)
{
    // index along the x direction of the grid
    //int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    // index along the y direction of the grid
    //int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    //cout<<"threadIdx.x"=<<threadIdx.x<<" xIndex="<<xIndex<<endl;
    int i = (blockIdx.x * gridDim.x + blockIdx.y) * blockDim.x * blockDim.y + threadIdx.x * blockDim.x + threadIdx.y;
    if (i < N)
    {
        C[i] = (long int)((power1(i+1)-power2(i+1))/mysqrt(5));//sqrt
    }
}

int main(int argc, char **agrv)
{
    long int *device_C;
    scanf("%d",&N);
    // N=atoi(agrv[1]);
    struct timeval tpstart,tpend;
    struct timeval kernel_begin,kernel_end;
    // start timing
    gettimeofday(&tpstart,NULL);
    // allocate memory on the GPU
    cudaMalloc((void**)&device_C, N * sizeof(long int));
    // define the distribution of threads within a block
    dim3 threadsPerBlock(4, 4);
    // define the block count according to the input image's width and height
    dim3 blocksPerGrid(1,(N + 15) / 16,1);
    gettimeofday(&kernel_begin,NULL);
    fib <<<blocksPerGrid, threadsPerBlock >>>(device_C,N);
    gettimeofday(&kernel_end,NULL);
    double kenerl_time=1000000*(kernel_end.tv_sec-kernel_begin.tv_sec)+kernel_end.tv_usec-kernel_begin.tv_usec;
    cout<<"this kernel operation consumes "<<kenerl_time<<"us\n";
    // copy the result back to the CPU
    cudaMemcpy(array, device_C, N * sizeof(long int), cudaMemcpyDeviceToHost);
    for(int i = 0 ; i<N ; i++)
    {
        if(i==0)
            printf("%ld",array[i]);
        else
            printf(" %ld",array[i]);
    }
    putchar('\n');
    // stop timing
    gettimeofday(&tpend,NULL);
    double timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+tpend.tv_usec-tpstart.tv_usec;
    cout<<"this operation consumes "<<timeuse/1000<<"ms\n";
    return 0;
}
/***** End *****/
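The kernel in this pair evaluates Fibonacci numbers with Binet's closed form, F(n) = (phi^n - psi^n) / sqrt(5) with phi = (1 + sqrt(5)) / 2 and psi = (1 - sqrt(5)) / 2, using a Newton iteration for sqrt(5). Below is a small host-only sanity check of that identity; it is not part of the file pair above and does not touch the GPU.

#include <cmath>
#include <cstdio>

int main() {
  const double s5 = std::sqrt(5.0);
  long a = 1, b = 1;  // F(1), F(2)
  for (int n = 1; n <= 20; ++n) {
    // Closed form; round to nearest because the floating-point value sits next to an integer.
    long closed = (long)((std::pow((1.0 + s5) / 2.0, n) -
                          std::pow((1.0 - s5) / 2.0, n)) / s5 + 0.5);
    long iter;
    if (n <= 2) {
      iter = 1;
    } else {
      iter = a + b;   // standard iterative recurrence for comparison
      a = b;
      b = iter;
    }
    std::printf("F(%2d): closed form = %ld, iterative = %ld\n", n, closed, iter);
  }
  return 0;
}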
984b077e276320a47f15e845ea3e3eff7ebc8739.hip
// !!! This is a file automatically generated by hipify!!! /* This file contains routines for Parallel vector operations. */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #include <petscconf.h> #include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/ #include <petsc/private/cudavecimpl.h> /*MC VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise. Options Database Keys: . -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDestroy_MPICUDA(Vec v) { Vec_MPI *vecmpi = (Vec_MPI*)v->data; Vec_CUDA *veccuda; PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; if (v->spptr) { veccuda = (Vec_CUDA*)v->spptr; if (veccuda->GPUarray_allocated) { err = hipFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err); veccuda->GPUarray_allocated = NULL; } if (veccuda->stream) { err = hipStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err); } if (v->pinned_memory) { ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr); ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr); ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr); v->pinned_memory = PETSC_FALSE; } ierr = PetscFree(v->spptr);CHKERRQ(ierr); } ierr = VecDestroy_MPI(v);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z) { PetscReal sum,work = 0.0; PetscErrorCode ierr; PetscFunctionBegin; if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecNorm_SeqCUDA(xin,NORM_2,&work); work *= work; ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = PetscSqrtReal(sum); } else if (type == NORM_1) { /* Find the local part */ ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr); /* Find the global max */ ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else if (type == NORM_INFINITY) { /* Find the local max */ ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr); /* Find the global max */ ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else if (type == NORM_1_AND_2) { PetscReal temp[2]; ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr); ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr); temp[1] = temp[1]*temp[1]; ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); z[1] = PetscSqrtReal(z[1]); } PetscFunctionReturn(0); } PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z) { PetscScalar awork[128],*work = awork; PetscErrorCode ierr; PetscFunctionBegin; if (nv > 128) { ierr = 
PetscMalloc1(nv,&work);CHKERRQ(ierr); } ierr = VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr); ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); if (nv > 128) { ierr = PetscFree(work);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA Options Database Keys: . -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v) { PetscErrorCode ierr; Vec_MPI *vw,*w = (Vec_MPI*)win->data; PetscScalar *array; PetscFunctionBegin; ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr); vw = (Vec_MPI*)(*v)->data; ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr); /* save local representation of the parallel vector (and scatter) if it exists */ if (w->localrep) { ierr = VecGetArray(*v,&array);CHKERRQ(ierr); ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr); ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr); ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr); vw->localupdate = w->localupdate; if (vw->localupdate) { ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr); } } /* New vector should inherit stashing property of parent */ (*v)->stash.donotstash = win->stash.donotstash; (*v)->stash.ignorenegidx = win->stash.ignorenegidx; /* change type_name appropriately */ ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr); (*v)->map->bs = PetscAbs(win->map->bs); (*v)->bstash.bs = win->bstash.bs; PetscFunctionReturn(0); } PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm) { PetscErrorCode ierr; PetscScalar work[2],sum[2]; PetscFunctionBegin; ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRMPI(ierr); *dp = sum[0]; *nm = sum[1]; PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA(Vec vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr); ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; PetscFunctionReturn(0); } PetscErrorCode VecCreate_CUDA(Vec v) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRMPI(ierr); if (size == 1) { ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr); } else { ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr); } 
PetscFunctionReturn(0); } /*@C VecCreateMPICUDAWithArray - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - array - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. If the user-provided array is NULL, then VecCUDAPlaceArray() can be used at a later stage to SET the array for storing the vector values. PETSc does NOT free the array when the vector is destroyed via VecDestroy(). The user should not free the array until the vector is destroyed. Level: intermediate .seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray() @*/ PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector"); ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = VecCreate(comm,vv);CHKERRQ(ierr); ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr); ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr); PetscFunctionReturn(0); } /*@C VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - cpuarray - the user provided CPU array to store the vector values - gpuarray - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: If both cpuarray and gpuarray are provided, the caller must ensure that the provided arrays have identical values. Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. PETSc does NOT free the provided arrays when the vector is destroyed via VecDestroy(). The user should not free the array until the vector is destroyed. 
Level: intermediate .seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(), VecCUDAAllocateCheckHost() @*/ PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr); if (cpuarray && gpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_BOTH; } else if (cpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_CPU; } else if (gpuarray) { (*vv)->offloadmask = PETSC_OFFLOAD_GPU; } else { (*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } PetscFunctionReturn(0); } PetscErrorCode VecMax_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z) { PetscErrorCode ierr; PetscReal work; PetscFunctionBegin; ierr = VecMax_SeqCUDA(xin,idx,&work);CHKERRQ(ierr); if (!idx) { ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else { PetscReal work2[2],z2[2]; PetscInt rstart; rstart = xin->map->rstart; work2[0] = work; work2[1] = *idx + rstart; ierr = MPIU_Allreduce(work2,z2,2,MPIU_REAL,MPIU_MAXINDEX_OP,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = z2[0]; *idx = (PetscInt)z2[1]; } PetscFunctionReturn(0); } PetscErrorCode VecMin_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z) { PetscErrorCode ierr; PetscReal work; PetscFunctionBegin; ierr = VecMin_SeqCUDA(xin,idx,&work);CHKERRQ(ierr); if (!idx) { ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else { PetscReal work2[2],z2[2]; PetscInt rstart; ierr = VecGetOwnershipRange(xin,&rstart,NULL);CHKERRQ(ierr); work2[0] = work; work2[1] = *idx + rstart; ierr = MPIU_Allreduce(work2,z2,2,MPIU_REAL,MPIU_MININDEX_OP,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = z2[0]; *idx = (PetscInt)z2[1]; } PetscFunctionReturn(0); } PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin) { PetscErrorCode ierr; PetscFunctionBegin; V->boundtocpu = pin; if (pin) { ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr); V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */ V->ops->dotnorm2 = NULL; V->ops->waxpy = VecWAXPY_Seq; V->ops->dot = VecDot_MPI; V->ops->mdot = VecMDot_MPI; V->ops->tdot = VecTDot_MPI; V->ops->norm = VecNorm_MPI; V->ops->scale = VecScale_Seq; V->ops->copy = VecCopy_Seq; V->ops->set = VecSet_Seq; V->ops->swap = VecSwap_Seq; V->ops->axpy = VecAXPY_Seq; V->ops->axpby = VecAXPBY_Seq; V->ops->maxpy = VecMAXPY_Seq; V->ops->aypx = VecAYPX_Seq; V->ops->axpbypcz = VecAXPBYPCZ_Seq; V->ops->pointwisemult = VecPointwiseMult_Seq; V->ops->setrandom = VecSetRandom_Seq; V->ops->placearray = VecPlaceArray_Seq; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_Seq; V->ops->dot_local = VecDot_Seq; V->ops->tdot_local = VecTDot_Seq; V->ops->norm_local = VecNorm_Seq; V->ops->mdot_local = VecMDot_Seq; V->ops->pointwisedivide = VecPointwiseDivide_Seq; V->ops->getlocalvector = NULL; V->ops->restorelocalvector = NULL; V->ops->getlocalvectorread = NULL; V->ops->restorelocalvectorread = NULL; V->ops->getarraywrite = NULL; V->ops->max = VecMax_MPI; V->ops->min = VecMin_MPI; /* default random number 
generator */ ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr); ierr = PetscStrallocpy(PETSCRANDER48,&V->defaultrandtype);CHKERRQ(ierr); } else { V->ops->dotnorm2 = VecDotNorm2_MPICUDA; V->ops->waxpy = VecWAXPY_SeqCUDA; V->ops->duplicate = VecDuplicate_MPICUDA; V->ops->dot = VecDot_MPICUDA; V->ops->mdot = VecMDot_MPICUDA; V->ops->tdot = VecTDot_MPICUDA; V->ops->norm = VecNorm_MPICUDA; V->ops->scale = VecScale_SeqCUDA; V->ops->copy = VecCopy_SeqCUDA; V->ops->set = VecSet_SeqCUDA; V->ops->swap = VecSwap_SeqCUDA; V->ops->axpy = VecAXPY_SeqCUDA; V->ops->axpby = VecAXPBY_SeqCUDA; V->ops->maxpy = VecMAXPY_SeqCUDA; V->ops->aypx = VecAYPX_SeqCUDA; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA; V->ops->pointwisemult = VecPointwiseMult_SeqCUDA; V->ops->setrandom = VecSetRandom_SeqCUDA; V->ops->placearray = VecPlaceArray_SeqCUDA; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_SeqCUDA; V->ops->dot_local = VecDot_SeqCUDA; V->ops->tdot_local = VecTDot_SeqCUDA; V->ops->norm_local = VecNorm_SeqCUDA; V->ops->mdot_local = VecMDot_SeqCUDA; V->ops->destroy = VecDestroy_MPICUDA; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA; V->ops->getlocalvector = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA; V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA; V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA; V->ops->getarray = VecGetArray_SeqCUDA; V->ops->restorearray = VecRestoreArray_SeqCUDA; V->ops->getarrayandmemtype = VecGetArrayAndMemType_SeqCUDA; V->ops->restorearrayandmemtype = VecRestoreArrayAndMemType_SeqCUDA; V->ops->max = VecMax_MPICUDA; V->ops->min = VecMin_MPICUDA; /* default random number generator */ ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr); ierr = PetscStrallocpy(PETSCCURAND,&V->defaultrandtype);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[]) { PetscErrorCode ierr; Vec_CUDA *veccuda; PetscFunctionBegin; ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr); ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr); vv->ops->bindtocpu = VecBindToCPU_MPICUDA; /* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */ if (alloc && !array) { ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; } if (array) { if (!vv->spptr) { PetscReal pinned_memory_min; PetscBool flag; /* Cannot use PetscNew() here because spptr is void* */ ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr); veccuda = (Vec_CUDA*)vv->spptr; veccuda->stream = 0; /* using default stream */ veccuda->GPUarray_allocated = 0; vv->offloadmask = PETSC_OFFLOAD_UNALLOCATED; vv->minimum_bytes_pinned_memory = 0; /* Need to parse command line for minimum size to use for pinned memory allocations on host here. Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? 
*/ ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr); pinned_memory_min = vv->minimum_bytes_pinned_memory; ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr); if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min; ierr = PetscOptionsEnd();CHKERRQ(ierr); } veccuda = (Vec_CUDA*)vv->spptr; veccuda->GPUarray = (PetscScalar*)array; } PetscFunctionReturn(0); }
984b077e276320a47f15e845ea3e3eff7ebc8739.cu
/* This file contains routines for Parallel vector operations. */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #include <petscconf.h> #include <../src/vec/vec/impls/mpi/pvecimpl.h> /*I "petscvec.h" I*/ #include <petsc/private/cudavecimpl.h> /*MC VECCUDA - VECCUDA = "cuda" - A VECSEQCUDA on a single-process communicator, and VECMPICUDA otherwise. Options Database Keys: . -vec_type cuda - sets the vector type to VECCUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECSEQCUDA, VECMPICUDA, VECSTANDARD, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDestroy_MPICUDA(Vec v) { Vec_MPI *vecmpi = (Vec_MPI*)v->data; Vec_CUDA *veccuda; PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; if (v->spptr) { veccuda = (Vec_CUDA*)v->spptr; if (veccuda->GPUarray_allocated) { err = cudaFree(((Vec_CUDA*)v->spptr)->GPUarray_allocated);CHKERRCUDA(err); veccuda->GPUarray_allocated = NULL; } if (veccuda->stream) { err = cudaStreamDestroy(((Vec_CUDA*)v->spptr)->stream);CHKERRCUDA(err); } if (v->pinned_memory) { ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr); ierr = PetscFree(vecmpi->array_allocated);CHKERRQ(ierr); ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr); v->pinned_memory = PETSC_FALSE; } ierr = PetscFree(v->spptr);CHKERRQ(ierr); } ierr = VecDestroy_MPI(v);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecNorm_MPICUDA(Vec xin,NormType type,PetscReal *z) { PetscReal sum,work = 0.0; PetscErrorCode ierr; PetscFunctionBegin; if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecNorm_SeqCUDA(xin,NORM_2,&work); work *= work; ierr = MPIU_Allreduce(&work,&sum,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = PetscSqrtReal(sum); } else if (type == NORM_1) { /* Find the local part */ ierr = VecNorm_SeqCUDA(xin,NORM_1,&work);CHKERRQ(ierr); /* Find the global max */ ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else if (type == NORM_INFINITY) { /* Find the local max */ ierr = VecNorm_SeqCUDA(xin,NORM_INFINITY,&work);CHKERRQ(ierr); /* Find the global max */ ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else if (type == NORM_1_AND_2) { PetscReal temp[2]; ierr = VecNorm_SeqCUDA(xin,NORM_1,temp);CHKERRQ(ierr); ierr = VecNorm_SeqCUDA(xin,NORM_2,temp+1);CHKERRQ(ierr); temp[1] = temp[1]*temp[1]; ierr = MPIU_Allreduce(temp,z,2,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); z[1] = PetscSqrtReal(z[1]); } PetscFunctionReturn(0); } PetscErrorCode VecDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecTDot_MPICUDA(Vec xin,Vec yin,PetscScalar *z) { PetscScalar sum,work; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecTDot_SeqCUDA(xin,yin,&work);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = sum; PetscFunctionReturn(0); } PetscErrorCode VecMDot_MPICUDA(Vec xin,PetscInt nv,const Vec y[],PetscScalar *z) { PetscScalar awork[128],*work = awork; PetscErrorCode ierr; PetscFunctionBegin; if (nv > 128) { ierr = PetscMalloc1(nv,&work);CHKERRQ(ierr); } ierr = 
VecMDot_SeqCUDA(xin,nv,y,work);CHKERRQ(ierr); ierr = MPIU_Allreduce(work,z,nv,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); if (nv > 128) { ierr = PetscFree(work);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*MC VECMPICUDA - VECMPICUDA = "mpicuda" - The basic parallel vector, modified to use CUDA Options Database Keys: . -vec_type mpicuda - sets the vector type to VECMPICUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecDuplicate_MPICUDA(Vec win,Vec *v) { PetscErrorCode ierr; Vec_MPI *vw,*w = (Vec_MPI*)win->data; PetscScalar *array; PetscFunctionBegin; ierr = VecCreate(PetscObjectComm((PetscObject)win),v);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*v)->map);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*v,PETSC_TRUE,w->nghost,0);CHKERRQ(ierr); vw = (Vec_MPI*)(*v)->data; ierr = PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));CHKERRQ(ierr); /* save local representation of the parallel vector (and scatter) if it exists */ if (w->localrep) { ierr = VecGetArray(*v,&array);CHKERRQ(ierr); ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,win->map->n+w->nghost,array,&vw->localrep);CHKERRQ(ierr); ierr = PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));CHKERRQ(ierr); ierr = VecRestoreArray(*v,&array);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);CHKERRQ(ierr); vw->localupdate = w->localupdate; if (vw->localupdate) { ierr = PetscObjectReference((PetscObject)vw->localupdate);CHKERRQ(ierr); } } /* New vector should inherit stashing property of parent */ (*v)->stash.donotstash = win->stash.donotstash; (*v)->stash.ignorenegidx = win->stash.ignorenegidx; /* change type_name appropriately */ ierr = VecCUDAAllocateCheck(*v);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)(*v),VECMPICUDA);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);CHKERRQ(ierr); (*v)->map->bs = PetscAbs(win->map->bs); (*v)->bstash.bs = win->bstash.bs; PetscFunctionReturn(0); } PetscErrorCode VecDotNorm2_MPICUDA(Vec s,Vec t,PetscScalar *dp,PetscScalar *nm) { PetscErrorCode ierr; PetscScalar work[2],sum[2]; PetscFunctionBegin; ierr = VecDotNorm2_SeqCUDA(s,t,work,work+1);CHKERRQ(ierr); ierr = MPIU_Allreduce(&work,&sum,2,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)s));CHKERRMPI(ierr); *dp = sum[0]; *nm = sum[1]; PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA(Vec vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = PetscLayoutSetUp(vv->map);CHKERRQ(ierr); ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(vv,PETSC_FALSE,0,((Vec_CUDA*)vv->spptr)->GPUarray_allocated);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; PetscFunctionReturn(0); } PetscErrorCode VecCreate_CUDA(Vec v) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);CHKERRMPI(ierr); if (size == 1) { ierr = VecSetType(v,VECSEQCUDA);CHKERRQ(ierr); } else { ierr = VecSetType(v,VECMPICUDA);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*@C 
VecCreateMPICUDAWithArray - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - array - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. If the user-provided array is NULL, then VecCUDAPlaceArray() can be used at a later stage to SET the array for storing the vector values. PETSc does NOT free the array when the vector is destroyed via VecDestroy(). The user should not free the array until the vector is destroyed. Level: intermediate .seealso: VecCreateSeqCUDAWithArray(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray() @*/ PetscErrorCode VecCreateMPICUDAWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector"); ierr = PetscCUDAInitializeCheck();CHKERRQ(ierr); ierr = VecCreate(comm,vv);CHKERRQ(ierr); ierr = VecSetSizes(*vv,n,N);CHKERRQ(ierr); ierr = VecSetBlockSize(*vv,bs);CHKERRQ(ierr); ierr = VecCreate_MPICUDA_Private(*vv,PETSC_FALSE,0,array);CHKERRQ(ierr); PetscFunctionReturn(0); } /*@C VecCreateMPICUDAWithArrays - Creates a parallel, array-style vector, where the user provides the GPU array space to store the vector values. Collective Input Parameters: + comm - the MPI communicator to use . bs - block size, same meaning as VecSetBlockSize() . n - local vector length, cannot be PETSC_DECIDE . N - global vector length (or PETSC_DECIDE to have calculated) - cpuarray - the user provided CPU array to store the vector values - gpuarray - the user provided GPU array to store the vector values Output Parameter: . vv - the vector Notes: If both cpuarray and gpuarray are provided, the caller must ensure that the provided arrays have identical values. Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. PETSc does NOT free the provided arrays when the vector is destroyed via VecDestroy(). The user should not free the array until the vector is destroyed. 
Level: intermediate .seealso: VecCreateSeqCUDAWithArrays(), VecCreateMPIWithArray(), VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(), VecCreateMPI(), VecCreateGhostWithArray(), VecCUDAPlaceArray(), VecPlaceArray(), VecCUDAAllocateCheckHost() @*/ PetscErrorCode VecCreateMPICUDAWithArrays(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar cpuarray[],const PetscScalar gpuarray[],Vec *vv) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateMPICUDAWithArray(comm,bs,n,N,gpuarray,vv);CHKERRQ(ierr); if (cpuarray && gpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_BOTH; } else if (cpuarray) { Vec_MPI *s = (Vec_MPI*)((*vv)->data); s->array = (PetscScalar*)cpuarray; (*vv)->offloadmask = PETSC_OFFLOAD_CPU; } else if (gpuarray) { (*vv)->offloadmask = PETSC_OFFLOAD_GPU; } else { (*vv)->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } PetscFunctionReturn(0); } PetscErrorCode VecMax_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z) { PetscErrorCode ierr; PetscReal work; PetscFunctionBegin; ierr = VecMax_SeqCUDA(xin,idx,&work);CHKERRQ(ierr); if (!idx) { ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else { PetscReal work2[2],z2[2]; PetscInt rstart; rstart = xin->map->rstart; work2[0] = work; work2[1] = *idx + rstart; ierr = MPIU_Allreduce(work2,z2,2,MPIU_REAL,MPIU_MAXINDEX_OP,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = z2[0]; *idx = (PetscInt)z2[1]; } PetscFunctionReturn(0); } PetscErrorCode VecMin_MPICUDA(Vec xin,PetscInt *idx,PetscReal *z) { PetscErrorCode ierr; PetscReal work; PetscFunctionBegin; ierr = VecMin_SeqCUDA(xin,idx,&work);CHKERRQ(ierr); if (!idx) { ierr = MPIU_Allreduce(&work,z,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); } else { PetscReal work2[2],z2[2]; PetscInt rstart; ierr = VecGetOwnershipRange(xin,&rstart,NULL);CHKERRQ(ierr); work2[0] = work; work2[1] = *idx + rstart; ierr = MPIU_Allreduce(work2,z2,2,MPIU_REAL,MPIU_MININDEX_OP,PetscObjectComm((PetscObject)xin));CHKERRMPI(ierr); *z = z2[0]; *idx = (PetscInt)z2[1]; } PetscFunctionReturn(0); } PetscErrorCode VecBindToCPU_MPICUDA(Vec V,PetscBool pin) { PetscErrorCode ierr; PetscFunctionBegin; V->boundtocpu = pin; if (pin) { ierr = VecCUDACopyFromGPU(V);CHKERRQ(ierr); V->offloadmask = PETSC_OFFLOAD_CPU; /* since the CPU code will likely change values in the vector */ V->ops->dotnorm2 = NULL; V->ops->waxpy = VecWAXPY_Seq; V->ops->dot = VecDot_MPI; V->ops->mdot = VecMDot_MPI; V->ops->tdot = VecTDot_MPI; V->ops->norm = VecNorm_MPI; V->ops->scale = VecScale_Seq; V->ops->copy = VecCopy_Seq; V->ops->set = VecSet_Seq; V->ops->swap = VecSwap_Seq; V->ops->axpy = VecAXPY_Seq; V->ops->axpby = VecAXPBY_Seq; V->ops->maxpy = VecMAXPY_Seq; V->ops->aypx = VecAYPX_Seq; V->ops->axpbypcz = VecAXPBYPCZ_Seq; V->ops->pointwisemult = VecPointwiseMult_Seq; V->ops->setrandom = VecSetRandom_Seq; V->ops->placearray = VecPlaceArray_Seq; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_Seq; V->ops->dot_local = VecDot_Seq; V->ops->tdot_local = VecTDot_Seq; V->ops->norm_local = VecNorm_Seq; V->ops->mdot_local = VecMDot_Seq; V->ops->pointwisedivide = VecPointwiseDivide_Seq; V->ops->getlocalvector = NULL; V->ops->restorelocalvector = NULL; V->ops->getlocalvectorread = NULL; V->ops->restorelocalvectorread = NULL; V->ops->getarraywrite = NULL; V->ops->max = VecMax_MPI; V->ops->min = VecMin_MPI; /* default random number 
generator */ ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr); ierr = PetscStrallocpy(PETSCRANDER48,&V->defaultrandtype);CHKERRQ(ierr); } else { V->ops->dotnorm2 = VecDotNorm2_MPICUDA; V->ops->waxpy = VecWAXPY_SeqCUDA; V->ops->duplicate = VecDuplicate_MPICUDA; V->ops->dot = VecDot_MPICUDA; V->ops->mdot = VecMDot_MPICUDA; V->ops->tdot = VecTDot_MPICUDA; V->ops->norm = VecNorm_MPICUDA; V->ops->scale = VecScale_SeqCUDA; V->ops->copy = VecCopy_SeqCUDA; V->ops->set = VecSet_SeqCUDA; V->ops->swap = VecSwap_SeqCUDA; V->ops->axpy = VecAXPY_SeqCUDA; V->ops->axpby = VecAXPBY_SeqCUDA; V->ops->maxpy = VecMAXPY_SeqCUDA; V->ops->aypx = VecAYPX_SeqCUDA; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUDA; V->ops->pointwisemult = VecPointwiseMult_SeqCUDA; V->ops->setrandom = VecSetRandom_SeqCUDA; V->ops->placearray = VecPlaceArray_SeqCUDA; V->ops->replacearray = VecReplaceArray_SeqCUDA; V->ops->resetarray = VecResetArray_SeqCUDA; V->ops->dot_local = VecDot_SeqCUDA; V->ops->tdot_local = VecTDot_SeqCUDA; V->ops->norm_local = VecNorm_SeqCUDA; V->ops->mdot_local = VecMDot_SeqCUDA; V->ops->destroy = VecDestroy_MPICUDA; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUDA; V->ops->getlocalvector = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvector = VecRestoreLocalVector_SeqCUDA; V->ops->getlocalvectorread = VecGetLocalVector_SeqCUDA; V->ops->restorelocalvectorread = VecRestoreLocalVector_SeqCUDA; V->ops->getarraywrite = VecGetArrayWrite_SeqCUDA; V->ops->getarray = VecGetArray_SeqCUDA; V->ops->restorearray = VecRestoreArray_SeqCUDA; V->ops->getarrayandmemtype = VecGetArrayAndMemType_SeqCUDA; V->ops->restorearrayandmemtype = VecRestoreArrayAndMemType_SeqCUDA; V->ops->max = VecMax_MPICUDA; V->ops->min = VecMin_MPICUDA; /* default random number generator */ ierr = PetscFree(V->defaultrandtype);CHKERRQ(ierr); ierr = PetscStrallocpy(PETSCCURAND,&V->defaultrandtype);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecCreate_MPICUDA_Private(Vec vv,PetscBool alloc,PetscInt nghost,const PetscScalar array[]) { PetscErrorCode ierr; Vec_CUDA *veccuda; PetscFunctionBegin; ierr = VecCreate_MPI_Private(vv,PETSC_FALSE,0,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)vv,VECMPICUDA);CHKERRQ(ierr); ierr = VecBindToCPU_MPICUDA(vv,PETSC_FALSE);CHKERRQ(ierr); vv->ops->bindtocpu = VecBindToCPU_MPICUDA; /* Later, functions check for the Vec_CUDA structure existence, so do not create it without array */ if (alloc && !array) { ierr = VecCUDAAllocateCheck(vv);CHKERRQ(ierr); ierr = VecCUDAAllocateCheckHost(vv);CHKERRQ(ierr); ierr = VecSet(vv,0.0);CHKERRQ(ierr); ierr = VecSet_Seq(vv,0.0);CHKERRQ(ierr); vv->offloadmask = PETSC_OFFLOAD_BOTH; } if (array) { if (!vv->spptr) { PetscReal pinned_memory_min; PetscBool flag; /* Cannot use PetscNew() here because spptr is void* */ ierr = PetscMalloc(sizeof(Vec_CUDA),&vv->spptr);CHKERRQ(ierr); veccuda = (Vec_CUDA*)vv->spptr; veccuda->stream = 0; /* using default stream */ veccuda->GPUarray_allocated = 0; vv->offloadmask = PETSC_OFFLOAD_UNALLOCATED; vv->minimum_bytes_pinned_memory = 0; /* Need to parse command line for minimum size to use for pinned memory allocations on host here. Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCUDAAllocateCheck(). Is there a good way to avoid this? 
*/ ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)vv),((PetscObject)vv)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr); pinned_memory_min = vv->minimum_bytes_pinned_memory; ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&flag);CHKERRQ(ierr); if (flag) vv->minimum_bytes_pinned_memory = pinned_memory_min; ierr = PetscOptionsEnd();CHKERRQ(ierr); } veccuda = (Vec_CUDA*)vv->spptr; veccuda->GPUarray = (PetscScalar*)array; } PetscFunctionReturn(0); }
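A minimal usage sketch for the VecCreateMPICUDAWithArray() routine documented above, assuming the caller owns a device buffer for the lifetime of the Vec; the local length and error-handling style are placeholders, not taken from a PETSc example.

#include <petscvec.h>
#include <cuda_runtime.h>

int main(int argc, char **argv)
{
  PetscErrorCode ierr;
  Vec            x;
  PetscScalar    *d_buf;
  PetscInt       nlocal = 128;                                     /* assumed local size */

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  cudaMalloc((void**)&d_buf,nlocal*sizeof(PetscScalar));           /* user-owned GPU array */
  ierr = VecCreateMPICUDAWithArray(PETSC_COMM_WORLD,1,nlocal,PETSC_DECIDE,d_buf,&x);CHKERRQ(ierr);
  ierr = VecSet(x,1.0);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);                             /* PETSc does not free d_buf */
  cudaFree(d_buf);
  ierr = PetscFinalize();
  return ierr;
}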
682a9fba4e0c7da604636e72959720aeeb30b6ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmgecsrmv.cu, normal z -> d, Sun Nov 20 20:20:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void dmgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, double alpha, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ double dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; double val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, double alpha, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors hipLaunchKernelGGL(( dmgecsrmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream(), m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
682a9fba4e0c7da604636e72959720aeeb30b6ff.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmgecsrmv.cu, normal z -> d, Sun Nov 20 20:20:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void dmgecsrmv_kernel( int num_rows, int num_cols, int num_vecs, double alpha, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; extern __shared__ double dot[]; if( row<num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( j=start; j<end; j++ ){ int col = dcolind [ j ]; double val = dval[ j ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[ col + i*num_cols ]; } for( int i=0; i<num_vecs; i++ ) dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ] + beta * dy[ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is CSR. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in CSR @param[in] drowptr magmaIndex_ptr rowpointer of A in CSR @param[in] dcolind magmaIndex_ptr columnindices of A in CSR @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgecsrmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, double alpha, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors dmgecsrmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream()>>> (m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy); return MAGMA_SUCCESS; }
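One practical limit of the launch above is its dynamic shared-memory request, MEM_SIZE = num_vecs * BLOCK_SIZE * sizeof(double). The host-only check below (not part of MAGMA) works out how many right-hand sides fit under an assumed 48 KiB per-block limit; with BLOCK_SIZE = 512 and double precision each extra vector costs 4 KiB, so at most 12 vectors fit.

#include <stdio.h>

int main(void)
{
    const int block_size = 512;                    /* matches BLOCK_SIZE above */
    const size_t smem_limit = 48 * 1024;           /* assumed per-block shared-memory limit */
    for (int num_vecs = 1; num_vecs <= 16; ++num_vecs) {
        size_t mem_size = (size_t)num_vecs * block_size * sizeof(double);
        printf("num_vecs=%2d -> %6zu bytes of dynamic shared memory (%s)\n",
               num_vecs, mem_size, mem_size <= smem_limit ? "fits" : "exceeds 48 KiB");
    }
    return 0;
}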
f7ce05d13b9f2b6ffea835fe6930861d871d8e4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "complex_mul.h" #include "complex_mul_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/math/binary_elementwise_ops.h" namespace onnxruntime { namespace contrib { namespace cuda { template <typename T> __device__ __inline__ void _ComplexMul(T a0, T a1, T b0, T b1, T* output_data, bool is_conj) { if (is_conj) { T out_real = a0 * b0 + a1 * b1; T out_imag = a1 * b0 - a0 * b1; output_data[0] = out_real; output_data[1] = out_imag; } else { T out_real = a0 * b0 - a1 * b1; T out_imag = a0 * b1 + a1 * b0; output_data[0] = out_real; output_data[1] = out_imag; } }; // broadcast by computing output coordinate from offset, using fast_divmod template <typename T, bool lhs_need_compute, bool rhs_need_compute, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _ElementWiseWithStrideTwo( int32_t output_rank, const TArray<int64_t> lhs_padded_strides, const T* lhs_data, const TArray<int64_t> rhs_padded_strides, const T* rhs_data, const TArray<fast_divmod> fdm_output_strides, T* output_data, CUDA_LONG N, int64_t lhs_size, int64_t rhs_size, bool is_conj) { CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; T a[NumElementsPerThread]; T b[NumElementsPerThread]; T c[NumElementsPerThread]; T d[NumElementsPerThread]; CUDA_LONG id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N / 2) { CUDA_LONG lhs_index = (lhs_need_compute ? 0 : id); CUDA_LONG rhs_index = (rhs_need_compute ? 0 : id); // compute indexes with broadcasting rules: https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md CUDA_LONG offset = id; #pragma unroll for (auto dim = 0; dim < fdm_output_strides.GetCapacity(); dim++) { if (dim >= output_rank) { break; } int q, r; fdm_output_strides.data_[dim].divmod(offset, q, r); if (lhs_need_compute) { lhs_index += static_cast<int>(lhs_padded_strides.data_[dim]) * q; } if (rhs_need_compute) { rhs_index += static_cast<int>(rhs_padded_strides.data_[dim]) * q; } offset = r; } a[i] = lhs_data[(2 * lhs_index) % lhs_size]; b[i] = lhs_data[(2 * lhs_index + 1) % lhs_size]; c[i] = rhs_data[(2 * rhs_index) % rhs_size]; d[i] = rhs_data[(2 * rhs_index + 1) % rhs_size]; id += NumThreadsPerBlock; } } id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N / 2) { _ComplexMul(a[i], b[i], c[i], d[i], &output_data[2 * id], is_conj); id += NumThreadsPerBlock; } } }; template <typename T> void ComplexMul_Impl( int32_t output_rank_or_simple_broadcast, const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, const TArray<int64_t>* rhs_padded_strides, const T* rhs_data, const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, const onnxruntime::cuda::fast_divmod& fdm_H, const onnxruntime::cuda::fast_divmod& fdm_C, T* output_data, int64_t count, int64_t lhs_size, int64_t rhs_size, bool is_conj) { if (count == 0) // special case where there's a dim value of 0 in the output shape return; int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); CUDA_LONG N = static_cast<CUDA_LONG>(count); if (lhs_padded_strides && rhs_padded_strides && lhs_padded_strides->size_ && rhs_padded_strides->size_) hipLaunchKernelGGL(( _ElementWiseWithStrideTwo<T, true, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), 
dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, output_rank_or_simple_broadcast, *lhs_padded_strides, lhs_data, *rhs_padded_strides, rhs_data, *fdm_output_strides, output_data, N, lhs_size, rhs_size, is_conj); else if (lhs_padded_strides && lhs_padded_strides->size_) hipLaunchKernelGGL(( _ElementWiseWithStrideTwo<T, true, false, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, output_rank_or_simple_broadcast, *lhs_padded_strides, lhs_data, *rhs_padded_strides, rhs_data, *fdm_output_strides, output_data, N, lhs_size, rhs_size, is_conj); else hipLaunchKernelGGL(( _ElementWiseWithStrideTwo<T, false, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0, output_rank_or_simple_broadcast, *lhs_padded_strides, lhs_data, *rhs_padded_strides, rhs_data, *fdm_output_strides, output_data, N, lhs_size, rhs_size, is_conj); }; #define SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(T) \ template void ComplexMul_Impl<T>( \ int32_t output_rank_or_simple_broadcast, \ const TArray<int64_t>* lhs_padded_strides, \ const T* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, \ const T* rhs_data, \ const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, \ const onnxruntime::cuda::fast_divmod& fdm_H, \ const onnxruntime::cuda::fast_divmod& fdm_C, \ T* output_data, \ int64_t count, \ int64_t lhs_size, \ int64_t rhs_size, \ bool is_conj); SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(float) SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(half) } // namespace cuda } // namespace contrib } // namespace onnxruntime
f7ce05d13b9f2b6ffea835fe6930861d871d8e4a.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "complex_mul.h" #include "complex_mul_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/math/binary_elementwise_ops.h" namespace onnxruntime { namespace contrib { namespace cuda { template <typename T> __device__ __inline__ void _ComplexMul(T a0, T a1, T b0, T b1, T* output_data, bool is_conj) { if (is_conj) { T out_real = a0 * b0 + a1 * b1; T out_imag = a1 * b0 - a0 * b1; output_data[0] = out_real; output_data[1] = out_imag; } else { T out_real = a0 * b0 - a1 * b1; T out_imag = a0 * b1 + a1 * b0; output_data[0] = out_real; output_data[1] = out_imag; } }; // broadcast by computing output coordinate from offset, using fast_divmod template <typename T, bool lhs_need_compute, bool rhs_need_compute, int NumThreadsPerBlock, int NumElementsPerThread> __global__ void _ElementWiseWithStrideTwo( int32_t output_rank, const TArray<int64_t> lhs_padded_strides, const T* lhs_data, const TArray<int64_t> rhs_padded_strides, const T* rhs_data, const TArray<fast_divmod> fdm_output_strides, T* output_data, CUDA_LONG N, int64_t lhs_size, int64_t rhs_size, bool is_conj) { CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x; T a[NumElementsPerThread]; T b[NumElementsPerThread]; T c[NumElementsPerThread]; T d[NumElementsPerThread]; CUDA_LONG id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N / 2) { CUDA_LONG lhs_index = (lhs_need_compute ? 0 : id); CUDA_LONG rhs_index = (rhs_need_compute ? 0 : id); // compute indexes with broadcasting rules: https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md CUDA_LONG offset = id; #pragma unroll for (auto dim = 0; dim < fdm_output_strides.GetCapacity(); dim++) { if (dim >= output_rank) { break; } int q, r; fdm_output_strides.data_[dim].divmod(offset, q, r); if (lhs_need_compute) { lhs_index += static_cast<int>(lhs_padded_strides.data_[dim]) * q; } if (rhs_need_compute) { rhs_index += static_cast<int>(rhs_padded_strides.data_[dim]) * q; } offset = r; } a[i] = lhs_data[(2 * lhs_index) % lhs_size]; b[i] = lhs_data[(2 * lhs_index + 1) % lhs_size]; c[i] = rhs_data[(2 * rhs_index) % rhs_size]; d[i] = rhs_data[(2 * rhs_index + 1) % rhs_size]; id += NumThreadsPerBlock; } } id = start; #pragma unroll for (int i = 0; i < NumElementsPerThread; i++) { if (id < N / 2) { _ComplexMul(a[i], b[i], c[i], d[i], &output_data[2 * id], is_conj); id += NumThreadsPerBlock; } } }; template <typename T> void ComplexMul_Impl( int32_t output_rank_or_simple_broadcast, const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, const TArray<int64_t>* rhs_padded_strides, const T* rhs_data, const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, const onnxruntime::cuda::fast_divmod& fdm_H, const onnxruntime::cuda::fast_divmod& fdm_C, T* output_data, int64_t count, int64_t lhs_size, int64_t rhs_size, bool is_conj) { if (count == 0) // special case where there's a dim value of 0 in the output shape return; int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread)); CUDA_LONG N = static_cast<CUDA_LONG>(count); if (lhs_padded_strides && rhs_padded_strides && lhs_padded_strides->size_ && rhs_padded_strides->size_) _ElementWiseWithStrideTwo<T, true, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( output_rank_or_simple_broadcast, *lhs_padded_strides, lhs_data, 
*rhs_padded_strides, rhs_data, *fdm_output_strides, output_data, N, lhs_size, rhs_size, is_conj); else if (lhs_padded_strides && lhs_padded_strides->size_) _ElementWiseWithStrideTwo<T, true, false, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( output_rank_or_simple_broadcast, *lhs_padded_strides, lhs_data, *rhs_padded_strides, rhs_data, *fdm_output_strides, output_data, N, lhs_size, rhs_size, is_conj); else _ElementWiseWithStrideTwo<T, false, true, GridDim::maxThreadsPerBlock, GridDim::maxElementsPerThread><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>( output_rank_or_simple_broadcast, *lhs_padded_strides, lhs_data, *rhs_padded_strides, rhs_data, *fdm_output_strides, output_data, N, lhs_size, rhs_size, is_conj); }; #define SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(T) \ template void ComplexMul_Impl<T>( \ int32_t output_rank_or_simple_broadcast, \ const TArray<int64_t>* lhs_padded_strides, \ const T* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, \ const T* rhs_data, \ const TArray<onnxruntime::cuda::fast_divmod>* fdm_output_strides, \ const onnxruntime::cuda::fast_divmod& fdm_H, \ const onnxruntime::cuda::fast_divmod& fdm_C, \ T* output_data, \ int64_t count, \ int64_t lhs_size, \ int64_t rhs_size, \ bool is_conj); SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(float) SPECIALIZE_STACKEDCOMPLEXMUL_IMPL(half) } // namespace cuda } // namespace contrib } // namespace onnxruntime
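The two branches of _ComplexMul above are the ordinary complex product and the product with the conjugated second operand. The host-only reference below (not part of ONNX Runtime) cross-checks the same arithmetic against std::complex on a pair of known values.

#include <complex>
#include <cstdio>

// Mirrors _ComplexMul: (a0 + a1*i) times (b0 + b1*i), conjugating b when is_conj is set.
static void ComplexMulRef(float a0, float a1, float b0, float b1, float* out, bool is_conj) {
  std::complex<float> a(a0, a1), b(b0, b1);
  std::complex<float> r = is_conj ? a * std::conj(b) : a * b;
  out[0] = r.real();
  out[1] = r.imag();
}

int main() {
  float out[2];
  ComplexMulRef(1.0f, 2.0f, 3.0f, 4.0f, out, false);   // (1+2i)(3+4i) = -5 + 10i
  std::printf("mul : %g %g\n", out[0], out[1]);
  ComplexMulRef(1.0f, 2.0f, 3.0f, 4.0f, out, true);    // (1+2i)(3-4i) = 11 + 2i
  std::printf("conj: %g %g\n", out[0], out[1]);
  return 0;
}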
7fe96c269a909229bd1aacb0a9266d9e74db5492.hip
// !!! This is a file automatically generated by hipify!!! #include "ed25519.h" #include "gpu_ctx.h" #include <pthread.h> #include "gpu_common.h" static pthread_mutex_t g_ctx_mutex = PTHREAD_MUTEX_INITIALIZER; #define MAX_NUM_GPUS 8 #define MAX_QUEUE_SIZE 8 static gpu_ctx_t g_gpu_ctx[MAX_NUM_GPUS][MAX_QUEUE_SIZE] = {0}; static uint32_t g_cur_gpu = 0; static uint32_t g_cur_queue[MAX_NUM_GPUS] = {0}; static int32_t g_total_gpus = -1; static bool cuda_crypt_init_locked() { if (g_total_gpus == -1) { hipGetDeviceCount(&g_total_gpus); g_total_gpus = min(MAX_NUM_GPUS, g_total_gpus); LOG("total_gpus: %d\n", g_total_gpus); for (int gpu = 0; gpu < g_total_gpus; gpu++) { CUDA_CHK(hipSetDevice(gpu)); for (int queue = 0; queue < MAX_QUEUE_SIZE; queue++) { int err = pthread_mutex_init(&g_gpu_ctx[gpu][queue].mutex, NULL); if (err != 0) { fprintf(stderr, "pthread_mutex_init error %d gpu: %d queue: %d\n", err, gpu, queue); g_total_gpus = 0; return false; } CUDA_CHK(hipStreamCreate(&g_gpu_ctx[gpu][queue].stream)); } } } return g_total_gpus > 0; } bool ed25519_init() { hipFree(0); pthread_mutex_lock(&g_ctx_mutex); bool success = cuda_crypt_init_locked(); pthread_mutex_unlock(&g_ctx_mutex); return success; } gpu_ctx_t* get_gpu_ctx() { int32_t cur_gpu, cur_queue; LOG("locking global mutex"); pthread_mutex_lock(&g_ctx_mutex); if (!cuda_crypt_init_locked()) { pthread_mutex_unlock(&g_ctx_mutex); LOG("No GPUs, exiting...\n"); return NULL; } cur_gpu = g_cur_gpu; g_cur_gpu++; g_cur_gpu %= g_total_gpus; cur_queue = g_cur_queue[cur_gpu]; g_cur_queue[cur_gpu]++; g_cur_queue[cur_gpu] %= MAX_QUEUE_SIZE; pthread_mutex_unlock(&g_ctx_mutex); gpu_ctx_t* cur_ctx = &g_gpu_ctx[cur_gpu][cur_queue]; LOG("locking contex mutex queue: %d gpu: %d", cur_queue, cur_gpu); pthread_mutex_lock(&cur_ctx->mutex); CUDA_CHK(hipSetDevice(cur_gpu)); LOG("selecting gpu: %d queue: %d\n", cur_gpu, cur_queue); return cur_ctx; } void setup_gpu_ctx(verify_ctx_t* cur_ctx, const gpu_Elems* elems, uint32_t num_elems, uint32_t message_size, uint32_t total_packets, uint32_t total_packets_size, uint32_t total_signatures, const uint32_t* message_lens, const uint32_t* public_key_offsets, const uint32_t* signature_offsets, const uint32_t* message_start_offsets, size_t out_size, hipStream_t stream ) { size_t offsets_size = total_signatures * sizeof(uint32_t); LOG("device allocate. 
packets: %d out: %d offsets_size: %zu\n", total_packets_size, (int)out_size, offsets_size); if (cur_ctx->packets == NULL || total_packets_size > cur_ctx->packets_size_bytes) { CUDA_CHK(hipFree(cur_ctx->packets)); CUDA_CHK(hipMalloc(&cur_ctx->packets, total_packets_size)); cur_ctx->packets_size_bytes = total_packets_size; } if (cur_ctx->out == NULL || cur_ctx->out_size_bytes < out_size) { CUDA_CHK(hipFree(cur_ctx->out)); CUDA_CHK(hipMalloc(&cur_ctx->out, out_size)); cur_ctx->out_size_bytes = total_signatures; } if (cur_ctx->public_key_offsets == NULL || cur_ctx->offsets_len < total_signatures) { CUDA_CHK(hipFree(cur_ctx->public_key_offsets)); CUDA_CHK(hipMalloc(&cur_ctx->public_key_offsets, offsets_size)); CUDA_CHK(hipFree(cur_ctx->signature_offsets)); CUDA_CHK(hipMalloc(&cur_ctx->signature_offsets, offsets_size)); CUDA_CHK(hipFree(cur_ctx->message_start_offsets)); CUDA_CHK(hipMalloc(&cur_ctx->message_start_offsets, offsets_size)); CUDA_CHK(hipFree(cur_ctx->message_lens)); CUDA_CHK(hipMalloc(&cur_ctx->message_lens, offsets_size)); cur_ctx->offsets_len = total_signatures; } LOG("Done alloc"); CUDA_CHK(hipMemcpyAsync(cur_ctx->public_key_offsets, public_key_offsets, offsets_size, hipMemcpyHostToDevice, stream)); CUDA_CHK(hipMemcpyAsync(cur_ctx->signature_offsets, signature_offsets, offsets_size, hipMemcpyHostToDevice, stream)); CUDA_CHK(hipMemcpyAsync(cur_ctx->message_start_offsets, message_start_offsets, offsets_size, hipMemcpyHostToDevice, stream)); CUDA_CHK(hipMemcpyAsync(cur_ctx->message_lens, message_lens, offsets_size, hipMemcpyHostToDevice, stream)); size_t cur = 0; for (size_t i = 0; i < num_elems; i++) { LOG("i: %zu size: %d\n", i, elems[i].num * message_size); CUDA_CHK(hipMemcpyAsync(&cur_ctx->packets[cur * message_size], elems[i].elems, elems[i].num * message_size, hipMemcpyHostToDevice, stream)); cur += elems[i].num; } } void release_gpu_ctx(gpu_ctx_t* cur_ctx) { pthread_mutex_unlock(&cur_ctx->mutex); } void ed25519_free_gpu_mem() { for (size_t gpu = 0; gpu < MAX_NUM_GPUS; gpu++) { for (size_t queue = 0; queue < MAX_QUEUE_SIZE; queue++) { gpu_ctx_t* cur_ctx = &g_gpu_ctx[gpu][queue]; CUDA_CHK(hipFree(cur_ctx->verify_ctx.packets)); CUDA_CHK(hipFree(cur_ctx->verify_ctx.out)); CUDA_CHK(hipFree(cur_ctx->verify_ctx.message_lens)); CUDA_CHK(hipFree(cur_ctx->verify_ctx.public_key_offsets)); CUDA_CHK(hipFree(cur_ctx->verify_ctx.private_key_offsets)); CUDA_CHK(hipFree(cur_ctx->verify_ctx.signature_offsets)); CUDA_CHK(hipFree(cur_ctx->verify_ctx.message_start_offsets)); if (cur_ctx->stream != 0) { CUDA_CHK(hipStreamDestroy(cur_ctx->stream)); } } } }
7fe96c269a909229bd1aacb0a9266d9e74db5492.cu
#include "ed25519.h" #include "gpu_ctx.h" #include <pthread.h> #include "gpu_common.h" static pthread_mutex_t g_ctx_mutex = PTHREAD_MUTEX_INITIALIZER; #define MAX_NUM_GPUS 8 #define MAX_QUEUE_SIZE 8 static gpu_ctx_t g_gpu_ctx[MAX_NUM_GPUS][MAX_QUEUE_SIZE] = {0}; static uint32_t g_cur_gpu = 0; static uint32_t g_cur_queue[MAX_NUM_GPUS] = {0}; static int32_t g_total_gpus = -1; static bool cuda_crypt_init_locked() { if (g_total_gpus == -1) { cudaGetDeviceCount(&g_total_gpus); g_total_gpus = min(MAX_NUM_GPUS, g_total_gpus); LOG("total_gpus: %d\n", g_total_gpus); for (int gpu = 0; gpu < g_total_gpus; gpu++) { CUDA_CHK(cudaSetDevice(gpu)); for (int queue = 0; queue < MAX_QUEUE_SIZE; queue++) { int err = pthread_mutex_init(&g_gpu_ctx[gpu][queue].mutex, NULL); if (err != 0) { fprintf(stderr, "pthread_mutex_init error %d gpu: %d queue: %d\n", err, gpu, queue); g_total_gpus = 0; return false; } CUDA_CHK(cudaStreamCreate(&g_gpu_ctx[gpu][queue].stream)); } } } return g_total_gpus > 0; } bool ed25519_init() { cudaFree(0); pthread_mutex_lock(&g_ctx_mutex); bool success = cuda_crypt_init_locked(); pthread_mutex_unlock(&g_ctx_mutex); return success; } gpu_ctx_t* get_gpu_ctx() { int32_t cur_gpu, cur_queue; LOG("locking global mutex"); pthread_mutex_lock(&g_ctx_mutex); if (!cuda_crypt_init_locked()) { pthread_mutex_unlock(&g_ctx_mutex); LOG("No GPUs, exiting...\n"); return NULL; } cur_gpu = g_cur_gpu; g_cur_gpu++; g_cur_gpu %= g_total_gpus; cur_queue = g_cur_queue[cur_gpu]; g_cur_queue[cur_gpu]++; g_cur_queue[cur_gpu] %= MAX_QUEUE_SIZE; pthread_mutex_unlock(&g_ctx_mutex); gpu_ctx_t* cur_ctx = &g_gpu_ctx[cur_gpu][cur_queue]; LOG("locking contex mutex queue: %d gpu: %d", cur_queue, cur_gpu); pthread_mutex_lock(&cur_ctx->mutex); CUDA_CHK(cudaSetDevice(cur_gpu)); LOG("selecting gpu: %d queue: %d\n", cur_gpu, cur_queue); return cur_ctx; } void setup_gpu_ctx(verify_ctx_t* cur_ctx, const gpu_Elems* elems, uint32_t num_elems, uint32_t message_size, uint32_t total_packets, uint32_t total_packets_size, uint32_t total_signatures, const uint32_t* message_lens, const uint32_t* public_key_offsets, const uint32_t* signature_offsets, const uint32_t* message_start_offsets, size_t out_size, cudaStream_t stream ) { size_t offsets_size = total_signatures * sizeof(uint32_t); LOG("device allocate. 
packets: %d out: %d offsets_size: %zu\n", total_packets_size, (int)out_size, offsets_size); if (cur_ctx->packets == NULL || total_packets_size > cur_ctx->packets_size_bytes) { CUDA_CHK(cudaFree(cur_ctx->packets)); CUDA_CHK(cudaMalloc(&cur_ctx->packets, total_packets_size)); cur_ctx->packets_size_bytes = total_packets_size; } if (cur_ctx->out == NULL || cur_ctx->out_size_bytes < out_size) { CUDA_CHK(cudaFree(cur_ctx->out)); CUDA_CHK(cudaMalloc(&cur_ctx->out, out_size)); cur_ctx->out_size_bytes = total_signatures; } if (cur_ctx->public_key_offsets == NULL || cur_ctx->offsets_len < total_signatures) { CUDA_CHK(cudaFree(cur_ctx->public_key_offsets)); CUDA_CHK(cudaMalloc(&cur_ctx->public_key_offsets, offsets_size)); CUDA_CHK(cudaFree(cur_ctx->signature_offsets)); CUDA_CHK(cudaMalloc(&cur_ctx->signature_offsets, offsets_size)); CUDA_CHK(cudaFree(cur_ctx->message_start_offsets)); CUDA_CHK(cudaMalloc(&cur_ctx->message_start_offsets, offsets_size)); CUDA_CHK(cudaFree(cur_ctx->message_lens)); CUDA_CHK(cudaMalloc(&cur_ctx->message_lens, offsets_size)); cur_ctx->offsets_len = total_signatures; } LOG("Done alloc"); CUDA_CHK(cudaMemcpyAsync(cur_ctx->public_key_offsets, public_key_offsets, offsets_size, cudaMemcpyHostToDevice, stream)); CUDA_CHK(cudaMemcpyAsync(cur_ctx->signature_offsets, signature_offsets, offsets_size, cudaMemcpyHostToDevice, stream)); CUDA_CHK(cudaMemcpyAsync(cur_ctx->message_start_offsets, message_start_offsets, offsets_size, cudaMemcpyHostToDevice, stream)); CUDA_CHK(cudaMemcpyAsync(cur_ctx->message_lens, message_lens, offsets_size, cudaMemcpyHostToDevice, stream)); size_t cur = 0; for (size_t i = 0; i < num_elems; i++) { LOG("i: %zu size: %d\n", i, elems[i].num * message_size); CUDA_CHK(cudaMemcpyAsync(&cur_ctx->packets[cur * message_size], elems[i].elems, elems[i].num * message_size, cudaMemcpyHostToDevice, stream)); cur += elems[i].num; } } void release_gpu_ctx(gpu_ctx_t* cur_ctx) { pthread_mutex_unlock(&cur_ctx->mutex); } void ed25519_free_gpu_mem() { for (size_t gpu = 0; gpu < MAX_NUM_GPUS; gpu++) { for (size_t queue = 0; queue < MAX_QUEUE_SIZE; queue++) { gpu_ctx_t* cur_ctx = &g_gpu_ctx[gpu][queue]; CUDA_CHK(cudaFree(cur_ctx->verify_ctx.packets)); CUDA_CHK(cudaFree(cur_ctx->verify_ctx.out)); CUDA_CHK(cudaFree(cur_ctx->verify_ctx.message_lens)); CUDA_CHK(cudaFree(cur_ctx->verify_ctx.public_key_offsets)); CUDA_CHK(cudaFree(cur_ctx->verify_ctx.private_key_offsets)); CUDA_CHK(cudaFree(cur_ctx->verify_ctx.signature_offsets)); CUDA_CHK(cudaFree(cur_ctx->verify_ctx.message_start_offsets)); if (cur_ctx->stream != 0) { CUDA_CHK(cudaStreamDestroy(cur_ctx->stream)); } } } }
cc3b1216d741d1a5143ed8f61422345065641b8c.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>

#define blockSize 512
#define real float

__global__ void redukcja (int N, real* v, real* out)
{
    size_t s = threadIdx.x + blockIdx.x * blockDim.x;
    int sID = threadIdx.x;
    size_t i;
    __shared__ real pom[blockSize];

    pom[sID] = 0;
    if (s<N)
        pom[sID] = v[s];
    __syncthreads();

    for (i=1; i<blockSize; i*=2){
        if (sID%(2*i)==0){
            pom[sID] += pom[sID + i];
        }
        __syncthreads();
    }

    if (sID==0)
        out[blockIdx.x] = pom[0];
}

__global__ void wypelnij (int N, real* v)
{
    size_t s = threadIdx.x + blockIdx.x * blockDim.x;
    if (s<N) {
        v[s] = sin(s * 2. * M_PI / 10.);
    }
}

int main ()
{
    size_t N = blockSize * blockSize * blockSize;
    int blocks = (N + blockSize-1) / blockSize;
    float dt_ms;
    hipEvent_t event1, event2;
    hipEventCreate(&event1);
    hipEventCreate(&event2);

    real* v;
    hipMalloc( (void**) &v, N * sizeof(real) );
    real* outV;
    hipMalloc( (void**) &outV, blockSize * blockSize * sizeof(real) );
    real* outVV;
    hipMalloc( (void**) &outVV, blockSize * sizeof(real) );
    real out;
    int i;
    int M = 10;

    hipLaunchKernelGGL(( wypelnij) , dim3(blocks), dim3(blockSize), 0, 0, N, v);

    hipEventRecord(event1, 0);
    for (i=0; i<M; i++){
        hipLaunchKernelGGL(( redukcja), dim3(blocks), dim3(blockSize), 0, 0, N, v, outV);
        hipLaunchKernelGGL(( redukcja), dim3(blockSize), dim3(blockSize), 0, 0, blockSize*blockSize, outV, outVV);
        hipLaunchKernelGGL(( redukcja), dim3(1), dim3(blockSize), 0, 0, blockSize, outVV, v);
    }
    hipEventRecord(event2, 0);
    hipEventSynchronize(event1);
    hipEventSynchronize(event2);
    hipEventElapsedTime(&dt_ms, event1, event2);

    hipMemcpy (&out, v, 1 * sizeof(real), hipMemcpyDeviceToHost);

    printf ("Czas redukcji: %f ms wynik; %f\n", dt_ms * 1./M, out);

    return 0;
}
cc3b1216d741d1a5143ed8f61422345065641b8c.cu
#include <cuda.h>
#include <stdio.h>
#include <math.h>

#define blockSize 512
#define real float

__global__ void redukcja (int N, real* v, real* out)
{
    size_t s = threadIdx.x + blockIdx.x * blockDim.x;
    int sID = threadIdx.x;
    size_t i;
    __shared__ real pom[blockSize];

    pom[sID] = 0;
    if (s<N)
        pom[sID] = v[s];
    __syncthreads();

    for (i=1; i<blockSize; i*=2){
        if (sID%(2*i)==0){
            pom[sID] += pom[sID + i];
        }
        __syncthreads();
    }

    if (sID==0)
        out[blockIdx.x] = pom[0];
}

__global__ void wypelnij (int N, real* v)
{
    size_t s = threadIdx.x + blockIdx.x * blockDim.x;
    if (s<N) {
        v[s] = sin(s * 2. * M_PI / 10.);
    }
}

int main ()
{
    size_t N = blockSize * blockSize * blockSize;
    int blocks = (N + blockSize-1) / blockSize;
    float dt_ms;
    cudaEvent_t event1, event2;
    cudaEventCreate(&event1);
    cudaEventCreate(&event2);

    real* v;
    cudaMalloc( (void**) &v, N * sizeof(real) );
    real* outV;
    cudaMalloc( (void**) &outV, blockSize * blockSize * sizeof(real) );
    real* outVV;
    cudaMalloc( (void**) &outVV, blockSize * sizeof(real) );
    real out;
    int i;
    int M = 10;

    wypelnij <<<blocks, blockSize>>> (N, v);

    cudaEventRecord(event1, 0);
    for (i=0; i<M; i++){
        redukcja<<<blocks, blockSize>>> (N, v, outV);
        redukcja<<<blockSize, blockSize>>> (blockSize*blockSize, outV, outVV);
        redukcja<<<1, blockSize>>> (blockSize, outVV, v);
    }
    cudaEventRecord(event2, 0);
    cudaEventSynchronize(event1);
    cudaEventSynchronize(event2);
    cudaEventElapsedTime(&dt_ms, event1, event2);

    cudaMemcpy (&out, v, 1 * sizeof(real), cudaMemcpyDeviceToHost);

    printf ("Czas redukcji: %f ms wynik; %f\n", dt_ms * 1./M, out);

    return 0;
}
dbd89b34753d6dee4b9ee49402ed37ec61a87e44.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include "device_launch_parameters.h"
#include "math_functions.hpp"

__global__ void partedCholesky_GPU(int M, float* A) {
    int x = threadIdx.x+blockIdx.x*1024;
    float multi = 0;
    //Left part
    int j = 0;
    for (; j < 9 * M; j++) {
        A[(x + 9 * M) * 18 * M + j] = (A[(x + 9 * M) * 18 * M + j]-multi)/A[j*18*M+j];
        multi = 0;
        for (int i = 0; i <= j; i++) {
            multi += A[(j+1) * 18 * M + i] * A[(x + 9 * M) * 18 * M + i];
        }
    }
    __syncthreads();
    multi = 0;
    for (int i = 0; i <= j; i++) {
        multi += A[j * 18 * M + i] * A[(x + 9 * M) * 18 * M + i];
    }
    //Right part
    for (j = 9 * M; j < 18 * M; j++) {
        if (j - 9 * M == x) {
            A[(x + 9 * M) * 18 * M + j] = sqrt(A[(x + 9 * M) * 18 * M + j] - multi);
            break;
        }
        A[(x + 9 * M) * 18 * M + j] = (A[(x + 9 * M) * 18 * M + j] - multi) / A[j * 18 * M + j];
        multi = 0;
        for (int i = 0; i <= j; i++) {
            multi += A[(j+1) * 18 * M + i] * A[(x + 9 * M) * 18 * M + i];
        }
        //synchronize every column
        __syncthreads();
    }
    __syncthreads();
}

void partedCholesky(int M, float* A) {
    int size = 18 * M;
    const dim3 blockSize(1024);
    const dim3 gridSize(18*M/1024+1);
    partedCholesky_GPU << <gridSize, blockSize >> >(M, A);
}
dbd89b34753d6dee4b9ee49402ed37ec61a87e44.cu
#include <cuda_runtime.h>
#include "device_functions.h"
#include "device_launch_parameters.h"
#include "math_functions.hpp"

__global__ void partedCholesky_GPU(int M, float* A) {
    int x = threadIdx.x+blockIdx.x*1024;
    float multi = 0;
    //Left part
    int j = 0;
    for (; j < 9 * M; j++) {
        A[(x + 9 * M) * 18 * M + j] = (A[(x + 9 * M) * 18 * M + j]-multi)/A[j*18*M+j];
        multi = 0;
        for (int i = 0; i <= j; i++) {
            multi += A[(j+1) * 18 * M + i] * A[(x + 9 * M) * 18 * M + i];
        }
    }
    __syncthreads();
    multi = 0;
    for (int i = 0; i <= j; i++) {
        multi += A[j * 18 * M + i] * A[(x + 9 * M) * 18 * M + i];
    }
    //Right part
    for (j = 9 * M; j < 18 * M; j++) {
        if (j - 9 * M == x) {
            A[(x + 9 * M) * 18 * M + j] = sqrt(A[(x + 9 * M) * 18 * M + j] - multi);
            break;
        }
        A[(x + 9 * M) * 18 * M + j] = (A[(x + 9 * M) * 18 * M + j] - multi) / A[j * 18 * M + j];
        multi = 0;
        for (int i = 0; i <= j; i++) {
            multi += A[(j+1) * 18 * M + i] * A[(x + 9 * M) * 18 * M + i];
        }
        //synchronize every column
        __syncthreads();
    }
    __syncthreads();
}

void partedCholesky(int M, float* A) {
    int size = 18 * M;
    const dim3 blockSize(1024);
    const dim3 gridSize(18*M/1024+1);
    partedCholesky_GPU << <gridSize, blockSize >> >(M, A);
}
61a1fdfe5dac2ec00954a2138547d600eeb6bc83.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_lower( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } /* Similar to clat2z_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_upper( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } } /***************************************************************************//** Purpose ------- CLAT2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. 
- < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lat2 *******************************************************************************/ extern "C" void magmablas_clat2z( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( clat2z_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( clat2z_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda); } }
61a1fdfe5dac2ec00954a2138547d600eeb6bc83.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_lower( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } /* Similar to clat2z_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_upper( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } } /***************************************************************************//** Purpose ------- CLAT2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lat2 *******************************************************************************/ extern "C" void magmablas_clat2z( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); if (uplo == MagmaLower) { clat2z_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); } else if (uplo == MagmaUpper) { clat2z_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); } }
f056b9d48fa52bc0c40d4b077cca19a14c973438.hip
// !!! This is a file automatically generated by hipify!!! // CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <math.h> #include "sceneStructs.h" #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" struct is_dead{ __host__ __device__ bool operator()(const ray& r) { return r.isDead; } }; void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } // LOOK: This function demonstrates how to use thrust for random number generation on the GPU! // Function that generates static. __host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } __host__ __device__ glm::vec2 generateRandomNumberAntiAliasing(float seed, float x, float y, float d){ thrust::default_random_engine rng(hash(seed)); thrust::uniform_real_distribution<float> u01(0,1); float xOffset = (float)u01(rng) * 2 * d; float yOffset = (float)u01(rng) * 2 * d; return glm::vec2(x - d + xOffset, y - d + yOffset); } // TODO: IMPLEMENT THIS FUNCTION // Function that does the initial raycast from the camera __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){ glm::vec3 A = glm::cross(view, up); glm::vec3 B = glm::cross(A, view); glm::vec3 M = eye + view; glm::vec3 V = (float)tan(fov.y/(float)180.0*PI) * glm::length(view) * glm::normalize(B); glm::vec3 H = (float)tan(fov.x/(float)180.0*PI) * glm::length(view) * glm::normalize(A); //choose point on the image plane based on pixel location float Sh = float(x)/float(resolution.x-1); float Sv = 1- float(y)/float(resolution.y-1); //invert y coordinate //choose random point on image plane /*thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); float Sh = (float) u01(rng); float Sv = (float) u01(rng);*/ //sreen coordinates to world coordinates glm::vec3 point = M + (float)(2*Sh-1)*H + (float)(2*Sv-1)*V; //initial cast of ray ray r; r.direction = glm::normalize(point - eye); r.origin = eye; r.color = glm::vec3(1,1,1); r.isDead = false; return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, float iterations){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0f/iterations; color.y = image[index].y*255.0f/iterations; color.z = image[index].z*255.0f/iterations; //weight for each iteration if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } // loop through all geometry to test ray intersection, returns the geoID that corresponds to intersected geometry __host__ __device__ int findHitGeo(ray r, glm::vec3& intersect, glm::vec3& normal, staticGeom* geoms, int numberOfGeoms, triangle * cudatris){ if(r.isDead) return -1; float distMin = -2, dist = -1; glm::vec3 tempIntersect(0.0f); glm::vec3 tempNormal(0.0f); int ID = -1; //geometry and ray intersect tesing for (int g=0; g<numberOfGeoms; g++){ if(geoms[g].type == SPHERE){ dist = sphereIntersectionTest(geoms[g], r, tempIntersect, tempNormal); } else if(geoms[g].type == CUBE ){ dist = boxIntersectionTest(geoms[g], r, tempIntersect, tempNormal); } else if (geoms[g].type == MESH){ dist = polygonIntersectionTest(geoms[g], r, tempIntersect, tempNormal, cudatris); } //overwrite minimum distance if needed if( (distMin < 0 && dist > -0.5f ) || ( distMin > -1 && dist < distMin && dist > -0.5f ) ){ distMin = dist; //update minimum dist ID = g; //update ID of geometry intersect = tempIntersect; //update intersct point normal = tempNormal; //update normal } } return ID; } //return true if there is direct lighting __host__ __device__ bool ShadowRayTest(ray sr, staticGeom* geoms, int numberOfGeoms, material* materials, triangle * cudatris){ glm::vec3 intersPoint(0.0f); glm::vec3 intersNormal(0.0f); //printf("shadow ray: [%f,%f,%f], [%f,%f,%f]\n", sr.origin.x,sr.origin.y,sr.origin.z,sr.direction.x,sr.direction.y,sr.direction.z); int geoID = findHitGeo(sr, intersPoint, intersNormal, geoms, numberOfGeoms, cudatris); if( geoID>-1 && geoms[geoID].materialid >= 0 &&materials[geoms[geoID].materialid].emittance > 0){ //hit light soource return true; } else{ return false; } } // get shaw ray to a random chosen light, modify the shadowray, return ID of chosen light __device__ __host__ int getRandomShadowRayDirection(float seed, glm::vec3& theIntersect, int* lights, int numOfLights, staticGeom* geoms, ray& shadowRay, float& rayLength, glm::vec3 lightNormal, float lightArea){ // ****************** choose light first ******************************** // int chosenLight = lights[0]; //only one light if( numOfLights > 1){ //more than 1 light thrust::default_random_engine rng(hash(seed)); thrust::uniform_real_distribution<float> u01(0,1); chosenLight = lights[(int)((float)u01(rng) * numOfLights)]; //randomly choose a light to sample } // ****************** find a point on light ******************************** // glm::vec3 Lnormal(0.0f); //light normal float Larea; //light area glm::vec3 Plight; //random point on light if( geoms[chosenLight].type == CUBE ){ //Plight = getRandomPointOnCube( geoms[chosenLight], seed); Plight = getRandomPointOnCube( geoms[chosenLight], seed, Lnormal, Larea); } else if( geoms[chosenLight].type == SPHERE ){ Plight = getRandomPointOnSphere( geoms[chosenLight], seed, 
Lnormal, Larea); } // ****************** shadow ray test ******************************** // shadowRay.direction = glm::normalize(Plight - theIntersect); //from intersect to light shadowRay.origin = theIntersect + (float)EPSILON * shadowRay.direction; rayLength = glm::length(Plight - theIntersect); return chosenLight; } __device__ __host__ glm::vec3 getTextureColor(glm::vec3* cudatexs, tex* cudatexIDs, glm::vec3 &intersect, staticGeom& geom){ tex theTex = cudatexIDs[abs(geom.materialid)-1]; // printf("theTex: h=%d, w=%d, start=%d\n", theTex.h, theTex.w, theTex.start); glm::vec3 p = multiplyMV(geom.inverseTransform, glm::vec4(intersect,1.0f)); float u,v; if(geom.type == CUBE){ // printf("p.x=%f, p.y = %f, p.z=%f, intersect.x=%f, intersect.y=%f, intersect.z=%f\n",p.x, p.y, p.z, intersect.x, intersect.y, intersect.z); if(std::abs(0.5f - abs(p.x)) < EPSILON){ //left or right face u = p.z + 0.5f; v = p.y + 0.5f; }else if(std::abs(0.5f - abs(p.y)) < EPSILON){ // top or bottom face u = p.x + 0.5f; v = p.z + 0.5f; }else if(std::abs(0.5f - abs(p.z)) < EPSILON){ //front or back face u = p.x + 0.5f; v = p.y + 0.5f; v = 1.0f - v; } }else if(geom.type == SPHERE){ glm::vec3 d = glm::vec3(0.0)- glm::vec3(p.x, p.y, p.z); // printf("p.x=%f, p.y = %f, p.z=%f, intersect.x=%f, intersect.y=%f, intersect.z=%f\n",p.x, p.y, p.z, intersect.x, intersect.y, intersect.z); u = 0.5f + atan2(d.z, d.x) * 0.5f / PI; v = 0.5f - asin(d.y) / PI; } int i,j,idx = -1; i = u * (float)theTex.w; j = v * (float)theTex.h; idx = i + j * theTex.w + theTex.start; // printf("x=%f, z=%f, u=%f, v=%f, idx = %d\n",intersect.x * 0.2f,intersect.z * 0.2f, u, v, idx); if( idx <= theTex.w * theTex.h + theTex.start && idx>=theTex.start ){ glm::vec3 color(cudatexs[idx].r/255.0, cudatexs[idx].g/255.0, cudatexs[idx].b/255.0); return color; } return intersect; } //calculates the direct lighting for a certain hit point and modify color of that hit __device__ __host__ void directLighting(float seed, glm::vec3& theColor, glm::vec3& theIntersect, glm::vec3& theNormal, int geoID, int* lights, int numOfLights, material* cudamats, staticGeom* geoms, int numOfGeoms, triangle * cudatris){ ray shadowRay; float rayLen,lightArea; glm::vec3 lightNormal; int lightID = getRandomShadowRayDirection(seed, theIntersect, lights, numOfLights, geoms, shadowRay, rayLen, lightNormal, lightArea); // ****************** shading if direct illumination ****************** // if(geoms[geoID].materialid >= 0 ){ material curMat = cudamats[geoms[geoID].materialid]; //material of the hit goemetry if(ShadowRayTest(shadowRay, geoms, numOfGeoms, cudamats, cudatris) ){ float cosTerm = glm::clamp( glm::dot( theNormal, shadowRay.direction ), 0.0f, 1.0f); //proportion of facing light float cosTerm2 = glm::clamp( glm::dot( lightNormal, -shadowRay.direction ), 0.0f, 1.0f); //proportion of incoming light float areaSampling = lightArea / (float) pow( rayLen, 2.0f) ; // dA/r^2 theColor += cudamats[lightID].emittance * curMat.color * cosTerm * cosTerm2 * areaSampling ; } } //don't kill any ray in direct lighting calculation } // TODO: IMPLEMENT THIS FUNCTION // Core raytracer kernel __global__ void raytraceRay(ray* rays, float time, int rayDepth, int numOfRays, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int* lights, int numOfLights, cameraData cam, triangle* cudatris, glm::vec3* cudatexs, tex* cudatexIDs){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; // int index = x * blockDim.y + y; // int index = x + (y * 
resolution.x); int index = x + (int)ceil(sqrt((float)numOfRays))* y; // printf("blockDim: %d, %d\n", blockDim.x, blockDim.y); if( index < numOfRays ){ ray r = rays[index]; glm::vec3 Pintersect(0.0f); glm::vec3 Pnormal(0.0f); int hitGeoID = findHitGeo(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris); if(hitGeoID!=-1){ material curMat; if(geoms[hitGeoID].materialid >= 0) curMat = cudamats[geoms[hitGeoID].materialid]; if( curMat.emittance > 0 ){ //end when hit light source if(glm::length(r.color)>0.6f){ // printf("ray color:[%f, %f, %f]", r.color.r,r.color.g,r.color.b); } colors[r.pixel] += r.color * curMat.color * curMat.emittance; r.isDead = true; } else{ //int mode = calculateBSDF(r, Pintersect, Pnormal, color, uncolor, cudamats[matID], hash(index*time)); float seed = (float)index * (float)time * ( (float)rayDepth + 1.0f ); if(curMat.hasReflective > 0 || curMat.hasRefractive > 0){ //------------------------------- calculate Fresnel reflectance and transmittance --------------------------// Fresnel F; float reflectance; glm::vec3 reflectDir, transmitDir; if(glm::dot(r.direction,Pnormal)<0){ //ray is outside F = calculateFresnel(Pnormal,r.direction,1.0f, curMat.indexOfRefraction); reflectDir = calculateReflectionDirection(Pnormal, r.direction); transmitDir = calculateTransmissionDirection(Pnormal, r.direction,1.0f, curMat.indexOfRefraction); } else{ //ray is inside F = calculateFresnel(-Pnormal,r.direction, curMat.indexOfRefraction,1.0f); reflectDir = calculateReflectionDirection(-Pnormal, r.direction); transmitDir = calculateTransmissionDirection(-Pnormal, r.direction, curMat.indexOfRefraction, 1.0f); } //--------------------------------------------------------------------------------------------------------// //----------------------- choosing between reflection or refraction or both -------------------------------// if( curMat.hasRefractive > 0 && curMat.hasReflective > 0){ thrust::default_random_engine rng( hash( seed ) ); thrust::uniform_real_distribution<float> u01(0,1); if((float) u01(rng) < F.reflectionCoefficient ){ //reflected r.direction = reflectDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; //r.color *= curMat.color * F.reflectionCoefficient; } else{ //transmitted r.direction = transmitDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; } } else if(curMat.hasReflective > 0){ //only reflection r.direction = reflectDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; } else if (curMat.hasRefractive > 0){ //only refraction r.direction = transmitDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; } } //--------------------------------------------------------------------------------------------------------// else if (curMat.hasScatter>0){ } else{ //diffuse rays thrust::default_random_engine rng( hash( seed ) ); thrust::uniform_real_distribution<float> u01(0,1); if((float) u01(rng) < 0.01f ){ //proportion to calculate direct lighting directLighting(seed,r.color,Pintersect,Pnormal,hitGeoID,lights,numOfLights, cudamats,geoms, numberOfGeoms, cudatris); } else{ //proportion to calculate indirect lighting //cos weighted importance sampling r.direction 
= calculateCosWeightedRandomDirInHemisphere(Pnormal, (float) u01(rng), (float) u01(rng)); r.origin = Pintersect + (float)EPSILON * r.direction ; float diffuseTerm = glm::clamp( glm::dot( Pnormal,r.direction ), 0.0f, 1.0f); if(geoms[hitGeoID].materialid < 0){ //texture r.color *= diffuseTerm * getTextureColor(cudatexs, cudatexIDs, Pintersect, geoms[hitGeoID]); }else{ r.color *= diffuseTerm * curMat.color; } } } //----------------------------------------- Other Effects ----------------------------------------------// if(curMat.specularExponent > 0 ){ //specularity & glossiness thrust::default_random_engine rng( hash( seed ) ); thrust::uniform_real_distribution<float> u01(0,1); ray shadowRay; float rayLen,lightArea; glm::vec3 lightNormal; int lightID = getRandomShadowRayDirection(seed, Pintersect, lights, numOfLights, geoms, shadowRay, rayLen, lightNormal, lightArea); glm::vec3 viewDir = glm::normalize( cam.position - Pintersect ); glm::vec3 halfVector = glm::normalize(shadowRay.direction + viewDir); //H=(L+V)/length(L+V) float D = glm::clamp( glm::dot( Pnormal,halfVector), 0.0f, 1.0f); float specularTerm = pow(D, curMat.specularExponent); //perfect specular means normal vector = half vector r.color *= specularTerm * curMat.color; } } } else{ //hit nothing r.isDead = true; } rays[index] = r; } } // establish parrallel ray pool __global__ void initialRayPool(ray * rayPool, cameraData cam, float iterations,glm::vec3 *colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int * lightIDs, int numberOfLights, triangle * cudatris){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * cam.resolution.x); ray r = rayPool[index]; if( x<= cam.resolution.x && y <= cam.resolution.y ){ if(ANTI_ALIASING){ glm::vec2 jitter = generateRandomNumberAntiAliasing((float)index * iterations, x, y, 0.5f); //anti-alizsing r = raycastFromCameraKernel( cam.resolution, iterations, jitter.x, jitter.y, cam.position, cam.view, cam.up, cam.fov ); } else{ r = raycastFromCameraKernel( cam.resolution, iterations, x, y, cam.position, cam.view, cam.up, cam.fov ); } r.pixel = index; //mark ray with pixel indexing, after compaction, (r.pixel) will represent correct pixel location if(DEPTH_OF_FIELD){ glm::vec3 focalPoint = r.origin + r.direction * cam.focalLength / glm::dot(cam.view, r.direction); //L = f/cos(theta) thrust::default_random_engine rng(hash((float)index*iterations)); thrust::uniform_real_distribution<float> u01(0,1); float theta = 2.0f * PI * u01(rng); float radius = u01(rng) * cam.aperture; glm::vec3 eyeOffset(cos(theta)*radius, sin(theta)*radius, 0); glm::vec3 newEyePoint = cam.position + eyeOffset; //offseted cam eye location r.origin = newEyePoint; r.direction = glm::normalize(focalPoint - newEyePoint); } glm::vec3 Pintersect(0.0f); glm::vec3 Pnormal(0.0f); int geoID = findHitGeo(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris); if( geoID > -1){ // cast shadow ray towards lights and calculate direct lighting directLighting((float)index*iterations, colors[index], Pintersect, Pnormal,geoID, lightIDs, numberOfLights, cudamats, geoms, numberOfGeoms, cudatris); } rayPool[index] = r; } } // TODO: FINISH THIS FUNCTION // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, std::vector<glm::vec3> &textures, std::vector<tex> &textureIDs){ 
//frame: current frame number //iterations: current iteration of rendering < (cam.iterations) if(iterations == 572 || iterations == 46 || iterations == 7){ printf("problem"); } // send image to GPU glm::vec3* cudaimage = NULL; hipMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); hipMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyHostToDevice); // package geometry and materials and sent to GPU staticGeom* geomList = new staticGeom[numberOfGeoms]; // triangle* triList; int meshID = -1; triangle* cudatris = NULL; for(int i=0; i<numberOfGeoms; i++){ staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; if(geoms[i].type == MESH){ meshID = i; // my code now only handles one obj load (unfortunately as I am not able to handle list of triangles well) newStaticGeom.bBoxMax = geoms[i].bBoxMax; //bBox is in local coordinates, dont change over frames. newStaticGeom.bBoxMin = geoms[i].bBoxMin; newStaticGeom.numOfTris = geoms[i].numOfTris; hipMalloc((void**)&cudatris, geoms[meshID].numOfTris*sizeof(triangle)); hipMemcpy( cudatris, geoms[meshID].tris, geoms[meshID].numOfTris *sizeof(triangle), hipMemcpyHostToDevice); //printf("num of tris: %d\n",geoms[meshID].numOfTris); /*if(iterations == 3){ for (int j=0; j<geoms[meshID].numOfTris; j++){ printf("geoms triangle %d: \n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n", j, geoms[meshID].tris[j].p1.x, geoms[meshID].tris[j].p1.y, geoms[meshID].tris[j].p1.z, geoms[meshID].tris[j].p2.x, geoms[meshID].tris[j].p2.y, geoms[meshID].tris[j].p2.z, geoms[meshID].tris[j].p3.x, geoms[meshID].tris[j].p3.y, geoms[meshID].tris[j].p3.z); } }*/ /* newStaticGeom.tris = cudatris; if(iterations == 3){ for (int j=0; j<geoms[meshID].numOfTris; j++){ printf("StaticGeom triangle %d: \n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n", j, newStaticGeom.tris[j].p1.x, newStaticGeom.tris[j].p1.y, newStaticGeom.tris[j].p1.z, newStaticGeom.tris[j].p2.x, newStaticGeom.tris[j].p2.y, newStaticGeom.tris[j].p2.z, newStaticGeom.tris[j].p3.x, newStaticGeom.tris[j].p3.y, newStaticGeom.tris[j].p3.z); } }*/ } geomList[i] = newStaticGeom; } staticGeom* cudageoms = NULL; hipMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); hipMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), hipMemcpyHostToDevice); // package camera cameraData cam; cam.resolution = renderCam->resolution; cam.position = renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; cam.aperture = renderCam->aperture; cam.focalLength = renderCam->focalLength; // material setup material* cudamats = NULL; hipMalloc((void**)&cudamats, numberOfMaterials*sizeof(material)); hipMemcpy( cudamats, materials, numberOfMaterials*sizeof(material), hipMemcpyHostToDevice); //lights setup int numberOfLights = 0; for(int i = 0; i < numberOfGeoms; ++i){ if( geoms[i].materialid >= 0 && materials[geoms[i].materialid].emittance > 0){ numberOfLights ++ ; } } int *lightIDs = new int[numberOfLights]; int k = 0; for(int i = 0; i < numberOfGeoms; ++i){ if( geoms[i].materialid >= 0 && 
materials[geoms[i].materialid].emittance > 0){ lightIDs[k] = i; k++; } } int* cudalightIDs = NULL; hipMalloc((void**)&cudalightIDs, numberOfLights*sizeof(int)); hipMemcpy( cudalightIDs, lightIDs, numberOfLights*sizeof(int), hipMemcpyHostToDevice); //set up textures int numberOfPixels = textures.size(); glm::vec3 *texs = new glm::vec3[numberOfPixels]; for(int i = 0; i < numberOfPixels; ++i){ texs[i] = textures[i]; } glm::vec3 *cudatexs = NULL; hipMalloc((void**)&cudatexs,numberOfPixels * sizeof(glm::vec3)); hipMemcpy( cudatexs, texs, numberOfPixels * sizeof(glm::vec3), hipMemcpyHostToDevice); //set up textures id int numOfTextures = textureIDs.size(); tex *texIDs = new tex[numOfTextures]; for(int i = 0; i < numOfTextures; ++i){ texIDs[i] = textureIDs[i]; } tex *cudatexIDs = NULL; hipMalloc((void**)&cudatexIDs, numOfTextures * sizeof(tex)); hipMemcpy( cudatexIDs, texIDs, numOfTextures* sizeof(tex), hipMemcpyHostToDevice); //set up ray pool on device ray* cudarays = NULL; int numOfRays = cam.resolution.x * cam.resolution.y; hipMalloc((void**)&cudarays, numOfRays*sizeof(ray)); // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize))); hipLaunchKernelGGL(( initialRayPool), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudarays, cam, (float)iterations, cudaimage,cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cudatris); for(int k=0; k<MAX_DEPTH && numOfRays>0; k++){ if(STREAM_COMPACT){ thrust::device_ptr<ray> Start = thrust::device_pointer_cast(cudarays); //coverts cuda pointer to thrust pointer thrust::device_ptr<ray> End = thrust::remove_if(Start, Start + numOfRays, is_dead()); numOfRays = thrust::distance(Start, End); } //xBlocks * yBlocks = numOfRays / (tileSize*tileSize) int xBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) ); int yBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) ); dim3 newBlocksPerGrid(xBlocks,yBlocks); hipLaunchKernelGGL(( raytraceRay), dim3(newBlocksPerGrid), dim3(threadsPerBlock), 0, 0, cudarays, (float)iterations, k, (int)numOfRays, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cam, cudatris, cudatexs, cudatexIDs); } hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, renderCam->resolution, cudaimage,(float)iterations); // retrieve image from GPU hipMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), hipMemcpyDeviceToHost); // free up stuff, or else we'll leak memory like a madman hipFree( cudaimage ); hipFree( cudageoms ); hipFree( cudamats ); hipFree( cudarays ); hipFree( cudatexs ); hipFree( cudatexIDs ); hipFree( cudalightIDs ); if(meshID >-1 ){ hipFree( cudatris ); } delete geomList; //delete matList; delete lightIDs; delete texs; delete texIDs; // make certain the kernel has completed hipDeviceSynchronize(); checkCUDAError("Kernel failed!"); }
f056b9d48fa52bc0c40d4b077cca19a14c973438.cu
// CIS565 CUDA Raytracer: A parallel raytracer for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania // This file includes code from: // Rob Farber for CUDA-GL interop, from CUDA Supercomputing For The Masses: http://www.drdobbs.com/architecture-and-design/cuda-supercomputing-for-the-masses-part/222600097 // Peter Kutz and Yining Karl Li's GPU Pathtracer: http://gpupathtracer.blogspot.com/ // Yining Karl Li's TAKUA Render, a massively parallel pathtracing renderer: http://www.yiningkarlli.com #include <stdio.h> #include <cuda.h> #include <cmath> #include <math.h> #include "sceneStructs.h" #include "glm/glm.hpp" #include "utilities.h" #include "raytraceKernel.h" #include "intersections.h" #include "interactions.h" struct is_dead{ __host__ __device__ bool operator()(const ray& r) { return r.isDead; } }; void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } // LOOK: This function demonstrates how to use thrust for random number generation on the GPU! // Function that generates static. __host__ __device__ glm::vec3 generateRandomNumberFromThread(glm::vec2 resolution, float time, int x, int y){ int index = x + (y * resolution.x); thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } __host__ __device__ glm::vec2 generateRandomNumberAntiAliasing(float seed, float x, float y, float d){ thrust::default_random_engine rng(hash(seed)); thrust::uniform_real_distribution<float> u01(0,1); float xOffset = (float)u01(rng) * 2 * d; float yOffset = (float)u01(rng) * 2 * d; return glm::vec2(x - d + xOffset, y - d + yOffset); } // TODO: IMPLEMENT THIS FUNCTION // Function that does the initial raycast from the camera __host__ __device__ ray raycastFromCameraKernel(glm::vec2 resolution, float time, int x, int y, glm::vec3 eye, glm::vec3 view, glm::vec3 up, glm::vec2 fov){ glm::vec3 A = glm::cross(view, up); glm::vec3 B = glm::cross(A, view); glm::vec3 M = eye + view; glm::vec3 V = (float)tan(fov.y/(float)180.0*PI) * glm::length(view) * glm::normalize(B); glm::vec3 H = (float)tan(fov.x/(float)180.0*PI) * glm::length(view) * glm::normalize(A); //choose point on the image plane based on pixel location float Sh = float(x)/float(resolution.x-1); float Sv = 1- float(y)/float(resolution.y-1); //invert y coordinate //choose random point on image plane /*thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); float Sh = (float) u01(rng); float Sv = (float) u01(rng);*/ //sreen coordinates to world coordinates glm::vec3 point = M + (float)(2*Sh-1)*H + (float)(2*Sv-1)*V; //initial cast of ray ray r; r.direction = glm::normalize(point - eye); r.origin = eye; r.color = glm::vec3(1,1,1); r.isDead = false; return r; } //Kernel that blacks out a given image buffer __global__ void clearImage(glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = glm::vec3(0,0,0); } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image, float iterations){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0f/iterations; color.y = image[index].y*255.0f/iterations; color.z = image[index].z*255.0f/iterations; //weight for each iteration if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } // loop through all geometry to test ray intersection, returns the geoID that corresponds to intersected geometry __host__ __device__ int findHitGeo(ray r, glm::vec3& intersect, glm::vec3& normal, staticGeom* geoms, int numberOfGeoms, triangle * cudatris){ if(r.isDead) return -1; float distMin = -2, dist = -1; glm::vec3 tempIntersect(0.0f); glm::vec3 tempNormal(0.0f); int ID = -1; //geometry and ray intersect tesing for (int g=0; g<numberOfGeoms; g++){ if(geoms[g].type == SPHERE){ dist = sphereIntersectionTest(geoms[g], r, tempIntersect, tempNormal); } else if(geoms[g].type == CUBE ){ dist = boxIntersectionTest(geoms[g], r, tempIntersect, tempNormal); } else if (geoms[g].type == MESH){ dist = polygonIntersectionTest(geoms[g], r, tempIntersect, tempNormal, cudatris); } //overwrite minimum distance if needed if( (distMin < 0 && dist > -0.5f ) || ( distMin > -1 && dist < distMin && dist > -0.5f ) ){ distMin = dist; //update minimum dist ID = g; //update ID of geometry intersect = tempIntersect; //update intersct point normal = tempNormal; //update normal } } return ID; } //return true if there is direct lighting __host__ __device__ bool ShadowRayTest(ray sr, staticGeom* geoms, int numberOfGeoms, material* materials, triangle * cudatris){ glm::vec3 intersPoint(0.0f); glm::vec3 intersNormal(0.0f); //printf("shadow ray: [%f,%f,%f], [%f,%f,%f]\n", sr.origin.x,sr.origin.y,sr.origin.z,sr.direction.x,sr.direction.y,sr.direction.z); int geoID = findHitGeo(sr, intersPoint, intersNormal, geoms, numberOfGeoms, cudatris); if( geoID>-1 && geoms[geoID].materialid >= 0 &&materials[geoms[geoID].materialid].emittance > 0){ //hit light soource return true; } else{ return false; } } // get shaw ray to a random chosen light, modify the shadowray, return ID of chosen light __device__ __host__ int getRandomShadowRayDirection(float seed, glm::vec3& theIntersect, int* lights, int numOfLights, staticGeom* geoms, ray& shadowRay, float& rayLength, glm::vec3 lightNormal, float lightArea){ // ****************** choose light first ******************************** // int chosenLight = lights[0]; //only one light if( numOfLights > 1){ //more than 1 light thrust::default_random_engine rng(hash(seed)); thrust::uniform_real_distribution<float> u01(0,1); chosenLight = lights[(int)((float)u01(rng) * numOfLights)]; //randomly choose a light to sample } // ****************** find a point on light ******************************** // glm::vec3 Lnormal(0.0f); //light normal float Larea; //light area glm::vec3 Plight; //random point on light if( geoms[chosenLight].type == CUBE ){ //Plight = getRandomPointOnCube( geoms[chosenLight], seed); Plight = getRandomPointOnCube( geoms[chosenLight], seed, Lnormal, Larea); } else if( geoms[chosenLight].type == SPHERE ){ Plight = getRandomPointOnSphere( geoms[chosenLight], seed, 
Lnormal, Larea); } // ****************** shadow ray test ******************************** // shadowRay.direction = glm::normalize(Plight - theIntersect); //from intersect to light shadowRay.origin = theIntersect + (float)EPSILON * shadowRay.direction; rayLength = glm::length(Plight - theIntersect); return chosenLight; } __device__ __host__ glm::vec3 getTextureColor(glm::vec3* cudatexs, tex* cudatexIDs, glm::vec3 &intersect, staticGeom& geom){ tex theTex = cudatexIDs[abs(geom.materialid)-1]; // printf("theTex: h=%d, w=%d, start=%d\n", theTex.h, theTex.w, theTex.start); glm::vec3 p = multiplyMV(geom.inverseTransform, glm::vec4(intersect,1.0f)); float u,v; if(geom.type == CUBE){ // printf("p.x=%f, p.y = %f, p.z=%f, intersect.x=%f, intersect.y=%f, intersect.z=%f\n",p.x, p.y, p.z, intersect.x, intersect.y, intersect.z); if(std::abs(0.5f - abs(p.x)) < EPSILON){ //left or right face u = p.z + 0.5f; v = p.y + 0.5f; }else if(std::abs(0.5f - abs(p.y)) < EPSILON){ // top or bottom face u = p.x + 0.5f; v = p.z + 0.5f; }else if(std::abs(0.5f - abs(p.z)) < EPSILON){ //front or back face u = p.x + 0.5f; v = p.y + 0.5f; v = 1.0f - v; } }else if(geom.type == SPHERE){ glm::vec3 d = glm::vec3(0.0)- glm::vec3(p.x, p.y, p.z); // printf("p.x=%f, p.y = %f, p.z=%f, intersect.x=%f, intersect.y=%f, intersect.z=%f\n",p.x, p.y, p.z, intersect.x, intersect.y, intersect.z); u = 0.5f + atan2(d.z, d.x) * 0.5f / PI; v = 0.5f - asin(d.y) / PI; } int i,j,idx = -1; i = u * (float)theTex.w; j = v * (float)theTex.h; idx = i + j * theTex.w + theTex.start; // printf("x=%f, z=%f, u=%f, v=%f, idx = %d\n",intersect.x * 0.2f,intersect.z * 0.2f, u, v, idx); if( idx <= theTex.w * theTex.h + theTex.start && idx>=theTex.start ){ glm::vec3 color(cudatexs[idx].r/255.0, cudatexs[idx].g/255.0, cudatexs[idx].b/255.0); return color; } return intersect; } //calculates the direct lighting for a certain hit point and modify color of that hit __device__ __host__ void directLighting(float seed, glm::vec3& theColor, glm::vec3& theIntersect, glm::vec3& theNormal, int geoID, int* lights, int numOfLights, material* cudamats, staticGeom* geoms, int numOfGeoms, triangle * cudatris){ ray shadowRay; float rayLen,lightArea; glm::vec3 lightNormal; int lightID = getRandomShadowRayDirection(seed, theIntersect, lights, numOfLights, geoms, shadowRay, rayLen, lightNormal, lightArea); // ****************** shading if direct illumination ****************** // if(geoms[geoID].materialid >= 0 ){ material curMat = cudamats[geoms[geoID].materialid]; //material of the hit goemetry if(ShadowRayTest(shadowRay, geoms, numOfGeoms, cudamats, cudatris) ){ float cosTerm = glm::clamp( glm::dot( theNormal, shadowRay.direction ), 0.0f, 1.0f); //proportion of facing light float cosTerm2 = glm::clamp( glm::dot( lightNormal, -shadowRay.direction ), 0.0f, 1.0f); //proportion of incoming light float areaSampling = lightArea / (float) pow( rayLen, 2.0f) ; // dA/r^2 theColor += cudamats[lightID].emittance * curMat.color * cosTerm * cosTerm2 * areaSampling ; } } //don't kill any ray in direct lighting calculation } // TODO: IMPLEMENT THIS FUNCTION // Core raytracer kernel __global__ void raytraceRay(ray* rays, float time, int rayDepth, int numOfRays, glm::vec3* colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int* lights, int numOfLights, cameraData cam, triangle* cudatris, glm::vec3* cudatexs, tex* cudatexIDs){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; // int index = x * blockDim.y + y; // int index = x + (y * 
resolution.x); int index = x + (int)ceil(sqrt((float)numOfRays))* y; // printf("blockDim: %d, %d\n", blockDim.x, blockDim.y); if( index < numOfRays ){ ray r = rays[index]; glm::vec3 Pintersect(0.0f); glm::vec3 Pnormal(0.0f); int hitGeoID = findHitGeo(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris); if(hitGeoID!=-1){ material curMat; if(geoms[hitGeoID].materialid >= 0) curMat = cudamats[geoms[hitGeoID].materialid]; if( curMat.emittance > 0 ){ //end when hit light source if(glm::length(r.color)>0.6f){ // printf("ray color:[%f, %f, %f]", r.color.r,r.color.g,r.color.b); } colors[r.pixel] += r.color * curMat.color * curMat.emittance; r.isDead = true; } else{ //int mode = calculateBSDF(r, Pintersect, Pnormal, color, uncolor, cudamats[matID], hash(index*time)); float seed = (float)index * (float)time * ( (float)rayDepth + 1.0f ); if(curMat.hasReflective > 0 || curMat.hasRefractive > 0){ //------------------------------- calculate Fresnel reflectance and transmittance --------------------------// Fresnel F; float reflectance; glm::vec3 reflectDir, transmitDir; if(glm::dot(r.direction,Pnormal)<0){ //ray is outside F = calculateFresnel(Pnormal,r.direction,1.0f, curMat.indexOfRefraction); reflectDir = calculateReflectionDirection(Pnormal, r.direction); transmitDir = calculateTransmissionDirection(Pnormal, r.direction,1.0f, curMat.indexOfRefraction); } else{ //ray is inside F = calculateFresnel(-Pnormal,r.direction, curMat.indexOfRefraction,1.0f); reflectDir = calculateReflectionDirection(-Pnormal, r.direction); transmitDir = calculateTransmissionDirection(-Pnormal, r.direction, curMat.indexOfRefraction, 1.0f); } //--------------------------------------------------------------------------------------------------------// //----------------------- choosing between reflection or refraction or both -------------------------------// if( curMat.hasRefractive > 0 && curMat.hasReflective > 0){ thrust::default_random_engine rng( hash( seed ) ); thrust::uniform_real_distribution<float> u01(0,1); if((float) u01(rng) < F.reflectionCoefficient ){ //reflected r.direction = reflectDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; //r.color *= curMat.color * F.reflectionCoefficient; } else{ //transmitted r.direction = transmitDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; } } else if(curMat.hasReflective > 0){ //only reflection r.direction = reflectDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; } else if (curMat.hasRefractive > 0){ //only refraction r.direction = transmitDir; r.origin = Pintersect + (float)EPSILON * r.direction; //colors[r.pixel] += glm::abs(r.direction); if(glm::length(curMat.color)>0) r.color *= curMat.color ; } } //--------------------------------------------------------------------------------------------------------// else if (curMat.hasScatter>0){ } else{ //diffuse rays thrust::default_random_engine rng( hash( seed ) ); thrust::uniform_real_distribution<float> u01(0,1); if((float) u01(rng) < 0.01f ){ //proportion to calculate direct lighting directLighting(seed,r.color,Pintersect,Pnormal,hitGeoID,lights,numOfLights, cudamats,geoms, numberOfGeoms, cudatris); } else{ //proportion to calculate indirect lighting //cos weighted importance sampling r.direction 
= calculateCosWeightedRandomDirInHemisphere(Pnormal, (float) u01(rng), (float) u01(rng)); r.origin = Pintersect + (float)EPSILON * r.direction ; float diffuseTerm = glm::clamp( glm::dot( Pnormal,r.direction ), 0.0f, 1.0f); if(geoms[hitGeoID].materialid < 0){ //texture r.color *= diffuseTerm * getTextureColor(cudatexs, cudatexIDs, Pintersect, geoms[hitGeoID]); }else{ r.color *= diffuseTerm * curMat.color; } } } //----------------------------------------- Other Effects ----------------------------------------------// if(curMat.specularExponent > 0 ){ //specularity & glossiness thrust::default_random_engine rng( hash( seed ) ); thrust::uniform_real_distribution<float> u01(0,1); ray shadowRay; float rayLen,lightArea; glm::vec3 lightNormal; int lightID = getRandomShadowRayDirection(seed, Pintersect, lights, numOfLights, geoms, shadowRay, rayLen, lightNormal, lightArea); glm::vec3 viewDir = glm::normalize( cam.position - Pintersect ); glm::vec3 halfVector = glm::normalize(shadowRay.direction + viewDir); //H=(L+V)/length(L+V) float D = glm::clamp( glm::dot( Pnormal,halfVector), 0.0f, 1.0f); float specularTerm = pow(D, curMat.specularExponent); //perfect specular means normal vector = half vector r.color *= specularTerm * curMat.color; } } } else{ //hit nothing r.isDead = true; } rays[index] = r; } } // establish parrallel ray pool __global__ void initialRayPool(ray * rayPool, cameraData cam, float iterations,glm::vec3 *colors, staticGeom* geoms, int numberOfGeoms, material* cudamats, int * lightIDs, int numberOfLights, triangle * cudatris){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * cam.resolution.x); ray r = rayPool[index]; if( x<= cam.resolution.x && y <= cam.resolution.y ){ if(ANTI_ALIASING){ glm::vec2 jitter = generateRandomNumberAntiAliasing((float)index * iterations, x, y, 0.5f); //anti-alizsing r = raycastFromCameraKernel( cam.resolution, iterations, jitter.x, jitter.y, cam.position, cam.view, cam.up, cam.fov ); } else{ r = raycastFromCameraKernel( cam.resolution, iterations, x, y, cam.position, cam.view, cam.up, cam.fov ); } r.pixel = index; //mark ray with pixel indexing, after compaction, (r.pixel) will represent correct pixel location if(DEPTH_OF_FIELD){ glm::vec3 focalPoint = r.origin + r.direction * cam.focalLength / glm::dot(cam.view, r.direction); //L = f/cos(theta) thrust::default_random_engine rng(hash((float)index*iterations)); thrust::uniform_real_distribution<float> u01(0,1); float theta = 2.0f * PI * u01(rng); float radius = u01(rng) * cam.aperture; glm::vec3 eyeOffset(cos(theta)*radius, sin(theta)*radius, 0); glm::vec3 newEyePoint = cam.position + eyeOffset; //offseted cam eye location r.origin = newEyePoint; r.direction = glm::normalize(focalPoint - newEyePoint); } glm::vec3 Pintersect(0.0f); glm::vec3 Pnormal(0.0f); int geoID = findHitGeo(r, Pintersect, Pnormal, geoms, numberOfGeoms, cudatris); if( geoID > -1){ // cast shadow ray towards lights and calculate direct lighting directLighting((float)index*iterations, colors[index], Pintersect, Pnormal,geoID, lightIDs, numberOfLights, cudamats, geoms, numberOfGeoms, cudatris); } rayPool[index] = r; } } // TODO: FINISH THIS FUNCTION // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void cudaRaytraceCore(uchar4* PBOpos, camera* renderCam, int frame, int iterations, material* materials, int numberOfMaterials, geom* geoms, int numberOfGeoms, std::vector<glm::vec3> &textures, std::vector<tex> &textureIDs){ 
//frame: current frame number //iterations: current iteration of rendering < (cam.iterations) if(iterations == 572 || iterations == 46 || iterations == 7){ printf("problem"); } // send image to GPU glm::vec3* cudaimage = NULL; cudaMalloc((void**)&cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3)); cudaMemcpy( cudaimage, renderCam->image, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyHostToDevice); // package geometry and materials and sent to GPU staticGeom* geomList = new staticGeom[numberOfGeoms]; // triangle* triList; int meshID = -1; triangle* cudatris = NULL; for(int i=0; i<numberOfGeoms; i++){ staticGeom newStaticGeom; newStaticGeom.type = geoms[i].type; newStaticGeom.materialid = geoms[i].materialid; newStaticGeom.translation = geoms[i].translations[frame]; newStaticGeom.rotation = geoms[i].rotations[frame]; newStaticGeom.scale = geoms[i].scales[frame]; newStaticGeom.transform = geoms[i].transforms[frame]; newStaticGeom.inverseTransform = geoms[i].inverseTransforms[frame]; if(geoms[i].type == MESH){ meshID = i; // my code now only handles one obj load (unfortunately as I am not able to handle list of triangles well) newStaticGeom.bBoxMax = geoms[i].bBoxMax; //bBox is in local coordinates, dont change over frames. newStaticGeom.bBoxMin = geoms[i].bBoxMin; newStaticGeom.numOfTris = geoms[i].numOfTris; cudaMalloc((void**)&cudatris, geoms[meshID].numOfTris*sizeof(triangle)); cudaMemcpy( cudatris, geoms[meshID].tris, geoms[meshID].numOfTris *sizeof(triangle), cudaMemcpyHostToDevice); //printf("num of tris: %d\n",geoms[meshID].numOfTris); /*if(iterations == 3){ for (int j=0; j<geoms[meshID].numOfTris; j++){ printf("geoms triangle %d: \n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n", j, geoms[meshID].tris[j].p1.x, geoms[meshID].tris[j].p1.y, geoms[meshID].tris[j].p1.z, geoms[meshID].tris[j].p2.x, geoms[meshID].tris[j].p2.y, geoms[meshID].tris[j].p2.z, geoms[meshID].tris[j].p3.x, geoms[meshID].tris[j].p3.y, geoms[meshID].tris[j].p3.z); } }*/ /* newStaticGeom.tris = cudatris; if(iterations == 3){ for (int j=0; j<geoms[meshID].numOfTris; j++){ printf("StaticGeom triangle %d: \n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n [%.2f, %.2f, %.2f]\n", j, newStaticGeom.tris[j].p1.x, newStaticGeom.tris[j].p1.y, newStaticGeom.tris[j].p1.z, newStaticGeom.tris[j].p2.x, newStaticGeom.tris[j].p2.y, newStaticGeom.tris[j].p2.z, newStaticGeom.tris[j].p3.x, newStaticGeom.tris[j].p3.y, newStaticGeom.tris[j].p3.z); } }*/ } geomList[i] = newStaticGeom; } staticGeom* cudageoms = NULL; cudaMalloc((void**)&cudageoms, numberOfGeoms*sizeof(staticGeom)); cudaMemcpy( cudageoms, geomList, numberOfGeoms*sizeof(staticGeom), cudaMemcpyHostToDevice); // package camera cameraData cam; cam.resolution = renderCam->resolution; cam.position = renderCam->positions[frame]; cam.view = renderCam->views[frame]; cam.up = renderCam->ups[frame]; cam.fov = renderCam->fov; cam.aperture = renderCam->aperture; cam.focalLength = renderCam->focalLength; // material setup material* cudamats = NULL; cudaMalloc((void**)&cudamats, numberOfMaterials*sizeof(material)); cudaMemcpy( cudamats, materials, numberOfMaterials*sizeof(material), cudaMemcpyHostToDevice); //lights setup int numberOfLights = 0; for(int i = 0; i < numberOfGeoms; ++i){ if( geoms[i].materialid >= 0 && materials[geoms[i].materialid].emittance > 0){ numberOfLights ++ ; } } int *lightIDs = new int[numberOfLights]; int k = 0; for(int i = 0; i < numberOfGeoms; ++i){ if( geoms[i].materialid >= 0 
&& materials[geoms[i].materialid].emittance > 0){ lightIDs[k] = i; k++; } } int* cudalightIDs = NULL; cudaMalloc((void**)&cudalightIDs, numberOfLights*sizeof(int)); cudaMemcpy( cudalightIDs, lightIDs, numberOfLights*sizeof(int), cudaMemcpyHostToDevice); //set up textures int numberOfPixels = textures.size(); glm::vec3 *texs = new glm::vec3[numberOfPixels]; for(int i = 0; i < numberOfPixels; ++i){ texs[i] = textures[i]; } glm::vec3 *cudatexs = NULL; cudaMalloc((void**)&cudatexs,numberOfPixels * sizeof(glm::vec3)); cudaMemcpy( cudatexs, texs, numberOfPixels * sizeof(glm::vec3), cudaMemcpyHostToDevice); //set up textures id int numOfTextures = textureIDs.size(); tex *texIDs = new tex[numOfTextures]; for(int i = 0; i < numOfTextures; ++i){ texIDs[i] = textureIDs[i]; } tex *cudatexIDs = NULL; cudaMalloc((void**)&cudatexIDs, numOfTextures * sizeof(tex)); cudaMemcpy( cudatexIDs, texIDs, numOfTextures* sizeof(tex), cudaMemcpyHostToDevice); //set up ray pool on device ray* cudarays = NULL; int numOfRays = cam.resolution.x * cam.resolution.y; cudaMalloc((void**)&cudarays, numOfRays*sizeof(ray)); // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(renderCam->resolution.x)/float(tileSize)), (int)ceil(float(renderCam->resolution.y)/float(tileSize))); initialRayPool<<<fullBlocksPerGrid, threadsPerBlock>>>(cudarays, cam, (float)iterations, cudaimage,cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cudatris); for(int k=0; k<MAX_DEPTH && numOfRays>0; k++){ if(STREAM_COMPACT){ thrust::device_ptr<ray> Start = thrust::device_pointer_cast(cudarays); //coverts cuda pointer to thrust pointer thrust::device_ptr<ray> End = thrust::remove_if(Start, Start + numOfRays, is_dead()); numOfRays = thrust::distance(Start, End); } //xBlocks * yBlocks = numOfRays / (tileSize*tileSize) int xBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) ); int yBlocks = (int) ceil( sqrt((float)numOfRays)/(float)(tileSize) ); dim3 newBlocksPerGrid(xBlocks,yBlocks); raytraceRay<<<newBlocksPerGrid, threadsPerBlock>>>(cudarays, (float)iterations, k, (int)numOfRays, cudaimage, cudageoms, numberOfGeoms, cudamats, cudalightIDs, numberOfLights, cam, cudatris, cudatexs, cudatexIDs); } sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, renderCam->resolution, cudaimage,(float)iterations); // retrieve image from GPU cudaMemcpy( renderCam->image, cudaimage, (int)renderCam->resolution.x*(int)renderCam->resolution.y*sizeof(glm::vec3), cudaMemcpyDeviceToHost); // free up stuff, or else we'll leak memory like a madman cudaFree( cudaimage ); cudaFree( cudageoms ); cudaFree( cudamats ); cudaFree( cudarays ); cudaFree( cudatexs ); cudaFree( cudatexIDs ); cudaFree( cudalightIDs ); if(meshID >-1 ){ cudaFree( cudatris ); } delete geomList; //delete matList; delete lightIDs; delete texs; delete texIDs; // make certain the kernel has completed cudaThreadSynchronize(); checkCUDAError("Kernel failed!"); }
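The stream-compaction step in cudaRaytraceCore relies on thrust::remove_if with an is_dead predicate whose definition does not appear in this file. A minimal sketch of such a functor, assuming it simply tests the ray::isDead flag that raytraceRay sets, could look like the following (hypothetical helper, not necessarily the author's exact version):

// Hypothetical unary predicate for thrust::remove_if; assumes ray::isDead is the
// flag set when a ray hits an emitter or leaves the scene.
struct is_dead {
  __host__ __device__
  bool operator()(const ray& r) const {
    return r.isDead;   // true means the ray is dropped from the active pool
  }
};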
93ea783ba6080d8127e7667307acd4a6e56ba080.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Host Side Code for Cross-correlation in GPU #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> #include "corr2Mex.h" //#include "Cross_Data_type.h" #include "normXcorr_GPUKernel.cu" using namespace std; Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width,int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); bool CompareResults(float* A, float* B, int elements, float eps); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); //// Cuda Kernel Call ////// void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters) { // Load Pre and Post to the device Matrix Pred = AllocateDeviceMatrix(Pre); CopyToDeviceMatrix(Pred, Pre); Matrix Postd = AllocateDeviceMatrix(Post); CopyToDeviceMatrix(Postd, Post); // Allocate Space for Pre-Mean float *preMean; float *preVar; hipMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY); hipMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY); // Allocate SoA on the device ????? float *CorrD; hipMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY); //hipMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY); // Setup the execution configuration dim3 dimBlock(parameters.searchX, parameters.searchY); dim3 dimGrid(parameters.numX, parameters.numY); int sharedmemsize = 2*parameters.searchX*parameters.searchY*sizeof(float); // Launch the device computation threads! hipLaunchKernelGGL(( normXcorr_GPU), dim3(dimGrid), dim3(dimBlock),sharedmemsize, 0, Pred,Postd,CorrD,parameters,preMean,preVar); //Copting SoA from Device to Host //CopyFromDeviceMatrix(Corr, Corrd); //hipMemcpy(CorrH,CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost); hipMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,hipMemcpyDeviceToHost); // Free device matrices FreeDeviceMatrix(&Pred); FreeDeviceMatrix(&Postd); hipFree(CorrD); //FreeDeviceMatrix(&Corrd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; FILE *fp; fp = fopen("trialNumbers.inp","r"); // don't allocate memory on option 2 M.elements = (float*) malloc(size*sizeof(float)); if(init) { for(unsigned int i = 0; i < M.width * M.height; i++) { fscanf(fp,"%f",&M.elements[i]); } } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Free a device matrix. 
void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; }
93ea783ba6080d8127e7667307acd4a6e56ba080.cu
// Host Side Code for Cross-correlation in GPU #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <fstream> #include "corr2Mex.h" //#include "Cross_Data_type.h" #include "normXcorr_GPUKernel.cu" using namespace std; Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width,int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); bool CompareResults(float* A, float* B, int elements, float eps); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); //// Cuda Kernel Call ////// void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters) { // Load Pre and Post to the device Matrix Pred = AllocateDeviceMatrix(Pre); CopyToDeviceMatrix(Pred, Pre); Matrix Postd = AllocateDeviceMatrix(Post); CopyToDeviceMatrix(Postd, Post); // Allocate Space for Pre-Mean float *preMean; float *preVar; cudaMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY); cudaMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY); // Allocate SoA on the device ????? float *CorrD; cudaMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY); //cudaMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY); // Setup the execution configuration dim3 dimBlock(parameters.searchX, parameters.searchY); dim3 dimGrid(parameters.numX, parameters.numY); int sharedmemsize = 2*parameters.searchX*parameters.searchY*sizeof(float); // Launch the device computation threads! normXcorr_GPU<<<dimGrid, dimBlock,sharedmemsize>>>(Pred,Postd,CorrD,parameters,preMean,preVar); //Copting SoA from Device to Host //CopyFromDeviceMatrix(Corr, Corrd); //cudaMemcpy(CorrH,CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost); cudaMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,cudaMemcpyDeviceToHost); // Free device matrices FreeDeviceMatrix(&Pred); FreeDeviceMatrix(&Postd); cudaFree(CorrD); //FreeDeviceMatrix(&Corrd); } // Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; FILE *fp; fp = fopen("trialNumbers.inp","r"); // don't allocate memory on option 2 M.elements = (float*) malloc(size*sizeof(float)); if(init) { for(unsigned int i = 0; i < M.width * M.height; i++) { fscanf(fp,"%f",&M.elements[i]); } } return M; } // Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Free a device matrix. 
void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; }
a68df1d224c92424dfeac1437c1878745d01ea92.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>

__device__ void mergeDevice(int *list, int *sorted, int start, int mid, int end)
{
    int ti=start, i=start, j=mid;
    while (i<mid || j<end)
    {
        if (j==end) sorted[ti] = list[i++];
        else if (i==mid) sorted[ti] = list[j++];
        else if (list[i]<list[j]) sorted[ti] = list[i++];
        else sorted[ti] = list[j++];
        ti++;
    }

    for (ti=start; ti<end; ti++)
        list[ti] = sorted[ti];
}

void mergeHost(int *list, int *sorted, int start, int mid, int end)
{
    int ti=start, i=start, j=mid;
    while (i<mid || j<end)
    {
        if (j==end) sorted[ti] = list[i++];
        else if (i==mid) sorted[ti] = list[j++];
        else if (list[i]<list[j]) sorted[ti] = list[i++];
        else sorted[ti] = list[j++];
        ti++;
    }

    for (ti=start; ti<end; ti++)
        list[ti] = sorted[ti];
}

__device__ void mergeSortKernel(int *list, int *sorted, int start, int end)
{
    // Base case 1: there are more threads than elements in the vector
    if (end-start<2)
        return;

    mergeSortKernel(list, sorted, start, start + (end-start)/2);
    mergeSortKernel(list, sorted, start + (end-start)/2, end);
    mergeDevice(list, sorted, start, start + (end-start)/2, end);
}

__global__ void callMergeSort(int *list, int *sorted, int chunkSize, int N)
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    int start = tid*chunkSize;
    int end = start + chunkSize;
    if (end > N) {
        end = N;
    }
    mergeSortKernel(list, sorted, start, end);
}

void printArray(int A[], int size)
{
    int i;
    for (i=0; i < size; i++)
        printf("%d ", A[i]);
    printf("\n");
}

void InitV(int N, int *v)
{
    int i;
    for (i=0; i<N; i++)
        v[i] = rand() % 4145000;
}

int main()
{
    int *arr_h, *arrSorted_h, *arrSortedF_h;
    int *arr_d, *arrSorted_d, *arrSortedF_d;
    int chunkSize;
    unsigned int nBytes;
    unsigned int N;
    unsigned int nBlocks, nThreads;

    N = 4145152;
    nThreads = 256;
    nBlocks = 16;
    chunkSize = N/(nThreads*nBlocks);
    nBytes = N * sizeof(int);

    hipEvent_t start, stop;
    float timeTaken;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    arr_h = (int*) malloc(nBytes);
    arrSorted_h = (int*) malloc(nBytes);
    arrSortedF_h = (int*) malloc(nBytes);
    InitV(N, arr_h);

    hipMalloc((int**)&arr_d, nBytes);
    hipMalloc((int**)&arrSorted_d, nBytes);
    hipMalloc((int**)&arrSortedF_d, nBytes);
    hipMemcpy(arr_d, arr_h, nBytes, hipMemcpyHostToDevice);

    printf("Given array is \n");
    printArray(arr_h, N);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( callMergeSort), dim3(nBlocks), dim3(nThreads), 0, 0, arr_d, arrSorted_d,chunkSize, N);
    hipMemcpy(arrSorted_h, arrSorted_d, nBytes, hipMemcpyDeviceToHost);

    for (int i = chunkSize*2; i < N + chunkSize; i = i + chunkSize) {
        int mid = i-chunkSize;
        int end = i;
        if (end > N) {
            end = N;
        }
        mergeHost(arrSorted_h,arrSortedF_h, 0, mid, end);
    }

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    hipFree(arr_d);
    hipFree(arrSorted_d);
    hipFree(arrSortedF_d);

    hipEventElapsedTime(&timeTaken, start, stop);

    printf("\nSorted array is \n");
    printArray(arrSortedF_h, N);

    printf("nThreads: %d\n", nThreads);
    printf("nBlocks: %d\n", nBlocks);
    printf("Tiempo Total %4.6f ms\n", timeTaken);
    printf("Ancho de Banda %4.3f GB/s\n", (N * sizeof(int)) / (1000000 * timeTaken));

    return 0;
}
a68df1d224c92424dfeac1437c1878745d01ea92.cu
#include <stdlib.h>
#include <stdio.h>

__device__ void mergeDevice(int *list, int *sorted, int start, int mid, int end)
{
    int ti=start, i=start, j=mid;
    while (i<mid || j<end)
    {
        if (j==end) sorted[ti] = list[i++];
        else if (i==mid) sorted[ti] = list[j++];
        else if (list[i]<list[j]) sorted[ti] = list[i++];
        else sorted[ti] = list[j++];
        ti++;
    }

    for (ti=start; ti<end; ti++)
        list[ti] = sorted[ti];
}

void mergeHost(int *list, int *sorted, int start, int mid, int end)
{
    int ti=start, i=start, j=mid;
    while (i<mid || j<end)
    {
        if (j==end) sorted[ti] = list[i++];
        else if (i==mid) sorted[ti] = list[j++];
        else if (list[i]<list[j]) sorted[ti] = list[i++];
        else sorted[ti] = list[j++];
        ti++;
    }

    for (ti=start; ti<end; ti++)
        list[ti] = sorted[ti];
}

__device__ void mergeSortKernel(int *list, int *sorted, int start, int end)
{
    // Base case 1: there are more threads than elements in the vector
    if (end-start<2)
        return;

    mergeSortKernel(list, sorted, start, start + (end-start)/2);
    mergeSortKernel(list, sorted, start + (end-start)/2, end);
    mergeDevice(list, sorted, start, start + (end-start)/2, end);
}

__global__ void callMergeSort(int *list, int *sorted, int chunkSize, int N)
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    int start = tid*chunkSize;
    int end = start + chunkSize;
    if (end > N) {
        end = N;
    }
    mergeSortKernel(list, sorted, start, end);
}

void printArray(int A[], int size)
{
    int i;
    for (i=0; i < size; i++)
        printf("%d ", A[i]);
    printf("\n");
}

void InitV(int N, int *v)
{
    int i;
    for (i=0; i<N; i++)
        v[i] = rand() % 4145000;
}

int main()
{
    int *arr_h, *arrSorted_h, *arrSortedF_h;
    int *arr_d, *arrSorted_d, *arrSortedF_d;
    int chunkSize;
    unsigned int nBytes;
    unsigned int N;
    unsigned int nBlocks, nThreads;

    N = 4145152;
    nThreads = 256;
    nBlocks = 16;
    chunkSize = N/(nThreads*nBlocks);
    nBytes = N * sizeof(int);

    cudaEvent_t start, stop;
    float timeTaken;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    arr_h = (int*) malloc(nBytes);
    arrSorted_h = (int*) malloc(nBytes);
    arrSortedF_h = (int*) malloc(nBytes);
    InitV(N, arr_h);

    cudaMalloc((int**)&arr_d, nBytes);
    cudaMalloc((int**)&arrSorted_d, nBytes);
    cudaMalloc((int**)&arrSortedF_d, nBytes);
    cudaMemcpy(arr_d, arr_h, nBytes, cudaMemcpyHostToDevice);

    printf("Given array is \n");
    printArray(arr_h, N);

    cudaEventRecord(start, 0);
    callMergeSort<<<nBlocks, nThreads>>>(arr_d, arrSorted_d,chunkSize, N);
    cudaMemcpy(arrSorted_h, arrSorted_d, nBytes, cudaMemcpyDeviceToHost);

    for (int i = chunkSize*2; i < N + chunkSize; i = i + chunkSize) {
        int mid = i-chunkSize;
        int end = i;
        if (end > N) {
            end = N;
        }
        mergeHost(arrSorted_h,arrSortedF_h, 0, mid, end);
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    cudaFree(arr_d);
    cudaFree(arrSorted_d);
    cudaFree(arrSortedF_d);

    cudaEventElapsedTime(&timeTaken, start, stop);

    printf("\nSorted array is \n");
    printArray(arrSortedF_h, N);

    printf("nThreads: %d\n", nThreads);
    printf("nBlocks: %d\n", nBlocks);
    printf("Tiempo Total %4.6f ms\n", timeTaken);
    printf("Ancho de Banda %4.3f GB/s\n", (N * sizeof(int)) / (1000000 * timeTaken));

    return 0;
}
d1f5755233cf8baf1b4d35372c72933d8725f291.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <string> #include <fstream> #include <iostream> #include <algorithm> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> using namespace std; /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ #define OMEGANUM 99840 #define TIMENUM 512 //__constant__ float gOmega[OMEGANUM]; //__constant__ float gTime[OMEGANUM]; __global__ void genARS(const float *inputOmega, float *outputARS, int numElems){ const float PI = 3.14159265358; const float A = 0.0484; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElems){ float omega = inputOmega[i]; float t = PI * 2 / omega; if (t < 0.1) outputARS[i] = A * (5.5 * t + 0.45); else if (t < 0.35) outputARS[i] = A; else outputARS[i] = A * (float)(0.35 / t); } } __global__ void genWRS(const float *inputARS, const float *inputOmega, float *outputWRS, float duration, int numElems){ const float PI = 3.14159265358; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElems){ outputWRS[i] = 0.05 * (inputARS[i] * inputARS[i]) / (-1 * (PI * inputOmega[i]) * log(-PI * log(0.85) / (inputOmega[i] * duration))); if (outputWRS[i] < 0) outputWRS[i] = 0; } } __global__ void setupPRNG(hiprandState_t *state){ int idx = threadIdx.x + blockDim.x * blockIdx.x; hiprand_init(731, idx, 0, &state[idx]); } __global__ void genASW(const float *inputWRS, const float *inputOmega, const float *inputTime, float *outputASW, int numTimeElems, int numOmegaElems, hiprandState_t *state){ const float PI = 3.14159265358; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numTimeElems){ float t = inputTime[i]; float sum = 0; for (int j = 0; j < numOmegaElems; ++j){ float r = hiprand_uniform(state + j) * PI * 2; sum += sqrt(4 * 0.01 * inputWRS[j]) * __cosf(inputOmega[j] * t + r); } outputASW[i] = sum; } } float regenARS(const float inputTime, const float *inputASW, int numTimeElems){ const float PI = 3.14159265358; float angfreq = 2 * PI / inputTime; float deltime = 0.0390625; float dampr = 0.05; float a = angfreq * sqrt(1 - dampr * dampr) * deltime; float b = dampr * angfreq * deltime; float d = sqrt(1 - dampr * dampr); float e = exp(-b); float a11 = e * (dampr * sin(a) / d + cos(a)); float a12 = e * sin(a) / angfreq / d; float a21 = e * (-angfreq) * sin(a) / d; float a22 = e * (-dampr * sin(a) / d + cos(a)); float b11 = e * ((2 * dampr * dampr - 1 + b)*sin(a) / pow(angfreq, 2) / a + (2 * dampr + angfreq * deltime)*cos(a) / pow(angfreq, 3) / deltime) - 2 * dampr / pow(angfreq, 3) / deltime; float b12 = e * ((1 - 2 * dampr * dampr) * sin(a) / pow(angfreq, 2) / a - 2 * dampr *cos(a) / pow(angfreq, 3) / deltime) - 1 / pow(angfreq, 2) + 2 * dampr / pow(angfreq, 3) / deltime; float b21 = e * ((-dampr - angfreq*deltime)*sin(a) / angfreq / a - cos(a) / pow(angfreq, 2) / deltime) + 1 / pow(angfreq, 2) / deltime; float b22 = e * (dampr *sin(a) / angfreq / a + cos(a) / pow(angfreq, 2) / deltime) - 1 / pow(angfreq, 2) / deltime; float *tempdis = new float[numTimeElems]; float *tempvel = new 
float[numTimeElems]; float *tempacc = new float[numTimeElems]; tempdis[0] = tempvel[0] = tempacc[0] = 0; float max = 0; for (int k = 1; k < numTimeElems; ++k){ tempdis[k] = a11 * tempdis[k - 1] + a12 * tempvel[k - 1] + b11 * inputASW[k - 1] + b12 * inputASW[k]; tempvel[k] = a21 * tempdis[k - 1] + a22 * tempvel[k - 1] + b21 * inputASW[k - 1] + b22 * inputASW[k]; tempacc[k] = -(2 * dampr * angfreq * tempvel[k] + angfreq * angfreq * tempdis[k]); if (abs(tempacc[k]) > max) max = abs(tempacc[k]); } return max; } /** * Host main routine */ void assertError(hipError_t err, char *prompt){ if (err != hipSuccess) { fprintf(stderr, prompt, hipGetErrorString(err)); exit(EXIT_FAILURE); } } float envelopeF(float t, float v){ if (t <= 2) return v * (t * t / 4); else if (t >= 16) return v * exp(-(2.5 / (0.4 * 20)) * (t - 16)); else return v; } float findMax(float *A, int l){ float max = 0; for (int i = 0; i < l; ++i) if (abs(A[i]) > max) max = abs(A[i]); return max; } void queryCUDACard(){ int deviceCount, device; int gpuDeviceCount = 0; struct hipDeviceProp_t properties; hipError_t cudaResultCode = hipGetDeviceCount(&deviceCount); if (cudaResultCode != hipSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < deviceCount; ++device) { hipGetDeviceProperties(&properties, device); if (properties.major != 9999) /* 9999 means emulation only */ if (device == 0) { printf("multiProcessorCount %d\n", properties.multiProcessorCount); printf("maxThreadsPerMultiProcessor %d\n", properties.maxThreadsPerMultiProcessor); } } } #define PREPARE_VAR(name, size) float *name = NULL; \ assertError(hipMalloc((void **)&name, size), "Failed to allocate device var (error code %s)!\n"); \ /*int main(void) { queryCUDACard(); hipError_t err = hipSuccess; hiprandState_t *rState; hipMalloc(&rState, sizeof(hiprandState_t)); setupPRNG<<<1, 1>>>(rState); // Print the vector length to be used, and compute its size int numOmegaElems = OMEGANUM; int numTimeElems = TIMENUM; float dOmega = 0.01; float dTime = (20.0 / (float)numTimeElems); size_t size = numOmegaElems * sizeof(float); size_t size2 = numTimeElems * sizeof(float); //printf("[Vector addition of %d elements]\n", numElements); float *hTime = new float[size]; float *hOmega = new float[size]; float *hOTime = new float[size2]; // Initialize the host input vectors for (int i = 0; i < numOmegaElems; ++i){ hOmega[i] = dOmega * (i + 1); hTime[i] = 3.14159265358 * 2 / hOmega[i]; } for (int i = 0; i < numTimeElems; ++i) hOTime[i] = dTime * (i + 1); PREPARE_VAR(inputOmega, size); PREPARE_VAR(inputTime, size2); PREPARE_VAR(zARS, size); float *hARS = new float[size]; PREPARE_VAR(zWRS, size); PREPARE_VAR(zASW, size); float *hASW = new float[size2]; printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(inputOmega, hOmega, size, hipMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); err = hipMemcpy(inputTime, hOTime, size2, hipMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); // Launch the Vector Add CUDA Kernel int TPB = 256; int blocksPerGrid = (numOmegaElems + TPB - 1) / TPB; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, TPB); genARS<<<blocksPerGrid, TPB>>>(inputOmega, zARS, numOmegaElems); genWRS<<<blocksPerGrid, TPB>>>(zARS, inputOmega, zWRS, 20, numOmegaElems); genASW<<<TPB, (numTimeElems + TPB - 1) / TPB>>>(zWRS, inputOmega, inputTime, zASW, numTimeElems, numOmegaElems, rState); 
printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(hARS, zARS, size, hipMemcpyDeviceToHost); assertError(err, "Failed to copy ARS from device to host (error code %s)!\n"); err = hipMemcpy(hASW, zASW, size2, hipMemcpyDeviceToHost); assertError(err, "Failed to copy ASW from device to host (error code %s)!\n"); printf("Done\n"); ofstream output, sgs, newars, ars; string tttt = "D:\\wave.dat"; output.open(tttt); //sgs.open("D:\\test.sgs"); newars.open("D:\\newars.dat"); ars.open("D:\\ars.dat"); printf("Output to gnuplot\n"); output << "#x y" << endl; //sgs << "*SGSw\n*TITLE, zzz\n*TITLE, \n*X-AXIS, Time(sec)\n*Y-AXIS, Ground Accel. (g)\n*UNIT&TYPE, GRAV, ACCEL\n*FLAGS, 0, 0\n*DATA\n"; for (int i = 0; i < numTimeElems; ++i) hASW[i] = envelopeF(hOTime[i], hASW[i]); float maxV = findMax(hASW, numTimeElems); //printf("%f\n", maxV); for (int i = 0; i < numTimeElems; ++i){ float v = hASW[i] / maxV * 0.0218; output << hOTime[i] << " " << v << endl; //sgs << hOTime[i] << "," << v << endl; hASW[i] = v; } //sgs << "*ENDDATA\n"; output.close(); //sgs.close(); ars << "#x y" << endl; for (int i = 0; i < numOmegaElems; ++i){ ars << hTime[i] << " " << hARS[i] << endl; } ars.close(); float *hARS2 = new float[size2]; for (int i = 0; i < numTimeElems; ++i){ hARS2[i] = regenARS(hOTime[i], hASW, numTimeElems); } newars << "#x y" << endl; float *dd = new float[numTimeElems]; for (int i = 0; i < numTimeElems; ++i){ newars << hOTime[i] << " " << hARS2[i] << endl; float tom = 3.1415926 * 2 / hOTime[i]; dd[i] = hARS2[i] - hARS[(int)(tom / dOmega)]; } newars.close(); float avg = 0, sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += dd[i]; avg = sum / numTimeElems; sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += pow(dd[i] - avg, 2); avg = sum / numTimeElems * 1000000; printf("S: %f\n", avg); // Free device global memory err = hipFree(inputOmega); err = hipFree(inputTime); err = hipFree(zARS); err = hipFree(zWRS); err = hipFree(zASW); free(hOmega); free(hTime); free(hOTime); free(hARS2); free(hARS); err = hipDeviceReset(); assertError(err, "Failed to deinitialize the device! 
error=%s\n"); printf("Done\n"); //system("\"C:\\Program Files\\gnuplot\\bin\\gnuplot.exe\" D:\\output.plt"); return 0; }*/ float cuWave(string outputFile, string outputARS1, string outputARS2) { hipError_t err = hipSuccess; hiprandState_t *rState; hipMalloc(&rState, sizeof(hiprandState_t)); setupPRNG << <1, 1 >> >(rState); // Print the vector length to be used, and compute its size int numOmegaElems = OMEGANUM; int numTimeElems = TIMENUM; float dOmega = 0.01; float dTime = (20.0 / (float)numTimeElems); size_t size = numOmegaElems * sizeof(float); size_t size2 = numTimeElems * sizeof(float); //printf("[Vector addition of %d elements]\n", numElements); float *hTime = new float[size]; float *hOmega = new float[size]; float *hOTime = new float[size2]; // Initialize the host input vectors for (int i = 0; i < numOmegaElems; ++i){ hOmega[i] = dOmega * (i + 1); hTime[i] = 3.14159265358 * 2 / hOmega[i]; } for (int i = 0; i < numTimeElems; ++i) hOTime[i] = dTime * (i + 1); PREPARE_VAR(inputOmega, size); PREPARE_VAR(inputTime, size2); PREPARE_VAR(zARS, size); float *hARS = new float[size]; PREPARE_VAR(zWRS, size); PREPARE_VAR(zASW, size); float *hASW = new float[size2]; //printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(inputOmega, hOmega, size, hipMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); err = hipMemcpy(inputTime, hOTime, size2, hipMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); // Launch the Vector Add CUDA Kernel int TPB = 256; int blocksPerGrid = (numOmegaElems + TPB - 1) / TPB; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, TPB); genARS << <blocksPerGrid, TPB >> >(inputOmega, zARS, numOmegaElems); genWRS << <blocksPerGrid, TPB >> >(zARS, inputOmega, zWRS, 20, numOmegaElems); genASW << <TPB, (numTimeElems + TPB - 1) / TPB >> >(zWRS, inputOmega, inputTime, zASW, numTimeElems, numOmegaElems, rState); //printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(hARS, zARS, size, hipMemcpyDeviceToHost); assertError(err, "Failed to copy ARS from device to host (error code %s)!\n"); err = hipMemcpy(hASW, zASW, size2, hipMemcpyDeviceToHost); assertError(err, "Failed to copy ASW from device to host (error code %s)!\n"); printf("CUDA Device Done\n"); ofstream output, sgs, newars, ars; output.open(outputFile); //sgs.open("D:\\test.sgs"); newars.open(outputARS2); //printf("Output to gnuplot\n"); output << "#x y" << endl; //sgs << "*SGSw\n*TITLE, zzz\n*TITLE, \n*X-AXIS, Time(sec)\n*Y-AXIS, Ground Accel. 
(g)\n*UNIT&TYPE, GRAV, ACCEL\n*FLAGS, 0, 0\n*DATA\n"; for (int i = 0; i < numTimeElems; ++i) hASW[i] = envelopeF(hOTime[i], hASW[i]); float maxV = findMax(hASW, numTimeElems); //printf("%f\n", maxV); for (int i = 0; i < numTimeElems; ++i){ float v = hASW[i] / maxV * 0.0218; output << hOTime[i] << " " << v << endl; //sgs << hOTime[i] << "," << v << endl; hASW[i] = v; } //sgs << "*ENDDATA\n"; output.close(); //sgs.close(); if (!outputARS1.empty()){ ars.open(outputARS1); ars << "#x y" << endl; for (int i = 0; i < numOmegaElems; ++i){ ars << hTime[i] << " " << hARS[i] << endl; } ars.close(); } float *hARS2 = new float[size2]; for (int i = 0; i < numTimeElems; ++i){ hARS2[i] = regenARS(hOTime[i], hASW, numTimeElems); } newars << "#x y" << endl; float *dd = new float[numTimeElems]; for (int i = 0; i < numTimeElems; ++i){ newars << hOTime[i] << " " << hARS2[i] << endl; float tom = 3.1415926 * 2 / hOTime[i]; dd[i] = hARS2[i] - hARS[(int)(tom / dOmega)]; } newars.close(); float avg = 0, sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += dd[i]; avg = sum / numTimeElems; sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += pow(dd[i] - avg, 2); avg = sum / numTimeElems * 1000000; //printf("S: %f\n", avg); // Free device global memory err = hipFree(inputOmega); err = hipFree(inputTime); err = hipFree(zARS); err = hipFree(zWRS); err = hipFree(zASW); free(hOmega); free(hTime); free(hOTime); free(hARS2); free(hARS); err = hipDeviceReset(); assertError(err, "Failed to deinitialize the device! error=%s\n"); printf("Done\n"); return avg; } int main(void){ cout << "cuQuake" << endl; queryCUDACard(); float *dd = new float[10]; for (int i = 0; i < 10; ++i){ string ofn = "D:\\wave_" + to_string(i) + ".dat"; string ofnars1 = "D:\\ars_" + to_string(i) + ".dat"; string ofnars2 = "D:\\newars_" + to_string(i) + ".dat"; if (i==0) dd[i] = cuWave(ofn, ofnars1, ofnars2); else dd[i] = cuWave(ofn, "", ofnars2); } sort(dd, dd + 10); cout << "Best:" << dd[0] << " Worst:" << dd[9] << endl; return 0; }
d1f5755233cf8baf1b4d35372c72933d8725f291.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <string> #include <fstream> #include <iostream> #include <algorithm> #include <cuda_runtime.h> #include <curand.h> #include <curand_kernel.h> using namespace std; /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ #define OMEGANUM 99840 #define TIMENUM 512 //__constant__ float gOmega[OMEGANUM]; //__constant__ float gTime[OMEGANUM]; __global__ void genARS(const float *inputOmega, float *outputARS, int numElems){ const float PI = 3.14159265358; const float A = 0.0484; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElems){ float omega = inputOmega[i]; float t = PI * 2 / omega; if (t < 0.1) outputARS[i] = A * (5.5 * t + 0.45); else if (t < 0.35) outputARS[i] = A; else outputARS[i] = A * (float)(0.35 / t); } } __global__ void genWRS(const float *inputARS, const float *inputOmega, float *outputWRS, float duration, int numElems){ const float PI = 3.14159265358; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElems){ outputWRS[i] = 0.05 * (inputARS[i] * inputARS[i]) / (-1 * (PI * inputOmega[i]) * log(-PI * log(0.85) / (inputOmega[i] * duration))); if (outputWRS[i] < 0) outputWRS[i] = 0; } } __global__ void setupPRNG(curandState *state){ int idx = threadIdx.x + blockDim.x * blockIdx.x; curand_init(731, idx, 0, &state[idx]); } __global__ void genASW(const float *inputWRS, const float *inputOmega, const float *inputTime, float *outputASW, int numTimeElems, int numOmegaElems, curandState *state){ const float PI = 3.14159265358; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numTimeElems){ float t = inputTime[i]; float sum = 0; for (int j = 0; j < numOmegaElems; ++j){ float r = curand_uniform(state + j) * PI * 2; sum += sqrt(4 * 0.01 * inputWRS[j]) * __cosf(inputOmega[j] * t + r); } outputASW[i] = sum; } } float regenARS(const float inputTime, const float *inputASW, int numTimeElems){ const float PI = 3.14159265358; float angfreq = 2 * PI / inputTime; float deltime = 0.0390625; float dampr = 0.05; float a = angfreq * sqrt(1 - dampr * dampr) * deltime; float b = dampr * angfreq * deltime; float d = sqrt(1 - dampr * dampr); float e = exp(-b); float a11 = e * (dampr * sin(a) / d + cos(a)); float a12 = e * sin(a) / angfreq / d; float a21 = e * (-angfreq) * sin(a) / d; float a22 = e * (-dampr * sin(a) / d + cos(a)); float b11 = e * ((2 * dampr * dampr - 1 + b)*sin(a) / pow(angfreq, 2) / a + (2 * dampr + angfreq * deltime)*cos(a) / pow(angfreq, 3) / deltime) - 2 * dampr / pow(angfreq, 3) / deltime; float b12 = e * ((1 - 2 * dampr * dampr) * sin(a) / pow(angfreq, 2) / a - 2 * dampr *cos(a) / pow(angfreq, 3) / deltime) - 1 / pow(angfreq, 2) + 2 * dampr / pow(angfreq, 3) / deltime; float b21 = e * ((-dampr - angfreq*deltime)*sin(a) / angfreq / a - cos(a) / pow(angfreq, 2) / deltime) + 1 / pow(angfreq, 2) / deltime; float b22 = e * (dampr *sin(a) / angfreq / a + cos(a) / pow(angfreq, 2) / deltime) - 1 / pow(angfreq, 2) / deltime; float *tempdis = new float[numTimeElems]; float *tempvel = new float[numTimeElems]; float *tempacc = new float[numTimeElems]; tempdis[0] = tempvel[0] = 
tempacc[0] = 0; float max = 0; for (int k = 1; k < numTimeElems; ++k){ tempdis[k] = a11 * tempdis[k - 1] + a12 * tempvel[k - 1] + b11 * inputASW[k - 1] + b12 * inputASW[k]; tempvel[k] = a21 * tempdis[k - 1] + a22 * tempvel[k - 1] + b21 * inputASW[k - 1] + b22 * inputASW[k]; tempacc[k] = -(2 * dampr * angfreq * tempvel[k] + angfreq * angfreq * tempdis[k]); if (abs(tempacc[k]) > max) max = abs(tempacc[k]); } return max; } /** * Host main routine */ void assertError(cudaError_t err, char *prompt){ if (err != cudaSuccess) { fprintf(stderr, prompt, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } float envelopeF(float t, float v){ if (t <= 2) return v * (t * t / 4); else if (t >= 16) return v * exp(-(2.5 / (0.4 * 20)) * (t - 16)); else return v; } float findMax(float *A, int l){ float max = 0; for (int i = 0; i < l; ++i) if (abs(A[i]) > max) max = abs(A[i]); return max; } void queryCUDACard(){ int deviceCount, device; int gpuDeviceCount = 0; struct cudaDeviceProp properties; cudaError_t cudaResultCode = cudaGetDeviceCount(&deviceCount); if (cudaResultCode != cudaSuccess) deviceCount = 0; /* machines with no GPUs can still report one emulation device */ for (device = 0; device < deviceCount; ++device) { cudaGetDeviceProperties(&properties, device); if (properties.major != 9999) /* 9999 means emulation only */ if (device == 0) { printf("multiProcessorCount %d\n", properties.multiProcessorCount); printf("maxThreadsPerMultiProcessor %d\n", properties.maxThreadsPerMultiProcessor); } } } #define PREPARE_VAR(name, size) float *name = NULL; \ assertError(cudaMalloc((void **)&name, size), "Failed to allocate device var (error code %s)!\n"); \ /*int main(void) { queryCUDACard(); cudaError_t err = cudaSuccess; curandState *rState; cudaMalloc(&rState, sizeof(curandState)); setupPRNG<<<1, 1>>>(rState); // Print the vector length to be used, and compute its size int numOmegaElems = OMEGANUM; int numTimeElems = TIMENUM; float dOmega = 0.01; float dTime = (20.0 / (float)numTimeElems); size_t size = numOmegaElems * sizeof(float); size_t size2 = numTimeElems * sizeof(float); //printf("[Vector addition of %d elements]\n", numElements); float *hTime = new float[size]; float *hOmega = new float[size]; float *hOTime = new float[size2]; // Initialize the host input vectors for (int i = 0; i < numOmegaElems; ++i){ hOmega[i] = dOmega * (i + 1); hTime[i] = 3.14159265358 * 2 / hOmega[i]; } for (int i = 0; i < numTimeElems; ++i) hOTime[i] = dTime * (i + 1); PREPARE_VAR(inputOmega, size); PREPARE_VAR(inputTime, size2); PREPARE_VAR(zARS, size); float *hARS = new float[size]; PREPARE_VAR(zWRS, size); PREPARE_VAR(zASW, size); float *hASW = new float[size2]; printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(inputOmega, hOmega, size, cudaMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); err = cudaMemcpy(inputTime, hOTime, size2, cudaMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); // Launch the Vector Add CUDA Kernel int TPB = 256; int blocksPerGrid = (numOmegaElems + TPB - 1) / TPB; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, TPB); genARS<<<blocksPerGrid, TPB>>>(inputOmega, zARS, numOmegaElems); genWRS<<<blocksPerGrid, TPB>>>(zARS, inputOmega, zWRS, 20, numOmegaElems); genASW<<<TPB, (numTimeElems + TPB - 1) / TPB>>>(zWRS, inputOmega, inputTime, zASW, numTimeElems, numOmegaElems, rState); printf("Copy output data from the CUDA device to the host memory\n"); err = 
cudaMemcpy(hARS, zARS, size, cudaMemcpyDeviceToHost); assertError(err, "Failed to copy ARS from device to host (error code %s)!\n"); err = cudaMemcpy(hASW, zASW, size2, cudaMemcpyDeviceToHost); assertError(err, "Failed to copy ASW from device to host (error code %s)!\n"); printf("Done\n"); ofstream output, sgs, newars, ars; string tttt = "D:\\wave.dat"; output.open(tttt); //sgs.open("D:\\test.sgs"); newars.open("D:\\newars.dat"); ars.open("D:\\ars.dat"); printf("Output to gnuplot\n"); output << "#x y" << endl; //sgs << "*SGSw\n*TITLE, zzz\n*TITLE, \n*X-AXIS, Time(sec)\n*Y-AXIS, Ground Accel. (g)\n*UNIT&TYPE, GRAV, ACCEL\n*FLAGS, 0, 0\n*DATA\n"; for (int i = 0; i < numTimeElems; ++i) hASW[i] = envelopeF(hOTime[i], hASW[i]); float maxV = findMax(hASW, numTimeElems); //printf("%f\n", maxV); for (int i = 0; i < numTimeElems; ++i){ float v = hASW[i] / maxV * 0.0218; output << hOTime[i] << " " << v << endl; //sgs << hOTime[i] << "," << v << endl; hASW[i] = v; } //sgs << "*ENDDATA\n"; output.close(); //sgs.close(); ars << "#x y" << endl; for (int i = 0; i < numOmegaElems; ++i){ ars << hTime[i] << " " << hARS[i] << endl; } ars.close(); float *hARS2 = new float[size2]; for (int i = 0; i < numTimeElems; ++i){ hARS2[i] = regenARS(hOTime[i], hASW, numTimeElems); } newars << "#x y" << endl; float *dd = new float[numTimeElems]; for (int i = 0; i < numTimeElems; ++i){ newars << hOTime[i] << " " << hARS2[i] << endl; float tom = 3.1415926 * 2 / hOTime[i]; dd[i] = hARS2[i] - hARS[(int)(tom / dOmega)]; } newars.close(); float avg = 0, sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += dd[i]; avg = sum / numTimeElems; sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += pow(dd[i] - avg, 2); avg = sum / numTimeElems * 1000000; printf("S: %f\n", avg); // Free device global memory err = cudaFree(inputOmega); err = cudaFree(inputTime); err = cudaFree(zARS); err = cudaFree(zWRS); err = cudaFree(zASW); free(hOmega); free(hTime); free(hOTime); free(hARS2); free(hARS); err = cudaDeviceReset(); assertError(err, "Failed to deinitialize the device! 
error=%s\n"); printf("Done\n"); //system("\"C:\\Program Files\\gnuplot\\bin\\gnuplot.exe\" D:\\output.plt"); return 0; }*/ float cuWave(string outputFile, string outputARS1, string outputARS2) { cudaError_t err = cudaSuccess; curandState *rState; cudaMalloc(&rState, sizeof(curandState)); setupPRNG << <1, 1 >> >(rState); // Print the vector length to be used, and compute its size int numOmegaElems = OMEGANUM; int numTimeElems = TIMENUM; float dOmega = 0.01; float dTime = (20.0 / (float)numTimeElems); size_t size = numOmegaElems * sizeof(float); size_t size2 = numTimeElems * sizeof(float); //printf("[Vector addition of %d elements]\n", numElements); float *hTime = new float[size]; float *hOmega = new float[size]; float *hOTime = new float[size2]; // Initialize the host input vectors for (int i = 0; i < numOmegaElems; ++i){ hOmega[i] = dOmega * (i + 1); hTime[i] = 3.14159265358 * 2 / hOmega[i]; } for (int i = 0; i < numTimeElems; ++i) hOTime[i] = dTime * (i + 1); PREPARE_VAR(inputOmega, size); PREPARE_VAR(inputTime, size2); PREPARE_VAR(zARS, size); float *hARS = new float[size]; PREPARE_VAR(zWRS, size); PREPARE_VAR(zASW, size); float *hASW = new float[size2]; //printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(inputOmega, hOmega, size, cudaMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); err = cudaMemcpy(inputTime, hOTime, size2, cudaMemcpyHostToDevice); assertError(err, "Failed to copy from host to device (error code %s)!\n"); // Launch the Vector Add CUDA Kernel int TPB = 256; int blocksPerGrid = (numOmegaElems + TPB - 1) / TPB; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, TPB); genARS << <blocksPerGrid, TPB >> >(inputOmega, zARS, numOmegaElems); genWRS << <blocksPerGrid, TPB >> >(zARS, inputOmega, zWRS, 20, numOmegaElems); genASW << <TPB, (numTimeElems + TPB - 1) / TPB >> >(zWRS, inputOmega, inputTime, zASW, numTimeElems, numOmegaElems, rState); //printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(hARS, zARS, size, cudaMemcpyDeviceToHost); assertError(err, "Failed to copy ARS from device to host (error code %s)!\n"); err = cudaMemcpy(hASW, zASW, size2, cudaMemcpyDeviceToHost); assertError(err, "Failed to copy ASW from device to host (error code %s)!\n"); printf("CUDA Device Done\n"); ofstream output, sgs, newars, ars; output.open(outputFile); //sgs.open("D:\\test.sgs"); newars.open(outputARS2); //printf("Output to gnuplot\n"); output << "#x y" << endl; //sgs << "*SGSw\n*TITLE, zzz\n*TITLE, \n*X-AXIS, Time(sec)\n*Y-AXIS, Ground Accel. 
(g)\n*UNIT&TYPE, GRAV, ACCEL\n*FLAGS, 0, 0\n*DATA\n"; for (int i = 0; i < numTimeElems; ++i) hASW[i] = envelopeF(hOTime[i], hASW[i]); float maxV = findMax(hASW, numTimeElems); //printf("%f\n", maxV); for (int i = 0; i < numTimeElems; ++i){ float v = hASW[i] / maxV * 0.0218; output << hOTime[i] << " " << v << endl; //sgs << hOTime[i] << "," << v << endl; hASW[i] = v; } //sgs << "*ENDDATA\n"; output.close(); //sgs.close(); if (!outputARS1.empty()){ ars.open(outputARS1); ars << "#x y" << endl; for (int i = 0; i < numOmegaElems; ++i){ ars << hTime[i] << " " << hARS[i] << endl; } ars.close(); } float *hARS2 = new float[size2]; for (int i = 0; i < numTimeElems; ++i){ hARS2[i] = regenARS(hOTime[i], hASW, numTimeElems); } newars << "#x y" << endl; float *dd = new float[numTimeElems]; for (int i = 0; i < numTimeElems; ++i){ newars << hOTime[i] << " " << hARS2[i] << endl; float tom = 3.1415926 * 2 / hOTime[i]; dd[i] = hARS2[i] - hARS[(int)(tom / dOmega)]; } newars.close(); float avg = 0, sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += dd[i]; avg = sum / numTimeElems; sum = 0; for (int i = 0; i < numTimeElems; ++i) sum += pow(dd[i] - avg, 2); avg = sum / numTimeElems * 1000000; //printf("S: %f\n", avg); // Free device global memory err = cudaFree(inputOmega); err = cudaFree(inputTime); err = cudaFree(zARS); err = cudaFree(zWRS); err = cudaFree(zASW); free(hOmega); free(hTime); free(hOTime); free(hARS2); free(hARS); err = cudaDeviceReset(); assertError(err, "Failed to deinitialize the device! error=%s\n"); printf("Done\n"); return avg; } int main(void){ cout << "cuQuake" << endl; queryCUDACard(); float *dd = new float[10]; for (int i = 0; i < 10; ++i){ string ofn = "D:\\wave_" + to_string(i) + ".dat"; string ofnars1 = "D:\\ars_" + to_string(i) + ".dat"; string ofnars2 = "D:\\newars_" + to_string(i) + ".dat"; if (i==0) dd[i] = cuWave(ofn, ofnars1, ofnars2); else dd[i] = cuWave(ofn, "", ofnars2); } sort(dd, dd + 10); cout << "Best:" << dd[0] << " Worst:" << dd[9] << endl; return 0; }
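One detail worth flagging in the two versions above: only a single curandState is allocated (cudaMalloc(&rState, sizeof(curandState))) and initialized by setupPRNG<<<1, 1>>>, yet genASW dereferences state + j for every one of the OMEGANUM frequency samples, which reads far beyond the allocated state. A sketch of per-sample state setup, assuming the existing setupPRNG kernel and OMEGANUM constant, might look like this (setupPRNG would also need an index guard in general):

// Hypothetical fix sketch: allocate and seed one curandState per omega sample.
curandState *rState = NULL;
cudaMalloc(&rState, OMEGANUM * sizeof(curandState));
int prngThreads = 256;
int prngBlocks = (OMEGANUM + prngThreads - 1) / prngThreads;   // 390 blocks for 99840 samples
setupPRNG<<<prngBlocks, prngThreads>>>(rState);                // curand_init(731, idx, 0, &state[idx])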
2f44e257e6c27117c1ee61059670e7d6a7da8869.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>

void foo() {
  int s0;
  const int s1 = 4;
  unsigned int s2;

  int *p0;
  const int *p1 = &s1;
  int *p2 = (int *)&s2;

  //int a0[];
  int a1[5];
  int a2[10][20];

  extern __shared__ int es0[];
  extern __shared__ int *es1;
  __shared__ int ss0[5];

  int3 v0;
  int3 *pv0 = (int3 *)&v0;
}
2f44e257e6c27117c1ee61059670e7d6a7da8869.cu
#include <wb.h>

void foo() {
  int s0;
  const int s1 = 4;
  unsigned int s2;

  int *p0;
  const int *p1 = &s1;
  int *p2 = (int *)&s2;

  //int a0[];
  int a1[5];
  int a2[10][20];

  extern __shared__ int es0[];
  extern __shared__ int *es1;
  __shared__ int ss0[5];

  int3 v0;
  int3 *pv0 = (int3 *)&v0;
}
a521d07195fc46b32a8b81330266066b5483524c.hip
// !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file    check/core/solver/l2l.cu
/// @brief   Test nbfmm::Solver::l2l
///
/// @author  Mu Yang <[email protected]>
///

#include "../solver.hpp"

void TestNbfmmSolver::l2l() {
  hipError_t cuda_status;

  // Allocate memory
  float2 cell_effect0[num_level][base_dim][base_dim];
  float2 cell_effect[base_dim][base_dim];

  // Copy random vectors
  memcpy(cell_effect0, random_cell_position, base_dim * base_dim * num_level * sizeof(float2));

  // Copy input vectors
  cuda_status = hipMemcpy(solver.gpuptr_cell_effect_, cell_effect0, base_dim * base_dim * num_level * sizeof(float2), hipMemcpyHostToDevice);
  CPPUNIT_ASSERT(cuda_status == hipSuccess);

  // Compute effects
  for ( auto l = num_level-1; l > 0; --l ) {
    int cell_size = 1 << l;
    int offset = cell_size / 2;
    for ( auto j = 0; j < base_dim; j += cell_size ) {
      for ( auto i = 0; i < base_dim; i += cell_size ) {
        cell_effect0[l-1][j][i]               += cell_effect0[l][j][i];
        cell_effect0[l-1][j][i+offset]        += cell_effect0[l][j][i];
        cell_effect0[l-1][j+offset][i]        += cell_effect0[l][j][i];
        cell_effect0[l-1][j+offset][i+offset] += cell_effect0[l][j][i];
      }
    }
  }

  // Run l2l
  solver.l2l();

  // Copy output vectors
  cuda_status = hipMemcpy(cell_effect, solver.gpuptr_cell_effect_, base_dim * base_dim * sizeof(float2), hipMemcpyDeviceToHost);
  CPPUNIT_ASSERT(cuda_status == hipSuccess);

  // Check
  for ( auto j = 0; j < base_dim; ++j ) {
    for ( auto i = 0; i < base_dim; ++i ) {
      // printf("\n (%2d, %2d): (%12.4f, %12.4f) | (%12.4f, %12.4f)", i, j,
      //        cell_effect0[0][j][i].x, cell_effect0[0][j][i].y, cell_effect[j][i].x, cell_effect[j][i].y);
      CPPUNIT_ASSERT_DOUBLES_EQUAL(cell_effect0[0][j][i].x, cell_effect[j][i].x, 1e-4);
      CPPUNIT_ASSERT_DOUBLES_EQUAL(cell_effect0[0][j][i].y, cell_effect[j][i].y, 1e-4);
    }
  }
}
a521d07195fc46b32a8b81330266066b5483524c.cu
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file    check/core/solver/l2l.cu
/// @brief   Test nbfmm::Solver::l2l
///
/// @author  Mu Yang <[email protected]>
///

#include "../solver.hpp"

void TestNbfmmSolver::l2l() {
  cudaError_t cuda_status;

  // Allocate memory
  float2 cell_effect0[num_level][base_dim][base_dim];
  float2 cell_effect[base_dim][base_dim];

  // Copy random vectors
  memcpy(cell_effect0, random_cell_position, base_dim * base_dim * num_level * sizeof(float2));

  // Copy input vectors
  cuda_status = cudaMemcpy(solver.gpuptr_cell_effect_, cell_effect0, base_dim * base_dim * num_level * sizeof(float2), cudaMemcpyHostToDevice);
  CPPUNIT_ASSERT(cuda_status == cudaSuccess);

  // Compute effects
  for ( auto l = num_level-1; l > 0; --l ) {
    int cell_size = 1 << l;
    int offset = cell_size / 2;
    for ( auto j = 0; j < base_dim; j += cell_size ) {
      for ( auto i = 0; i < base_dim; i += cell_size ) {
        cell_effect0[l-1][j][i]               += cell_effect0[l][j][i];
        cell_effect0[l-1][j][i+offset]        += cell_effect0[l][j][i];
        cell_effect0[l-1][j+offset][i]        += cell_effect0[l][j][i];
        cell_effect0[l-1][j+offset][i+offset] += cell_effect0[l][j][i];
      }
    }
  }

  // Run l2l
  solver.l2l();

  // Copy output vectors
  cuda_status = cudaMemcpy(cell_effect, solver.gpuptr_cell_effect_, base_dim * base_dim * sizeof(float2), cudaMemcpyDeviceToHost);
  CPPUNIT_ASSERT(cuda_status == cudaSuccess);

  // Check
  for ( auto j = 0; j < base_dim; ++j ) {
    for ( auto i = 0; i < base_dim; ++i ) {
      // printf("\n (%2d, %2d): (%12.4f, %12.4f) | (%12.4f, %12.4f)", i, j,
      //        cell_effect0[0][j][i].x, cell_effect0[0][j][i].y, cell_effect[j][i].x, cell_effect[j][i].y);
      CPPUNIT_ASSERT_DOUBLES_EQUAL(cell_effect0[0][j][i].x, cell_effect[j][i].x, 1e-4);
      CPPUNIT_ASSERT_DOUBLES_EQUAL(cell_effect0[0][j][i].y, cell_effect[j][i].y, 1e-4);
    }
  }
}
e1c27f7e49d74d5fe8e38ebbbcc4e8430a1b6b59.hip
// !!! This is a file automatically generated by hipify!!! /* Based off work by Nelson, et al. Brigham Young University (2010) Adapted by Kevin Yuh (2015) Modified by Jordan Bonilla and Matthew Cedeno (2016) */ #include <stdio.h> #include <hip/hip_runtime.h> #include <assert.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <hipfft.h> #include <time.h> #include "ta_utilities.hpp" #define PI 3.14159265358979 /* Check errors on CUDA runtime functions */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); exit(code); } } /* Check errors on cuFFT functions */ void gpuFFTchk(int errval){ if (errval != HIPFFT_SUCCESS){ printf("Failed FFT call, error code %d\n", errval); } } /* Check errors on CUDA kernel calls */ void checkCUDAKernelError() { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "Error %s\n", hipGetErrorString(err)); } else { fprintf(stderr, "No kernel error detected\n"); } } // Performs a high pass filter on the sinogram data __global__ void cudaHighPassKernel(hipfftComplex *raw_data, const int sinogram_width, const int size){ uint idx = blockDim.x * blockIdx.x + threadIdx.x; // The scaling factor is 0 at sinogram_width / 2, and scales to 1 at 0 and // sinogram width, so we first shift down sinogram_width / 2, putting the // center at 0. This means the scaling factor is then abs(shifted value) / // (sinogram_width / 2) float scalingFactor = (idx % sinogram_width) - sinogram_width / 2.0; scalingFactor = abs(scalingFactor)/(sinogram_width / 2.0); // Scale every value by the scaling factor while(idx < size){ raw_data[idx].x = raw_data[idx].x * scalingFactor; raw_data[idx].y = raw_data[idx].y * scalingFactor; idx += blockDim.x * gridDim.x; } } // Calls the high pass kernel void cudaCallHighPassKernel(const unsigned int blocks, const unsigned int threadsPerBlock, hipfftComplex *raw_data, const int sinogram_width, const int size){ hipLaunchKernelGGL(( cudaHighPassKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data, sinogram_width, size); } __global__ // Just takes the real value of every complex sinogram data and puts it into the // output. void cudaCmplxToFloat(const hipfftComplex *raw_data, float *output_data, int size){ uint idx = threadIdx.x + blockIdx.x * blockDim.x; while(idx < size){ output_data[idx] = raw_data[idx].x; idx += blockDim.x * gridDim.x; } } // Calls the complex to float kernel void cudaCallCmplxToFloat(unsigned int blocks, unsigned int threadsPerBlock, const hipfftComplex *raw_data, float *output_data, int size){ hipLaunchKernelGGL(( cudaCmplxToFloat), dim3(blocks), dim3(threadsPerBlock), 0, 0, raw_data, output_data, size); } __global__ // Performs the backprojection of the image to reconstruct the sinogram void cudaBackprojection(const float *input_data, float *output_data, const int sinogram_width, const int height, const int angles, const int size){ uint idx = threadIdx.x + blockIdx.x * blockDim.x; while(idx < size){ // Calculate the geometric location of our current pixel int geo_x = (idx % height) - height / 2; int geo_y = -1 * ((int)(idx / height)) + height / 2; // For each angle, check if it is an edge case. Otherwise, calculate the // distance we are from the center of the angle's emitter. Then find the // value of the sinogram for that pixel. 
for(int i = 0; i < angles; i++){ float theta = i * PI / angles; int d; if(theta == 0){ d = geo_x; } else if(theta == PI / 2){ d = geo_y; } else{ float m = -1 * (cos(theta) / sin(theta)); float q = -1 / m; float x_i = (geo_y - m * geo_x) / (q - m); float y_i = q * x_i; d = (int) sqrt(pow(x_i, 2) + pow(y_i, 2)); if((q > 0 && x_i < 0) || (q < 0 && x_i > 0)){ d *= -1; } } output_data[idx] += input_data[sinogram_width * d + i]; } idx += blockDim.x * gridDim.x; } } void cudaCallBackprojection(unsigned int blocks, unsigned int threadsPerBlock, const float *input_data, float *output_data, const int sinogram_width, const int height, const int angles, const int size){ hipLaunchKernelGGL(( cudaBackprojection), dim3(blocks), dim3(threadsPerBlock), 0, 0, input_data, output_data, sinogram_width, height, angles, size); } int main(int argc, char** argv){ // These functions allow you to select the least utilized GPU // on your system as well as enforce a time limit on program execution. // Please leave these enabled as a courtesy to your fellow classmates // if you are using a shared computer. You may ignore or remove these // functions if you are running on your local machine. TA_Utilities::select_least_utilized_GPU(); int max_time_allowed_in_seconds = 10; TA_Utilities::enforce_time_limit(max_time_allowed_in_seconds); // Begin timer and check for the correct number of inputs time_t start = clock(); if (argc != 7){ fprintf(stderr, "Incorrect number of arguments.\n\n"); fprintf(stderr, "\nArguments: \n \ < Input sinogram text file's name > \n \ < Width or height of original image, whichever is larger > \n \ < Number of angles in sinogram >\n \ < threads per block >\n \ < number of blocks >\n \ < output text file's name >\n"); exit(EXIT_FAILURE); } /********** Parameters **********/ int width = atoi(argv[2]); int height = width; int sinogram_width = (int)ceilf( height * sqrt(2) ); int nAngles = atoi(argv[3]); int threadsPerBlock = atoi(argv[4]); int nBlocks = atoi(argv[5]); /********** Data storage *********/ // GPU DATA STORAGE hipfftComplex *dev_sinogram_cmplx; float *dev_sinogram_float; float* dev_output; // Image storage hipfftComplex *sinogram_host; size_t size_result = width*height*sizeof(float); float *output_host = (float *)malloc(size_result); /*********** Set up IO, Read in data ************/ sinogram_host = (hipfftComplex *)malloc( sinogram_width*nAngles*sizeof(hipfftComplex) ); FILE *dataFile = fopen(argv[1],"r"); if (dataFile == NULL){ fprintf(stderr, "Sinogram file missing\n"); exit(EXIT_FAILURE); } FILE *outputFile = fopen(argv[6], "w"); if (outputFile == NULL){ fprintf(stderr, "Output file cannot be written\n"); exit(EXIT_FAILURE); } int j, i; for(i = 0; i < nAngles * sinogram_width; i++){ fscanf(dataFile,"%f",&sinogram_host[i].x); sinogram_host[i].y = 0; } fclose(dataFile); /*********** Assignment starts here *********/ /* TODO: Allocate memory for all GPU storage above, copy input sinogram over to dev_sinogram_cmplx. */ gpuErrchk(hipMalloc((void**) &dev_sinogram_float, sizeof(float) * size_result)); gpuErrchk(hipMalloc((void**) &dev_sinogram_cmplx, sizeof(hipfftComplex) * sinogram_width * nAngles)); gpuErrchk(hipMalloc((void**) &dev_output, sizeof(float) * size_result)); gpuErrchk(hipMemcpy(dev_sinogram_cmplx, sinogram_host, sizeof(hipfftComplex) * sinogram_width * nAngles, hipMemcpyHostToDevice)); /* TODO 1: Implement the high-pass filter: - Use cuFFT for the forward FFT - Create your own kernel for the frequency scaling. 
- Use cuFFT for the inverse FFT - extract real components to floats - Free the original sinogram (dev_sinogram_cmplx) Note: If you want to deal with real-to-complex and complex-to-real transforms in cuFFT, you'll have to slightly change our code above. */ hipfftHandle plan; hipfftPlan1d(&plan, sinogram_width, HIPFFT_C2C, nAngles); hipfftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, HIPFFT_FORWARD); cudaCallHighPassKernel(nBlocks, threadsPerBlock, dev_sinogram_cmplx, sinogram_width, sinogram_width * nAngles); hipfftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, HIPFFT_BACKWARD); cudaCallCmplxToFloat(nBlocks, threadsPerBlock, dev_sinogram_cmplx, dev_sinogram_float, sinogram_width * height); hipFree(dev_sinogram_cmplx); hipfftDestroy(plan); /* TODO 2: Implement backprojection. - Allocate memory for the output image. got it - Create your own kernel to accelerate backprojection. - Copy the reconstructed image back to output_host. - Free all remaining memory on the GPU. */ cudaCallBackprojection(nBlocks, threadsPerBlock, dev_sinogram_float, dev_output, sinogram_width, height, nAngles, size_result); fprintf(stderr, "backproject"); gpuErrchk(hipMemcpy(output_host, dev_output, sizeof(float) * size_result, hipMemcpyDeviceToHost)); fprintf(stderr, "copying back"); hipFree(dev_sinogram_float); hipFree(dev_output); /* Export image data. */ for(j = 0; j < width; j++){ for(i = 0; i < height; i++){ fprintf(outputFile, "%e ",output_host[j*width + i]); } fprintf(outputFile, "\n"); } /* Cleanup: Free host memory, close files. */ free(sinogram_host); free(output_host); fclose(outputFile); printf("CT reconstruction complete. Total run time: %f seconds\n", (float) (clock() - start) / 1000.0); return 0; }
e1c27f7e49d74d5fe8e38ebbbcc4e8430a1b6b59.cu
/* Based off work by Nelson, et al. Brigham Young University (2010) Adapted by Kevin Yuh (2015) Modified by Jordan Bonilla and Matthew Cedeno (2016) */ #include <stdio.h> #include <cuda.h> #include <assert.h> #include <cuda_runtime.h> #include <stdio.h> #include <cufft.h> #include <time.h> #include "ta_utilities.hpp" #define PI 3.14159265358979 /* Check errors on CUDA runtime functions */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); exit(code); } } /* Check errors on cuFFT functions */ void gpuFFTchk(int errval){ if (errval != CUFFT_SUCCESS){ printf("Failed FFT call, error code %d\n", errval); } } /* Check errors on CUDA kernel calls */ void checkCUDAKernelError() { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "Error %s\n", cudaGetErrorString(err)); } else { fprintf(stderr, "No kernel error detected\n"); } } // Performs a high pass filter on the sinogram data __global__ void cudaHighPassKernel(cufftComplex *raw_data, const int sinogram_width, const int size){ uint idx = blockDim.x * blockIdx.x + threadIdx.x; // The scaling factor is 0 at sinogram_width / 2, and scales to 1 at 0 and // sinogram width, so we first shift down sinogram_width / 2, putting the // center at 0. This means the scaling factor is then abs(shifted value) / // (sinogram_width / 2) float scalingFactor = (idx % sinogram_width) - sinogram_width / 2.0; scalingFactor = abs(scalingFactor)/(sinogram_width / 2.0); // Scale every value by the scaling factor while(idx < size){ raw_data[idx].x = raw_data[idx].x * scalingFactor; raw_data[idx].y = raw_data[idx].y * scalingFactor; idx += blockDim.x * gridDim.x; } } // Calls the high pass kernel void cudaCallHighPassKernel(const unsigned int blocks, const unsigned int threadsPerBlock, cufftComplex *raw_data, const int sinogram_width, const int size){ cudaHighPassKernel<<<blocks, threadsPerBlock>>>(raw_data, sinogram_width, size); } __global__ // Just takes the real value of every complex sinogram data and puts it into the // output. void cudaCmplxToFloat(const cufftComplex *raw_data, float *output_data, int size){ uint idx = threadIdx.x + blockIdx.x * blockDim.x; while(idx < size){ output_data[idx] = raw_data[idx].x; idx += blockDim.x * gridDim.x; } } // Calls the complex to float kernel void cudaCallCmplxToFloat(unsigned int blocks, unsigned int threadsPerBlock, const cufftComplex *raw_data, float *output_data, int size){ cudaCmplxToFloat<<<blocks, threadsPerBlock>>>(raw_data, output_data, size); } __global__ // Performs the backprojection of the image to reconstruct the sinogram void cudaBackprojection(const float *input_data, float *output_data, const int sinogram_width, const int height, const int angles, const int size){ uint idx = threadIdx.x + blockIdx.x * blockDim.x; while(idx < size){ // Calculate the geometric location of our current pixel int geo_x = (idx % height) - height / 2; int geo_y = -1 * ((int)(idx / height)) + height / 2; // For each angle, check if it is an edge case. Otherwise, calculate the // distance we are from the center of the angle's emitter. Then find the // value of the sinogram for that pixel. 
for(int i = 0; i < angles; i++){ float theta = i * PI / angles; int d; if(theta == 0){ d = geo_x; } else if(theta == PI / 2){ d = geo_y; } else{ float m = -1 * (cos(theta) / sin(theta)); float q = -1 / m; float x_i = (geo_y - m * geo_x) / (q - m); float y_i = q * x_i; d = (int) sqrt(pow(x_i, 2) + pow(y_i, 2)); if((q > 0 && x_i < 0) || (q < 0 && x_i > 0)){ d *= -1; } } output_data[idx] += input_data[sinogram_width * d + i]; } idx += blockDim.x * gridDim.x; } } void cudaCallBackprojection(unsigned int blocks, unsigned int threadsPerBlock, const float *input_data, float *output_data, const int sinogram_width, const int height, const int angles, const int size){ cudaBackprojection<<<blocks, threadsPerBlock>>>(input_data, output_data, sinogram_width, height, angles, size); } int main(int argc, char** argv){ // These functions allow you to select the least utilized GPU // on your system as well as enforce a time limit on program execution. // Please leave these enabled as a courtesy to your fellow classmates // if you are using a shared computer. You may ignore or remove these // functions if you are running on your local machine. TA_Utilities::select_least_utilized_GPU(); int max_time_allowed_in_seconds = 10; TA_Utilities::enforce_time_limit(max_time_allowed_in_seconds); // Begin timer and check for the correct number of inputs time_t start = clock(); if (argc != 7){ fprintf(stderr, "Incorrect number of arguments.\n\n"); fprintf(stderr, "\nArguments: \n \ < Input sinogram text file's name > \n \ < Width or height of original image, whichever is larger > \n \ < Number of angles in sinogram >\n \ < threads per block >\n \ < number of blocks >\n \ < output text file's name >\n"); exit(EXIT_FAILURE); } /********** Parameters **********/ int width = atoi(argv[2]); int height = width; int sinogram_width = (int)ceilf( height * sqrt(2) ); int nAngles = atoi(argv[3]); int threadsPerBlock = atoi(argv[4]); int nBlocks = atoi(argv[5]); /********** Data storage *********/ // GPU DATA STORAGE cufftComplex *dev_sinogram_cmplx; float *dev_sinogram_float; float* dev_output; // Image storage cufftComplex *sinogram_host; size_t size_result = width*height*sizeof(float); float *output_host = (float *)malloc(size_result); /*********** Set up IO, Read in data ************/ sinogram_host = (cufftComplex *)malloc( sinogram_width*nAngles*sizeof(cufftComplex) ); FILE *dataFile = fopen(argv[1],"r"); if (dataFile == NULL){ fprintf(stderr, "Sinogram file missing\n"); exit(EXIT_FAILURE); } FILE *outputFile = fopen(argv[6], "w"); if (outputFile == NULL){ fprintf(stderr, "Output file cannot be written\n"); exit(EXIT_FAILURE); } int j, i; for(i = 0; i < nAngles * sinogram_width; i++){ fscanf(dataFile,"%f",&sinogram_host[i].x); sinogram_host[i].y = 0; } fclose(dataFile); /*********** Assignment starts here *********/ /* TODO: Allocate memory for all GPU storage above, copy input sinogram over to dev_sinogram_cmplx. */ gpuErrchk(cudaMalloc((void**) &dev_sinogram_float, sizeof(float) * size_result)); gpuErrchk(cudaMalloc((void**) &dev_sinogram_cmplx, sizeof(cufftComplex) * sinogram_width * nAngles)); gpuErrchk(cudaMalloc((void**) &dev_output, sizeof(float) * size_result)); gpuErrchk(cudaMemcpy(dev_sinogram_cmplx, sinogram_host, sizeof(cufftComplex) * sinogram_width * nAngles, cudaMemcpyHostToDevice)); /* TODO 1: Implement the high-pass filter: - Use cuFFT for the forward FFT - Create your own kernel for the frequency scaling. 
- Use cuFFT for the inverse FFT - extract real components to floats - Free the original sinogram (dev_sinogram_cmplx) Note: If you want to deal with real-to-complex and complex-to-real transforms in cuFFT, you'll have to slightly change our code above. */ cufftHandle plan; cufftPlan1d(&plan, sinogram_width, CUFFT_C2C, nAngles); cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_FORWARD); cudaCallHighPassKernel(nBlocks, threadsPerBlock, dev_sinogram_cmplx, sinogram_width, sinogram_width * nAngles); cufftExecC2C(plan, dev_sinogram_cmplx, dev_sinogram_cmplx, CUFFT_INVERSE); cudaCallCmplxToFloat(nBlocks, threadsPerBlock, dev_sinogram_cmplx, dev_sinogram_float, sinogram_width * height); cudaFree(dev_sinogram_cmplx); cufftDestroy(plan); /* TODO 2: Implement backprojection. - Allocate memory for the output image. got it - Create your own kernel to accelerate backprojection. - Copy the reconstructed image back to output_host. - Free all remaining memory on the GPU. */ cudaCallBackprojection(nBlocks, threadsPerBlock, dev_sinogram_float, dev_output, sinogram_width, height, nAngles, size_result); fprintf(stderr, "backproject"); gpuErrchk(cudaMemcpy(output_host, dev_output, sizeof(float) * size_result, cudaMemcpyDeviceToHost)); fprintf(stderr, "copying back"); cudaFree(dev_sinogram_float); cudaFree(dev_output); /* Export image data. */ for(j = 0; j < width; j++){ for(i = 0; i < height; i++){ fprintf(outputFile, "%e ",output_host[j*width + i]); } fprintf(outputFile, "\n"); } /* Cleanup: Free host memory, close files. */ free(sinogram_host); free(output_host); fclose(outputFile); printf("CT reconstruction complete. Total run time: %f seconds\n", (float) (clock() - start) / 1000.0); return 0; }
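The pair above shows the filtering pipeline in both dialects; the following is a minimal sketch that isolates the frequency-domain step using only the wrapper and cuFFT calls already defined in the .cu file (the helper name highPassFilter is illustrative, not part of the original):

#include <cufft.h>
// Sketch: forward FFT -> frequency scaling -> inverse FFT, as done inline in main() above.
// Assumes plan was created with cufftPlan1d(&plan, sinogram_width, CUFFT_C2C, nAngles) and
// data holds sinogram_width * nAngles complex samples on the device. Note that cuFFT's
// inverse transform is unnormalized, so values come back scaled by sinogram_width.
void highPassFilter(cufftHandle plan, cufftComplex *data,
                    int sinogram_width, int nAngles,
                    unsigned int blocks, unsigned int threadsPerBlock) {
    cufftExecC2C(plan, data, data, CUFFT_FORWARD);
    cudaCallHighPassKernel(blocks, threadsPerBlock, data,
                           sinogram_width, sinogram_width * nAngles);
    cufftExecC2C(plan, data, data, CUFFT_INVERSE);
}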
7eebd3289bbfbb569c55999a3960f36e269350f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void calcSoftmaxDivForwardGPU(float *out, float *sum, int batch_size, int in_size_x, unsigned int n) { // int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index<n && *(sum + blockIdx.x)>0.0){ // out[id] = out[id] / *sum; out[index] = out[index] / *(sum + blockIdx.x); } /* original for ( int i = 0; i < in.size.x; ++i ){ out( b, i, 0, 0 ) = out( b, i, 0, 0 ) / sum; } */ }
7eebd3289bbfbb569c55999a3960f36e269350f9.cu
#include "includes.h" __global__ void calcSoftmaxDivForwardGPU(float *out, float *sum, int batch_size, int in_size_x, unsigned int n) { // int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; unsigned int index = threadIdx.x + blockIdx.x * blockDim.x; if(index<n && *(sum + blockIdx.x)>0.0){ // out[id] = out[id] / *sum; out[index] = out[index] / *(sum + blockIdx.x); } /* original for ( int i = 0; i < in.size.x; ++i ){ out( b, i, 0, 0 ) = out( b, i, 0, 0 ) / sum; } */ }
87aaf7e9963c383e9d697a84ac983892ae7e7cb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgemm_fermi.cu, normal z -> d, Thu Oct 8 23:05:32 2020 @author Jakub Kurzak @author Stan Tomov @author Mark Gates [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. gemm_stencil.cuh defines the GPU kernel (device function). gemm_kernel.cuh defines the GPU kernel (global function). The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh. */ #include "magma_internal.h" #include "commonblas_d.h" #define PRECISION_d #include "dgemm_fermi_kernels.h" /***************************************************************************//** Purpose ------- DGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] transA magma_trans_t. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( A ) = A. - = MagmaTrans: op( A ) = A**T. - = MagmaConjTrans: op( A ) = A**H. @param[in] transB magma_trans_t. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( B ) = B. - = MagmaTrans: op( B ) = B**T. - = MagmaConjTrans: op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( dA ) and of the matrix dC. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( dB ) and the number of columns of the matrix dC. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( dA ) and the number of rows of the matrix op( dB ). K must be at least zero. @param[in] alpha DOUBLE PRECISION On entry, ALPHA specifies the scalar alpha. @param[in] dA DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when transA = MagmaNoTrans, and is m otherwise. Before entry with transA = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When transA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] dB DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is n when transB = MagmaNoTrans, and is k otherwise. Before entry with transB = MagmaNoTrans, the leading k by n part of the array dB must contain the matrix dB, otherwise the leading n by k part of the array dB must contain the matrix dB. @param[in] lddb INTEGER. On entry, LDB specifies the first dimension of dB as declared in the calling (sub) program. When transB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. 
When BETA is supplied as zero then dC need not be set on input. @param[in,out] dC DOUBLE PRECISION array of DIMENSION ( LDC, n ). Before entry, the leading m by n part of the array dC must contain the matrix dC, except when beta is zero, in which case dC need not be set on entry. On exit, the array dC is overwritten by the m by n matrix ( alpha*op( dA )*op( dB ) + beta*dC ). @param[in] lddc INTEGER. On entry, LDC specifies the first dimension of dC as declared in the calling (sub) program. LDC must be at least max( 1, m ). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_dgemm( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dB, magma_int_t lddb, double beta, magmaDouble_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( transA == MagmaNoTrans ? ldda < m : ldda < k ) info = -8; else if ( transB == MagmaNoTrans ? lddb < k : lddb < n ) info = -10; else if ( lddc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( transA == MagmaTrans ) TransA = 1; else if ( transA == MagmaNoTrans ) TransA = 0; if ( transB == MagmaTrans ) TransB = 1; else if ( transB == MagmaNoTrans ) TransB = 0; magma_int_t Am = ( ! TransA ? m : k); magma_int_t An = (!TransA ? k : m); magma_int_t Bm = ( ! TransB ? k : n); magma_int_t Bn = (!TransB ? 
n : k); size_t sizeA = (size_t) ldda * (An - 1) + Am; size_t sizeB = (size_t) lddb * (Bn - 1) + Bm; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { magma_dgemm( transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc, queue ); return; } #ifdef TEXTURE_1D // Set textures parameters tex_ref_Amagma_d.normalized = false; tex_ref_Amagma_d.filterMode = hipFilterModePoint; tex_ref_Amagma_d.addressMode[0] = hipAddressModeClamp; tex_ref_Bmagma_d.normalized = false; tex_ref_Bmagma_d.filterMode = hipFilterModePoint; tex_ref_Bmagma_d.addressMode[0] = hipAddressModeClamp; // Bind A and B to texture references hipError_t err; err = hipBindTexture(&offsetA, tex_ref_Amagma_d, dA, sizeA*sizeof(double)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err ); return; } err = hipBindTexture(&offsetB, tex_ref_Bmagma_d, dB, sizeB*sizeof(double)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", hipGetErrorString(err), err ); hipUnbindTexture( tex_ref_Amagma_d ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(dA[0]); offsetB = offsetB/sizeof(dB[0]); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ), magma_ceildiv( n, BLK_N_nn ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_nn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ), magma_ceildiv( n, BLK_N_nt ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_nt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ), magma_ceildiv( n, BLK_N_nc ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_nc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ), magma_ceildiv( n, BLK_N_tn ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_tn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ), magma_ceildiv( n, BLK_N_tt ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_tt), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ), magma_ceildiv( n, BLK_N_tc ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_tc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ), magma_ceildiv( n, BLK_N_cn ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_cn), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ), magma_ceildiv( n, BLK_N_ct ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_ct), 
dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ), magma_ceildiv( n, BLK_N_cc ) ); hipLaunchKernelGGL(( dgemm_kernel_fermi_cc), dim3(dimGrid), dim3(dimBlock), 0, queue->cuda_stream() , m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } #ifdef TEXTURE_1D hipUnbindTexture( tex_ref_Amagma_d ); hipUnbindTexture( tex_ref_Bmagma_d ); #endif }
87aaf7e9963c383e9d697a84ac983892ae7e7cb5.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from magmablas/zgemm_fermi.cu, normal z -> d, Thu Oct 8 23:05:32 2020 @author Jakub Kurzak @author Stan Tomov @author Mark Gates [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. gemm_stencil.cuh defines the GPU kernel (device function). gemm_kernel.cuh defines the GPU kernel (global function). The batched version uses gemm_kernel_batched.cuh instead of gemm_kernel.cuh. */ #include "magma_internal.h" #include "commonblas_d.h" #define PRECISION_d #include "dgemm_fermi_kernels.h" /***************************************************************************//** Purpose ------- DGEMM performs one of the matrix-matrix operations C = alpha*op( A )*op( B ) + beta*C, where op( X ) is one of op( X ) = X or op( X ) = X**T or op( X ) = X**H, alpha and beta are scalars, and A, B and C are matrices, with op( A ) an m by k matrix, op( B ) a k by n matrix and C an m by n matrix. Parameters ---------- @param[in] transA magma_trans_t. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( A ) = A. - = MagmaTrans: op( A ) = A**T. - = MagmaConjTrans: op( A ) = A**H. @param[in] transB magma_trans_t. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( B ) = B. - = MagmaTrans: op( B ) = B**T. - = MagmaConjTrans: op( B ) = B**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix op( dA ) and of the matrix dC. M must be at least zero. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix op( dB ) and the number of columns of the matrix dC. N must be at least zero. @param[in] k INTEGER. On entry, K specifies the number of columns of the matrix op( dA ) and the number of rows of the matrix op( dB ). K must be at least zero. @param[in] alpha DOUBLE PRECISION On entry, ALPHA specifies the scalar alpha. @param[in] dA DOUBLE PRECISION array of DIMENSION ( LDA, ka ), where ka is k when transA = MagmaNoTrans, and is m otherwise. Before entry with transA = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. When transA = MagmaNoTrans then LDA must be at least max( 1, m ), otherwise LDA must be at least max( 1, k ). @param[in] dB DOUBLE PRECISION array of DIMENSION ( LDB, kb ), where kb is n when transB = MagmaNoTrans, and is k otherwise. Before entry with transB = MagmaNoTrans, the leading k by n part of the array dB must contain the matrix dB, otherwise the leading n by k part of the array dB must contain the matrix dB. @param[in] lddb INTEGER. On entry, LDB specifies the first dimension of dB as declared in the calling (sub) program. When transB = MagmaNoTrans then LDB must be at least max( 1, k ), otherwise LDB must be at least max( 1, n ). @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then dC need not be set on input. @param[in,out] dC DOUBLE PRECISION array of DIMENSION ( LDC, n ). 
Before entry, the leading m by n part of the array dC must contain the matrix dC, except when beta is zero, in which case dC need not be set on entry. On exit, the array dC is overwritten by the m by n matrix ( alpha*op( dA )*op( dB ) + beta*dC ). @param[in] lddc INTEGER. On entry, LDC specifies the first dimension of dC as declared in the calling (sub) program. LDC must be at least max( 1, m ). @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_gemm *******************************************************************************/ extern "C" void magmablas_dgemm( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magma_int_t k, double alpha, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_const_ptr dB, magma_int_t lddb, double beta, magmaDouble_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( k < 0 ) info = -5; else if ( transA == MagmaNoTrans ? ldda < m : ldda < k ) info = -8; else if ( transB == MagmaNoTrans ? lddb < k : lddb < n ) info = -10; else if ( lddc < m ) info = -13; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( m <= 0 || n <= 0 || k <= 0 ) return; size_t offsetA = 0; size_t offsetB = 0; int TransA = 2, TransB = 2; if ( transA == MagmaTrans ) TransA = 1; else if ( transA == MagmaNoTrans ) TransA = 0; if ( transB == MagmaTrans ) TransB = 1; else if ( transB == MagmaNoTrans ) TransB = 0; magma_int_t Am = ( ! TransA ? m : k); magma_int_t An = (!TransA ? k : m); magma_int_t Bm = ( ! TransB ? k : n); magma_int_t Bn = (!TransB ? 
n : k); size_t sizeA = (size_t) ldda * (An - 1) + Am; size_t sizeB = (size_t) lddb * (Bn - 1) + Bm; size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE || sizeB >= CUBLAS_MAX_1DBUF_SIZE ) { magma_dgemm( transA, transB, m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc, queue ); return; } #ifdef TEXTURE_1D // Set textures parameters tex_ref_Amagma_d.normalized = false; tex_ref_Amagma_d.filterMode = cudaFilterModePoint; tex_ref_Amagma_d.addressMode[0] = cudaAddressModeClamp; tex_ref_Bmagma_d.normalized = false; tex_ref_Bmagma_d.filterMode = cudaFilterModePoint; tex_ref_Bmagma_d.addressMode[0] = cudaAddressModeClamp; // Bind A and B to texture references cudaError_t err; err = cudaBindTexture(&offsetA, tex_ref_Amagma_d, dA, sizeA*sizeof(double)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err ); return; } err = cudaBindTexture(&offsetB, tex_ref_Bmagma_d, dB, sizeB*sizeof(double)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind B to texture: %s (%d)\n", cudaGetErrorString(err), err ); cudaUnbindTexture( tex_ref_Amagma_d ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(dA[0]); offsetB = offsetB/sizeof(dB[0]); if ( TransA == 0 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nn ), magma_ceildiv( n, BLK_N_nn ) ); dgemm_kernel_fermi_nn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nt ), magma_ceildiv( n, BLK_N_nt ) ); dgemm_kernel_fermi_nt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_nc ), magma_ceildiv( n, BLK_N_nc ) ); dgemm_kernel_fermi_nc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tn ), magma_ceildiv( n, BLK_N_tn ) ); dgemm_kernel_fermi_tn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tt ), magma_ceildiv( n, BLK_N_tt ) ); dgemm_kernel_fermi_tt<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 1 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_tc ), magma_ceildiv( n, BLK_N_tc ) ); dgemm_kernel_fermi_tc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cn ), magma_ceildiv( n, BLK_N_cn ) ); dgemm_kernel_fermi_cn<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 1 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_ct ), magma_ceildiv( n, BLK_N_ct ) ); dgemm_kernel_fermi_ct<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } else if ( TransA == 2 && TransB == 2 ) { dim3 dimGrid( magma_ceildiv( m, BLK_M_cc ), 
magma_ceildiv( n, BLK_N_cc ) ); dgemm_kernel_fermi_cc<<< dimGrid, dimBlock, 0, queue->cuda_stream() >>>( m, n, k, dA, ldda, dB, lddb, dC, lddc, alpha, beta, (int)offsetA, (int)offsetB ); } #ifdef TEXTURE_1D cudaUnbindTexture( tex_ref_Amagma_d ); cudaUnbindTexture( tex_ref_Bmagma_d ); #endif }
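The driver above only checks arguments and dispatches to the Fermi kernels; a minimal hedged call sketch using the documented signature is shown below (queue creation and synchronization follow the usual MAGMA pattern and are assumptions here, not taken from this file):

// Sketch: C = alpha*A*B + beta*C with no transposes, A is m x k, B is k x n, C is m x n,
// all column-major arrays already resident on the device.
void dgemm_nn_example(magma_int_t m, magma_int_t n, magma_int_t k,
                      double alpha, const double *dA, magma_int_t ldda,
                      const double *dB, magma_int_t lddb,
                      double beta, double *dC, magma_int_t lddc)
{
    magma_queue_t queue;
    magma_queue_create(0, &queue);     // queue on device 0 (assumed setup)
    magmablas_dgemm(MagmaNoTrans, MagmaNoTrans, m, n, k,
                    alpha, dA, ldda, dB, lddb, beta, dC, lddc, queue);
    magma_queue_sync(queue);           // wait before reading dC
    magma_queue_destroy(queue);
}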
271def1bdde4b50cb9fbe2e5d45037c11aa943f7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Henry Cooney - CS510, Accel. Comp. - 4 July 2015 Conway's game of life, computed on the GPU. Uses a tiled convolution pattern to achieve good performance (hopefully) */ #include "gol.h" int main() { srand(time(NULL)); int i; for(i = 1; i < 5; ++i) { if(!test_gol(i, HEIGHT, WIDTH)) { printf("**** FAILED ****\n"); printf("CPU and GPU results did not agree.\n"); exit(1); } } printf("OK \n"); printf("All tests passed! GPU results agree with CPU results.\n"); printf("Starting the Game of Life... \n"); animate_gpu(); return 0; } void animate_gpu() { // Does the Game of Life on the GPU, and outputs results // to an X window. Unfortunately, this method is inefficient, // since results are copied back from the GPU every timestep (rather // than remaining on the GPU). Oh well :( // Display code is from gol.c by Christopher Mitchell ([email protected]) Display* display; display = XOpenDisplay(NULL); if (display == NULL) { fprintf(stderr, "Could not open an X display.\n"); exit(-1); } int screen_num = DefaultScreen(display); int black = BlackPixel(display, screen_num); int white = WhitePixel(display, screen_num); Window win = XCreateSimpleWindow(display, RootWindow(display, screen_num), 0, 0, WIDTH, HEIGHT, 0, black, white); XStoreName(display, win, "The Game of Life"); XSelectInput(display, win, StructureNotifyMask); XMapWindow(display, win); while (1) { XEvent e; XNextEvent(display, &e); if (e.type == MapNotify) break; } GC gc = XCreateGC(display, win, 0, NULL); int x, y, n; XPoint points[WIDTH * HEIGHT]; // Generate a random board: int initial[WIDTH * HEIGHT]; int* result; int i; for(i=0; i<WIDTH*HEIGHT; ++i) initial[i] = rand() % 2; while (1) { XClearWindow(display, win); // Get the GPU GoL values: result = gpu_compute(initial, HEIGHT, WIDTH, 1); memcpy(initial, result, sizeof(int)*WIDTH*HEIGHT); n = 0; for (y=0; y<HEIGHT; y++) { for (x=0; x<WIDTH; x++) { if (result[y * WIDTH + x]) { points[n].x = x; points[n].y = y; n++; } } } free(result); XDrawPoints(display, win, gc, points, n, CoordModeOrigin); XFlush(display); } } int* gpu_compute(int* initial, int height, int width, int timesteps) { // Does GoL on the GPU. Initial is the starting // matrix (it is not modified.) The resulting matrix after timesteps // iterations is returned. //printf("Launching GPU computation for %d timesteps... \n", timesteps); int n = width * height; int* result = (int*) malloc(sizeof(int) * n); int* current_dev,* next_dev; int steps_done = 0; // Memory transfer printCudaError(hipMalloc((void**) &current_dev, sizeof(int)*n)); printCudaError(hipMalloc((void**) &next_dev, sizeof(int)*n)); hipDeviceSynchronize(); // is this necessary? printCudaError(hipMemcpy(current_dev, initial, sizeof(int)*n, hipMemcpyHostToDevice)); // Establish dimms - these are for GTX 645 dim3 dimBlock(TW, TW, 1); dim3 dimGrid(divideRoundUp(width, ETW), divideRoundUp(height, ETW), 1); //printf("Matrix size (width x height): %d x %d\n", width, height); //printf("Block dims (x, y, z): %d x %d x %d\n", dimBlock.x, dimBlock.y, dimBlock.z); //printf("Grid dims (x, y, z): %d x %d x %d\n", dimGrid.x, dimGrid.y, dimGrid.z); //printf("Starting kernel... \n"); // For testing - set GPU memory to 2. If any pixel get missed it should be // very visible // setGPUMemory<<<dim3(divideRoundUp(n,512),1,1), dim3(512, 1, 1)>>> // (next_dev, 2); while(steps_done < timesteps) { // To make things faster, current_dev and next dev are swapped back // and forth. 
if(steps_done % 2 == 0) hipLaunchKernelGGL(( conway_step_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, current_dev, next_dev, height, width); else hipLaunchKernelGGL(( conway_step_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, next_dev, current_dev, height, width); ++steps_done; } // printf("Kernel done. \n"); //hipDeviceSynchronize(); // Necessary?? // Check for errors from the kernel: if(hipGetLastError() != hipSuccess) { printf("*** ERROR IN KERNEL *** \n"); exit(1); } // Copy back memory. Make sure we get the right buffer // (since current and next are swapped each frame) if(steps_done % 2 == 1) printCudaError(hipMemcpy(result, next_dev, sizeof(int)*n, hipMemcpyDeviceToHost)); else printCudaError(hipMemcpy(result, current_dev, sizeof(int)*n, hipMemcpyDeviceToHost)); printCudaError(hipFree(current_dev)); printCudaError(hipFree(next_dev)); return result; } __global__ void conway_step_kernel(int* current_dev, int* next_dev, int height, int width) { // Advances the game of life one timestep. // current_dev is the initial matrix, it is not modified. next_dev // the next timestep (the result) __shared__ int dsm[TW][TW]; // Device Shared Memory // Each thread is responsbile for a. fetching one item // from global memory and b. writing one item to output matrix. int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int i, ii; int num_neighbors; int next = 0; int this_pixel; // Each output pixel requires knowledge of neighboring pixels. // Thus, each tile has 'edge pixels' which are loaded into shared mem // but not written to. Values are shifted by one to compensate for this // Mod arithmetic implements wraparound for pixels that are off the board int row = (by*ETW + ty + height - 1) % height; int col = (bx*ETW + tx + width - 1) % width; this_pixel = current_dev[row*width + col]; dsm[ty][tx] = this_pixel; __syncthreads(); if(tx > 0 && tx <= ETW && ty > 0 && ty <= ETW) { // This pixel is not an edge pixel, so figure out its value // in the next frame, and write it. // num_neighbors is the sum of all the neighboring cells. Since // the loop will pass through this pixel, I negate this pixels value. // Thus, this pixel does not contribute to the overall sum. num_neighbors = -this_pixel; for(i=-1; i<2; ++i) { for(ii=-1; ii<2; ++ii) { num_neighbors += dsm[ty+i][tx+ii]; } } if(num_neighbors == 3 || (num_neighbors == 2 && this_pixel)) next = 1; next_dev[row*width + col] = next; } } void printCudaError(hipError_t err) { // Checks the value of input error. If it does not // indicate success, prints an error message. if(err != hipSuccess) { printf("**** CUDA ERROR: ****\n"); printf("%s in %s at line %d \n", hipGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } int divideRoundUp(int a, int b) { // Divides a by b, but rounds the result up instead of down. return (a+(b-1)) / b; } __global__ void setGPUMemory(int* ptr, int val) { // Sets memory at pointer. // For testing, since garbage hanging out in the GPU // can cause confusing results ptr[blockIdx.x*blockDim.x + threadIdx.x] = val; }
271def1bdde4b50cb9fbe2e5d45037c11aa943f7.cu
/* Henry Cooney - CS510, Accel. Comp. - 4 July 2015 Conway's game of life, computed on the GPU. Uses a tiled convolution pattern to achieve good performance (hopefully) */ #include "gol.h" int main() { srand(time(NULL)); int i; for(i = 1; i < 5; ++i) { if(!test_gol(i, HEIGHT, WIDTH)) { printf("**** FAILED ****\n"); printf("CPU and GPU results did not agree.\n"); exit(1); } } printf("OK \n"); printf("All tests passed! GPU results agree with CPU results.\n"); printf("Starting the Game of Life... \n"); animate_gpu(); return 0; } void animate_gpu() { // Does the Game of Life on the GPU, and outputs results // to an X window. Unfortunately, this method is inefficient, // since results are copied back from the GPU every timestep (rather // than remaining on the GPU). Oh well :( // Display code is from gol.c by Christopher Mitchell ([email protected]) Display* display; display = XOpenDisplay(NULL); if (display == NULL) { fprintf(stderr, "Could not open an X display.\n"); exit(-1); } int screen_num = DefaultScreen(display); int black = BlackPixel(display, screen_num); int white = WhitePixel(display, screen_num); Window win = XCreateSimpleWindow(display, RootWindow(display, screen_num), 0, 0, WIDTH, HEIGHT, 0, black, white); XStoreName(display, win, "The Game of Life"); XSelectInput(display, win, StructureNotifyMask); XMapWindow(display, win); while (1) { XEvent e; XNextEvent(display, &e); if (e.type == MapNotify) break; } GC gc = XCreateGC(display, win, 0, NULL); int x, y, n; XPoint points[WIDTH * HEIGHT]; // Generate a random board: int initial[WIDTH * HEIGHT]; int* result; int i; for(i=0; i<WIDTH*HEIGHT; ++i) initial[i] = rand() % 2; while (1) { XClearWindow(display, win); // Get the GPU GoL values: result = gpu_compute(initial, HEIGHT, WIDTH, 1); memcpy(initial, result, sizeof(int)*WIDTH*HEIGHT); n = 0; for (y=0; y<HEIGHT; y++) { for (x=0; x<WIDTH; x++) { if (result[y * WIDTH + x]) { points[n].x = x; points[n].y = y; n++; } } } free(result); XDrawPoints(display, win, gc, points, n, CoordModeOrigin); XFlush(display); } } int* gpu_compute(int* initial, int height, int width, int timesteps) { // Does GoL on the GPU. Initial is the starting // matrix (it is not modified.) The resulting matrix after timesteps // iterations is returned. //printf("Launching GPU computation for %d timesteps... \n", timesteps); int n = width * height; int* result = (int*) malloc(sizeof(int) * n); int* current_dev,* next_dev; int steps_done = 0; // Memory transfer printCudaError(cudaMalloc((void**) &current_dev, sizeof(int)*n)); printCudaError(cudaMalloc((void**) &next_dev, sizeof(int)*n)); cudaThreadSynchronize(); // is this necessary? printCudaError(cudaMemcpy(current_dev, initial, sizeof(int)*n, cudaMemcpyHostToDevice)); // Establish dimms - these are for GTX 645 dim3 dimBlock(TW, TW, 1); dim3 dimGrid(divideRoundUp(width, ETW), divideRoundUp(height, ETW), 1); //printf("Matrix size (width x height): %d x %d\n", width, height); //printf("Block dims (x, y, z): %d x %d x %d\n", dimBlock.x, dimBlock.y, dimBlock.z); //printf("Grid dims (x, y, z): %d x %d x %d\n", dimGrid.x, dimGrid.y, dimGrid.z); //printf("Starting kernel... \n"); // For testing - set GPU memory to 2. If any pixel get missed it should be // very visible // setGPUMemory<<<dim3(divideRoundUp(n,512),1,1), dim3(512, 1, 1)>>> // (next_dev, 2); while(steps_done < timesteps) { // To make things faster, current_dev and next dev are swapped back // and forth. 
if(steps_done % 2 == 0) conway_step_kernel<<<dimGrid, dimBlock>>> (current_dev, next_dev, height, width); else conway_step_kernel<<<dimGrid, dimBlock>>> (next_dev, current_dev, height, width); ++steps_done; } // printf("Kernel done. \n"); //cudaDeviceSynchronize(); // Necessary?? // Check for errors from the kernel: if(cudaGetLastError() != cudaSuccess) { printf("*** ERROR IN KERNEL *** \n"); exit(1); } // Copy back memory. Make sure we get the right buffer // (since current and next are swapped each frame) if(steps_done % 2 == 1) printCudaError(cudaMemcpy(result, next_dev, sizeof(int)*n, cudaMemcpyDeviceToHost)); else printCudaError(cudaMemcpy(result, current_dev, sizeof(int)*n, cudaMemcpyDeviceToHost)); printCudaError(cudaFree(current_dev)); printCudaError(cudaFree(next_dev)); return result; } __global__ void conway_step_kernel(int* current_dev, int* next_dev, int height, int width) { // Advances the game of life one timestep. // current_dev is the initial matrix, it is not modified. next_dev // the next timestep (the result) __shared__ int dsm[TW][TW]; // Device Shared Memory // Each thread is responsbile for a. fetching one item // from global memory and b. writing one item to output matrix. int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int i, ii; int num_neighbors; int next = 0; int this_pixel; // Each output pixel requires knowledge of neighboring pixels. // Thus, each tile has 'edge pixels' which are loaded into shared mem // but not written to. Values are shifted by one to compensate for this // Mod arithmetic implements wraparound for pixels that are off the board int row = (by*ETW + ty + height - 1) % height; int col = (bx*ETW + tx + width - 1) % width; this_pixel = current_dev[row*width + col]; dsm[ty][tx] = this_pixel; __syncthreads(); if(tx > 0 && tx <= ETW && ty > 0 && ty <= ETW) { // This pixel is not an edge pixel, so figure out its value // in the next frame, and write it. // num_neighbors is the sum of all the neighboring cells. Since // the loop will pass through this pixel, I negate this pixels value. // Thus, this pixel does not contribute to the overall sum. num_neighbors = -this_pixel; for(i=-1; i<2; ++i) { for(ii=-1; ii<2; ++ii) { num_neighbors += dsm[ty+i][tx+ii]; } } if(num_neighbors == 3 || (num_neighbors == 2 && this_pixel)) next = 1; next_dev[row*width + col] = next; } } void printCudaError(cudaError_t err) { // Checks the value of input error. If it does not // indicate success, prints an error message. if(err != cudaSuccess) { printf("**** CUDA ERROR: ****\n"); printf("%s in %s at line %d \n", cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } int divideRoundUp(int a, int b) { // Divides a by b, but rounds the result up instead of down. return (a+(b-1)) / b; } __global__ void setGPUMemory(int* ptr, int val) { // Sets memory at pointer. // For testing, since garbage hanging out in the GPU // can cause confusing results ptr[blockIdx.x*blockDim.x + threadIdx.x] = val; }
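TW and ETW are defined in gol.h, which this dump does not include; the halo logic above loads a full TW x TW tile and writes only indices 1..ETW, so the writes stay inside the loaded tile only when ETW <= TW - 2, and ETW = TW - 2 uses every loaded cell. A hedged sketch of the constants the kernel appears to assume (the value 32 is an example, not taken from gol.h):

// Assumed tile constants (gol.h is not shown). Each block loads a TW x TW shared tile,
// including a 1-cell halo on every side, and writes the inner ETW x ETW cells; the host
// code above sizes the grid as divideRoundUp(width, ETW) x divideRoundUp(height, ETW).
#define TW  32             // tile width in shared memory, halo included
#define ETW (TW - 2)       // effective (written) tile width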
a8f52557bbad53d5f0c7cc66d818384d0b97d495.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_log10.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_log10), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_log10), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_log10), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a8f52557bbad53d5f0c7cc66d818384d0b97d495.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_log10.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_log10<<<gridBlock,threadBlock>>>(n,result,x); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_log10<<<gridBlock,threadBlock>>>(n,result,x); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_log10<<<gridBlock,threadBlock>>>(n,result,x); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
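vec_log10.cu is included by both harness variants but not reproduced in this dump; a kernel consistent with the call vec_log10(n, result, x) would be a simple elementwise base-10 logarithm. The body below is an assumption based only on the name and call signature:

// Assumed content of vec_log10.cu: elementwise log10 over the first n entries.
// Only the signature (int n, double *result, double *x) comes from the harness above.
__global__ void vec_log10(int n, double *result, double *x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        result[i] = log10(x[i]);   // double-precision device log10
    }
}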
f4eb1f0cf7cb4fac3fc2d20e8f3d3049495350d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/detail/gru_gpu_kernel.h" #include "paddle/operators/math/detail/gru_kernel.h" #include "paddle/operators/math/gru_compute.h" #include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { namespace math { template <typename T> struct GRUUnitFunctor<platform::GPUPlace, T> { static void compute(const platform::DeviceContext &context, hl_gru_value<T> value, int frameSize, int batchSize, activation_mode_t active_node, activation_mode_t active_gate) { auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(context).stream(); dim3 threads; dim3 grid; if (batchSize == 1) { int framePerBlock = frameSize <= 1024 ? frameSize : 1024; int frameBlocks = (frameSize + 1024 - 1) / 1024; threads = dim3(framePerBlock, 1); grid = dim3(frameBlocks, 1); } else { threads = dim3(32, 32); grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); } if (value.prevOutValue) { math::gemm<platform::GPUPlace, T>( context, false, false, batchSize, frameSize * 2, frameSize, 1, value.prevOutValue, frameSize, value.gateWeight, frameSize * 2, 1, value.gateValue, frameSize * 3); } if (batchSize == 1) { hipLaunchKernelGGL(( detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>, /* isBatch= */ false, T>), dim3(grid), dim3(threads), 0, stream, detail::forward::gru_resetOutput<T>(), value.gateValue, value.resetOutputValue, value.prevOutValue, frameSize, batchSize, active_gate); } else { hipLaunchKernelGGL(( detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>, /* isBatch= */ true, T>), dim3(grid), dim3(threads), 0, stream, detail::forward::gru_resetOutput<T>(), value.gateValue, value.resetOutputValue, value.prevOutValue, frameSize, batchSize, active_gate); } if (value.prevOutValue) { math::gemm<platform::GPUPlace, T>( context, false, false, batchSize, frameSize, frameSize, 1, value.resetOutputValue, frameSize, value.stateWeight, frameSize, 1, value.gateValue + frameSize * 2, frameSize * 3); } if (batchSize == 1) { hipLaunchKernelGGL(( detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>, /* isBatch= */ false, T>), dim3(grid), dim3(threads), 0, stream, detail::forward::gru_finalOutput<T>(), value.gateValue, value.prevOutValue, value.outputValue, frameSize, batchSize, active_node); } else { hipLaunchKernelGGL(( detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>, /* isBatch= */ true, T>), dim3(grid), dim3(threads), 0, stream, detail::forward::gru_finalOutput<T>(), value.gateValue, value.prevOutValue, value.outputValue, frameSize, batchSize, active_node); } } }; template <typename T> struct GRUUnitGradFunctor<platform::GPUPlace, T> { static void compute(const platform::DeviceContext &context, hl_gru_value<T> value, hl_gru_grad<T> grad, int frameSize, int batchSize, activation_mode_t active_node, activation_mode_t active_gate) { auto 
stream = reinterpret_cast<const platform::CUDADeviceContext &>(context).stream(); dim3 threads; dim3 grid; if (batchSize == 1) { int framePerBlock = frameSize <= 1024 ? frameSize : 1024; int frameBlocks = (frameSize + 1024 - 1) / 1024; threads = dim3(framePerBlock, 1); grid = dim3(frameBlocks, 1); } else { threads = dim3(32, 32); grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); } if (batchSize == 1) { hipLaunchKernelGGL(( detail::KeGruBackwardStateGrad< detail::backward::gru_stateGrad<T>, /* isBatch= */ false>), dim3(grid), dim3(threads), 0, stream, detail::backward::gru_stateGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, batchSize, active_node); } else { hipLaunchKernelGGL(( detail::KeGruBackwardStateGrad< detail::backward::gru_stateGrad<T>, /* isBatch= */ true>), dim3(grid), dim3(threads), 0, stream, detail::backward::gru_stateGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, batchSize, active_node); } if (value.prevOutValue && grad.prevOutGrad) { math::gemm<platform::GPUPlace, T>( context, false, true, batchSize, frameSize, frameSize, 1, grad.gateGrad + frameSize * 2, frameSize * 3, value.stateWeight, frameSize, 0, grad.resetOutputGrad, frameSize); if (grad.stateWeightGrad) { math::gemm<platform::GPUPlace, T>( context, true, false, frameSize, frameSize, batchSize, 1, value.resetOutputValue, frameSize, grad.gateGrad + frameSize * 2, frameSize * 3, 1, grad.stateWeightGrad, frameSize); } } if (batchSize == 1) { hipLaunchKernelGGL(( detail::KeGruBackwardResetGrad< detail::backward::gru_resetGrad<T>, /* isBatch= */ false>), dim3(grid), dim3(threads), 0, stream, detail::backward::gru_resetGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, batchSize, active_gate); } else { hipLaunchKernelGGL(( detail::KeGruBackwardResetGrad< detail::backward::gru_resetGrad<T>, /* isBatch= */ true>), dim3(grid), dim3(threads), 0, stream, detail::backward::gru_resetGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, batchSize, active_gate); } if (grad.prevOutGrad && value.prevOutValue) { math::gemm<platform::GPUPlace, T>( context, false, true, batchSize, frameSize, frameSize * 2, 1, grad.gateGrad, frameSize * 3, value.gateWeight, frameSize * 2, 1, grad.prevOutGrad, frameSize); if (grad.gateWeightGrad) { math::gemm<platform::GPUPlace, T>( context, true, false, frameSize, frameSize * 2, batchSize, 1, value.prevOutValue, frameSize, grad.gateGrad, frameSize * 3, 1, grad.gateWeightGrad, frameSize * 2); } } } }; template struct GRUUnitFunctor<platform::GPUPlace, float>; template struct GRUUnitFunctor<platform::GPUPlace, double>; template struct GRUUnitGradFunctor<platform::GPUPlace, float>; template struct GRUUnitGradFunctor<platform::GPUPlace, double>; } // namespace math } // namespace operators } // namespace paddle
f4eb1f0cf7cb4fac3fc2d20e8f3d3049495350d6.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/detail/gru_gpu_kernel.h" #include "paddle/operators/math/detail/gru_kernel.h" #include "paddle/operators/math/gru_compute.h" #include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { namespace math { template <typename T> struct GRUUnitFunctor<platform::GPUPlace, T> { static void compute(const platform::DeviceContext &context, hl_gru_value<T> value, int frameSize, int batchSize, activation_mode_t active_node, activation_mode_t active_gate) { auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(context).stream(); dim3 threads; dim3 grid; if (batchSize == 1) { int framePerBlock = frameSize <= 1024 ? frameSize : 1024; int frameBlocks = (frameSize + 1024 - 1) / 1024; threads = dim3(framePerBlock, 1); grid = dim3(frameBlocks, 1); } else { threads = dim3(32, 32); grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); } if (value.prevOutValue) { math::gemm<platform::GPUPlace, T>( context, false, false, batchSize, frameSize * 2, frameSize, 1, value.prevOutValue, frameSize, value.gateWeight, frameSize * 2, 1, value.gateValue, frameSize * 3); } if (batchSize == 1) { detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>, /* isBatch= */ false, T><<<grid, threads, 0, stream>>>( detail::forward::gru_resetOutput<T>(), value.gateValue, value.resetOutputValue, value.prevOutValue, frameSize, batchSize, active_gate); } else { detail::KeGruForwardResetOutput<detail::forward::gru_resetOutput<T>, /* isBatch= */ true, T><<<grid, threads, 0, stream>>>( detail::forward::gru_resetOutput<T>(), value.gateValue, value.resetOutputValue, value.prevOutValue, frameSize, batchSize, active_gate); } if (value.prevOutValue) { math::gemm<platform::GPUPlace, T>( context, false, false, batchSize, frameSize, frameSize, 1, value.resetOutputValue, frameSize, value.stateWeight, frameSize, 1, value.gateValue + frameSize * 2, frameSize * 3); } if (batchSize == 1) { detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>, /* isBatch= */ false, T><<<grid, threads, 0, stream>>>( detail::forward::gru_finalOutput<T>(), value.gateValue, value.prevOutValue, value.outputValue, frameSize, batchSize, active_node); } else { detail::KeGruForwardFinalOutput<detail::forward::gru_finalOutput<T>, /* isBatch= */ true, T><<<grid, threads, 0, stream>>>( detail::forward::gru_finalOutput<T>(), value.gateValue, value.prevOutValue, value.outputValue, frameSize, batchSize, active_node); } } }; template <typename T> struct GRUUnitGradFunctor<platform::GPUPlace, T> { static void compute(const platform::DeviceContext &context, hl_gru_value<T> value, hl_gru_grad<T> grad, int frameSize, int batchSize, activation_mode_t active_node, activation_mode_t active_gate) { auto stream = reinterpret_cast<const platform::CUDADeviceContext &>(context).stream(); dim3 threads; dim3 grid; if (batchSize == 1) { int framePerBlock = frameSize <= 1024 ? 
frameSize : 1024; int frameBlocks = (frameSize + 1024 - 1) / 1024; threads = dim3(framePerBlock, 1); grid = dim3(frameBlocks, 1); } else { threads = dim3(32, 32); grid = dim3((frameSize + 32 - 1) / 32, (batchSize + 32 - 1) / 32); } if (batchSize == 1) { detail::KeGruBackwardStateGrad< detail::backward::gru_stateGrad<T>, /* isBatch= */ false><<<grid, threads, 0, stream>>>( detail::backward::gru_stateGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, batchSize, active_node); } else { detail::KeGruBackwardStateGrad< detail::backward::gru_stateGrad<T>, /* isBatch= */ true><<<grid, threads, 0, stream>>>( detail::backward::gru_stateGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.outputGrad, frameSize, batchSize, active_node); } if (value.prevOutValue && grad.prevOutGrad) { math::gemm<platform::GPUPlace, T>( context, false, true, batchSize, frameSize, frameSize, 1, grad.gateGrad + frameSize * 2, frameSize * 3, value.stateWeight, frameSize, 0, grad.resetOutputGrad, frameSize); if (grad.stateWeightGrad) { math::gemm<platform::GPUPlace, T>( context, true, false, frameSize, frameSize, batchSize, 1, value.resetOutputValue, frameSize, grad.gateGrad + frameSize * 2, frameSize * 3, 1, grad.stateWeightGrad, frameSize); } } if (batchSize == 1) { detail::KeGruBackwardResetGrad< detail::backward::gru_resetGrad<T>, /* isBatch= */ false><<<grid, threads, 0, stream>>>( detail::backward::gru_resetGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, batchSize, active_gate); } else { detail::KeGruBackwardResetGrad< detail::backward::gru_resetGrad<T>, /* isBatch= */ true><<<grid, threads, 0, stream>>>( detail::backward::gru_resetGrad<T>(), value.gateValue, grad.gateGrad, value.prevOutValue, grad.prevOutGrad, grad.resetOutputGrad, frameSize, batchSize, active_gate); } if (grad.prevOutGrad && value.prevOutValue) { math::gemm<platform::GPUPlace, T>( context, false, true, batchSize, frameSize, frameSize * 2, 1, grad.gateGrad, frameSize * 3, value.gateWeight, frameSize * 2, 1, grad.prevOutGrad, frameSize); if (grad.gateWeightGrad) { math::gemm<platform::GPUPlace, T>( context, true, false, frameSize, frameSize * 2, batchSize, 1, value.prevOutValue, frameSize, grad.gateGrad, frameSize * 3, 1, grad.gateWeightGrad, frameSize * 2); } } } }; template struct GRUUnitFunctor<platform::GPUPlace, float>; template struct GRUUnitFunctor<platform::GPUPlace, double>; template struct GRUUnitGradFunctor<platform::GPUPlace, float>; template struct GRUUnitGradFunctor<platform::GPUPlace, double>; } // namespace math } // namespace operators } // namespace paddle
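// The forward pass above first folds the previous hidden state into the update/reset gate
// pre-activations with a GEMM before launching KeGruForwardResetOutput:
//   gateValue[b, 0:2F] += prevOut[b, 0:F] x gateWeight[0:F, 0:2F],   F = frameSize,
// where gateValue rows have stride 3F (the last F columns hold the candidate state).
// A naive triple-loop sketch of that accumulation, assuming the row-major convention of
// Paddle's math::gemm wrapper; the function name is illustrative only.
void accumulate_gate_preactivations(const float* prevOut,     // [batch, F], row stride F
                                    const float* gateWeight,  // [F, 2F],   row stride 2F
                                    float*       gateValue,   // [batch, 3F], row stride 3F
                                    int batchSize, int frameSize) {
  for (int b = 0; b < batchSize; ++b) {
    for (int j = 0; j < 2 * frameSize; ++j) {
      float acc = gateValue[b * 3 * frameSize + j];            // beta = 1: accumulate into the gates
      for (int k = 0; k < frameSize; ++k) {
        acc += prevOut[b * frameSize + k] * gateWeight[k * 2 * frameSize + j];
      }
      gateValue[b * 3 * frameSize + j] = acc;
    }
  }
}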
d62e910c34d61e5570b79f3dc8697c8fe2e9b89c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "rgb_to_yuv_convert_layer_tester_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "../rgb_to_yuv_convert_layer.h" #include "../nn_types.h" #define w_r 0.299F #define w_b 0.114F #define w_g (1.0F - w_r - w_b) #define u_max 0.436F #define v_max 0.615F #define u_mult (u_max / (1.0F - w_b)) #define v_mult (v_max / (1.0F - w_r)) __global__ void rgb_to_yuv_convert_kernel( float * __restrict input, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float red = input[red_and_y_offset]; float green = input[green_and_u_offset]; float blue = input[blue_and_v_offset]; float y = w_r * red + w_g * green + w_b * blue; float u = u_mult * (blue - y); float v = v_mult * (red - y); input[red_and_y_offset] = y; input[green_and_u_offset] = u; input[blue_and_v_offset] = v; } } namespace nnforge { namespace cuda { rgb_to_yuv_convert_layer_tester_cuda::rgb_to_yuv_convert_layer_tester_cuda() { } rgb_to_yuv_convert_layer_tester_cuda::~rgb_to_yuv_convert_layer_tester_cuda() { } void rgb_to_yuv_convert_layer_tester_cuda::enqueue_test( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, cuda_linear_buffer_device_smart_ptr input_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); hipLaunchKernelGGL(( rgb_to_yuv_convert_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_buffer, 
*schema_data[0], input_configuration_specific.feature_map_count, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); } void rgb_to_yuv_convert_layer_tester_cuda::tester_configured() { nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema); color_feature_map_config_count = layer_derived->color_feature_map_config_list.size(); } } }
d62e910c34d61e5570b79f3dc8697c8fe2e9b89c.cu
/* * Copyright 2011-2013 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "rgb_to_yuv_convert_layer_tester_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "../rgb_to_yuv_convert_layer.h" #include "../nn_types.h" #define w_r 0.299F #define w_b 0.114F #define w_g (1.0F - w_r - w_b) #define u_max 0.436F #define v_max 0.615F #define u_mult (u_max / (1.0F - w_b)) #define v_mult (v_max / (1.0F - w_r)) __global__ void rgb_to_yuv_convert_kernel( float * __restrict input, const int * __restrict color_feature_map_config_list, int feature_map_count, int elem_count_per_feature_map, int color_feature_map_config_count, int entry_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; int color_feature_map_config_config_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((elem_id < elem_count_per_feature_map) && (color_feature_map_config_config_id < color_feature_map_config_count) && (entry_id < entry_count)) { int color_feature_map_config_id_offset = color_feature_map_config_config_id * 3; int red_and_y_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset]; int green_and_u_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 1]; int blue_and_v_feature_map_id = color_feature_map_config_list[color_feature_map_config_id_offset + 2]; int base_offset = (entry_id * elem_count_per_feature_map * feature_map_count) + elem_id; int red_and_y_offset = red_and_y_feature_map_id * elem_count_per_feature_map + base_offset; int green_and_u_offset = green_and_u_feature_map_id * elem_count_per_feature_map + base_offset; int blue_and_v_offset = blue_and_v_feature_map_id * elem_count_per_feature_map + base_offset; float red = input[red_and_y_offset]; float green = input[green_and_u_offset]; float blue = input[blue_and_v_offset]; float y = w_r * red + w_g * green + w_b * blue; float u = u_mult * (blue - y); float v = v_mult * (red - y); input[red_and_y_offset] = y; input[green_and_u_offset] = u; input[blue_and_v_offset] = v; } } namespace nnforge { namespace cuda { rgb_to_yuv_convert_layer_tester_cuda::rgb_to_yuv_convert_layer_tester_cuda() { } rgb_to_yuv_convert_layer_tester_cuda::~rgb_to_yuv_convert_layer_tester_cuda() { } void rgb_to_yuv_convert_layer_tester_cuda::enqueue_test( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data, cuda_linear_buffer_device_smart_ptr input_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, unsigned int entry_count) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map, color_feature_map_config_count, entry_count); rgb_to_yuv_convert_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_buffer, *schema_data[0], input_configuration_specific.feature_map_count, input_elem_count_per_feature_map, 
color_feature_map_config_count, entry_count); } void rgb_to_yuv_convert_layer_tester_cuda::tester_configured() { nnforge_shared_ptr<const rgb_to_yuv_convert_layer> layer_derived = nnforge_dynamic_pointer_cast<const rgb_to_yuv_convert_layer>(layer_schema); color_feature_map_config_count = layer_derived->color_feature_map_config_list.size(); } } }
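// rgb_to_yuv_convert_kernel above rewrites three colour feature maps in place with the
// BT.601-style weights defined at the top of the file (w_r = 0.299, w_b = 0.114,
// u_max = 0.436, v_max = 0.615). A host-side sketch of the same per-pixel arithmetic;
// the struct and function names are illustrative and not part of nnForge.
struct YuvPixel { float y, u, v; };

inline YuvPixel rgb_to_yuv_pixel(float red, float green, float blue) {
  const float w_r = 0.299f, w_b = 0.114f, w_g = 1.0f - w_r - w_b;
  const float u_mult = 0.436f / (1.0f - w_b);
  const float v_mult = 0.615f / (1.0f - w_r);
  YuvPixel out;
  out.y = w_r * red + w_g * green + w_b * blue;  // luma
  out.u = u_mult * (blue - out.y);               // blue-difference chroma, scaled to +/- u_max
  out.v = v_mult * (red  - out.y);               // red-difference chroma, scaled to +/- v_max
  return out;
}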
0c88261708ec15cbbbc8d712bb0a90f022676e3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cuda_helpers.h" #include <rocblas.h> //added by kk 04/26/2019 for hipblasDgemm handle //#define DEBUGPRINT 0 __global__ void compute_entropy_gpu_kernel(double *tlag, double *pr, double *vtrans,int ntot, int irho, double ntol , double rgam, double gmaref,int ltot ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ double rho= fmax(vtrans[ltot*(irho-1)+id],ntol); tlag[id]=rgam*rho*log(pr[id]/(pow(rho,gmaref) )); } } extern "C" void compute_entropy_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_pr, double *d_vtrans,int *ntot, int *irho, double *ntol , double *rgam, double *gmaref, int *ltot){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); // if (code1 != hipSuccess){ printf("CUDA: Start compute_entropy_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values ntot = %d, irho = %d, ntol = %lf, rgam = %lf, gmaref = %lf \n",ntot[0],irho[0],ntol[0],rgam[0],gmaref[0] ); #endif //} int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot[0]/blockSize); hipLaunchKernelGGL(( compute_entropy_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_tlag,d_pr,d_vtrans,ntot[0],irho[0],ntol[0],rgam[0],gmaref[0],ltot[0]); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); //if (code2 != hipSuccess){ printf("CUDA: End compute_engropy_wrapper cuda status: %s\n",hipGetErrorString(code1)); #endif //} } __global__ void entropy_residual_flux_gpu_kernel(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzdlelt, double *vx, double *vy, double *vz, int if3d ){//lxyzd -> lxyzdlelt by Kk 03/16 int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ /*for performance, put *rdt to one equation by Kk 04/11 if(stage==1){ res2[id]=tlag[id]-tlag[ltot*lorder+id] ; } else{ res2[id]=tlag[id]-tlag[ltot+id] ; } res2[id] = res2[id]*rdt;*/ if(stage==1){ res2[id]=(tlag[id]-tlag[ltot*lorder+id])*rdt; } else{ res2[id]=(tlag[id]-tlag[ltot+id])*rdt; } // evaluate_entropy_flux(e) totalh[id]= vx[id]*tlag[id]; //totalh[lxyzd+id] = vy[id]*tlag[id]; //if(if3d){totalh[lxyzd*2+id] = vz[id]*tlag[id];} totalh[lxyzdlelt+id] = vy[id]*tlag[id]; //lxyzd -> lxyzdlelt by Kk 03/16 if(if3d){totalh[lxyzdlelt*2+id] = vz[id]*tlag[id];} //flux_div_mini(e) } } __global__ void flux_div_mini_gpu_kernel1(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *ur1,double *us1, double *ut1, double *ur2, double *us2, double *ut2, double *ur3, double *us3, double *ut3,double *ud, int ldd, double *jacmi, double *rxm1, double *sxm1, double *txm1, double *rym1, double *sym1, double *tym1,double *rzm1, double *szm1, double *tzm1, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; int i= id % ldd; if(id<ntot){ //something is wrong because ur us ut has only [i]. I think it should be [id] because I added *lelt later. Check again. 
adeesha // ur us ut [i] -> ur us ut[id] by Kk 03/22 ud[id] = jacmi[id] *( rxm1[id]*ur1[id]+ sxm1[id]*us1[id]+txm1[id]*ut1[id]); ud[id] = ud[id]+ jacmi[id] *( rym1[id]*ur2[id]+ sym1[id]*us2[id]+txm1[id]*ut2[id]); ud[id] = ud[id] + jacmi[id] *( rzm1[id]*ur3[id]+ szm1[id]*us3[id]+tzm1[id]*ut3[id]); //added by Kk 04/11 for performance res2[id] = res2[id] + ud[id]; } } __global__ void flux_div_mini_gpu_kernel2(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *ur1,double *us1, double *ut1, double *ur2, double *us2, double *ut2, double *ur3, double *us3, double *ut3,double *ud, int ldd, double *jacmi, double *rxm1, double *sxm1, double *txm1, double *rym1, double *sym1, double *tym1,double *rzm1, double *szm1, double *tzm1, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; int i= id % ldd; if(id<ntot){ //something is wrong because ur us ut has only [i]. I think it should be [id] because I added *lelt later. Check again. adeesha // ur us ut [i] -> ur us ut[id] by Kk 03/22 ud[id] = jacmi[id] *(rxm1[id]*ur1[id]+ sxm1[id]*us1[id]); ud[id] = ud[id]+ jacmi[id]*(rym1[id]*ur2[id]+ sym1[id]*us2[id]); //added by Kk 04/11 for performance res2[id] = res2[id] + ud[id]; } } //mxm multiplication __global__ void mxm1(double *a, int n1, double *b, int n2, double *c, int n3, int nelt, int aSize, int bSize, int cSize, int extraEq){ //calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in c //in fortran the original calculation was // c(n3,n1) = b(n3,n2) X a(n2,n1) // a,b,cSize are single element size //extraEq, in case of a matrix has equation as an index int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<nelt*n1*n3){ int e = id/(n1*n3); int rc = id%(n1*n3); int i = rc/n3; int j = rc%n3; int cid = e*cSize + rc; int aid = e*aSize + extraEq + i*n2; int bid = e*bSize + j; c[cid] = 0; for(int k = 0; k<n2; k++) c[cid]+=a[aid+k]*b[bid+k*n3]; } } extern "C" void entropy_residual_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_res2,int *ntot, double *rdt, int *stage, int *lorder,int *ltot, int *lxd, int *lyd, int *lzd, double *d_vx, double *d_vy, double *d_vz, int *lx1, int *ly1, int *lz1, double *d_jacmi, double *d_rxm1, double *d_sxm1, double *d_txm1, double *d_rym1, double *d_sym1, double *d_tym1,double *d_rzm1, double *d_szm1, double *d_tzm1,int *if3d,int *nelt, double *d_dxm1, double *d_dxtm1, int *lelt, double *d_totalh){//added parameter lelt by Kk 03/16; added d_totalh to parameter to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); // if (code1 != hipSuccess){ printf("CUDA: Start entropy_residual_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start entropy_residual_gpu_wrapper values rdt = %lf, stage = %d, lorder= %d,ltot = %d,lxd = %d, lyd = %d, lzd = %d, lx1 = %d,ly1 = %d,lz1 = %d,if3d = %d,nelt = %d \n",rdt[0], stage[0],lorder[0],ltot[0],lxd[0], lyd[0],lzd[0],lx1[0],ly1[0],lz1[0],if3d[0],nelt[0]); #endif //} //double *d_totalh_temp; // Anyway d_totalh seems not needed. check with Dr.Tania. 
adeesha; use d_totalh to replace d_totalh_temp by Kk04/11 double *d_ur1; double *d_us1; double *d_ut1; double *d_ur2; double *d_us2; double *d_ut2; double *d_ur3; double *d_us3; double *d_ut3; double *d_ud; int lxyzd = lxd[0]*lyd[0]*lzd[0]; int lxyzdlelt = lxd[0]*lyd[0]*lzd[0]*lelt[0]; int ldd = lx1[0]*ly1[0]*lz1[0]; //hipMalloc((void**)&d_totalh_temp,3*lxyzd *nelt[0]* sizeof(double)); //hipMalloc((void**)&d_totalh_temp,3*lxyzd *lelt[0]* sizeof(double));//note here changed to lelt by Kk, check if correct; use d_totalh to replace d_totalh_temp by Kk04/11 hipMalloc((void**)&d_ur1,ldd * nelt[0]*sizeof(double)); //nelt[0] added later. need to double check. hipMalloc((void**)&d_us1,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_ut1,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_ur2,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_us2,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_ut2,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_ur3,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_us3,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_ut3,ldd * nelt[0]*sizeof(double)); hipMalloc((void**)&d_ud,nelt[0]*ldd * sizeof(double)); //hipMemset(d_totalh_temp, 0.0, 3*lxyzd*lelt[0]*sizeof(double)); //nelt[0] -> lelt[0] by Kk 03/22; use d_totalh to replace d_totalh_temp by Kk04/11 /* comment here for performance, duplicate with below, by Kk04/11 hipMemset(d_ur1, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_us1, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ut1, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ur2, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_us2, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ut2, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ur3, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_us3, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ut3, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ud, 0.0, ldd*nelt[0]*sizeof(double));*/ int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot[0]/blockSize); //ntot[0] = lxyz * nelt //create handle for gpu_local_grad3 kk 04/18 hipblasHandle_t handle; hipblasCreate(&handle); //lxyzd -> lxyzdlelt by Kk 03/16 //entropy_residual_flux_gpu_kernel<<<gridSize, blockSize>>>(d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh_temp, lxyzd, d_vx, d_vy, d_vz, if3d[0]); hipLaunchKernelGGL(( entropy_residual_flux_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh, lxyzdlelt, d_vx, d_vy, d_vz, if3d[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after kernel 1cuda status: %s\n",hipGetErrorString(code1)); #endif //flux_div_mini(e) hipMemset(d_ur1, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_us1, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ut1, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ur2, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_us2, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ut2, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ur3, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_us3, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ut3, 0.0, ldd*nelt[0]*sizeof(double)); hipMemset(d_ud, 0.0, ldd*nelt[0]*sizeof(double)); if(if3d[0]){ gpu_local_grad3(handle, d_ur1,d_us1,d_ut1,d_totalh,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 1st gpu_local_grad3 cuda 
status: %s\n",hipGetErrorString(code1)); #endif gpu_local_grad3(handle, d_ur2,d_us2,d_ut2,d_totalh+lxyzdlelt,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 2st gpu_local_grad3 cuda status: %s\n",hipGetErrorString(code1)); #endif gpu_local_grad3(handle, d_ur3,d_us3,d_ut3,d_totalh+lxyzdlelt*2,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 3st gpu_local_grad3 cuda status: %s\n",hipGetErrorString(code1)); #endif hipLaunchKernelGGL(( flux_div_mini_gpu_kernel1), dim3(gridSize), dim3(blockSize), 0, 0, d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh, lxyzd, d_ur1,d_us1, d_ut1, d_ur2,d_us2, d_ut2, d_ur3, d_us3, d_ut3,d_ud, ldd, d_jacmi, d_rxm1, d_sxm1, d_txm1, d_rym1, d_sym1, d_tym1, d_rzm1, d_szm1, d_tzm1,if3d[0]); //use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after flux_div_mini_gpu_kernel1 cuda status: %s\n",hipGetErrorString(code1)); #endif } else{ gpu_local_grad2(d_ur1,d_us1,d_totalh,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 1st gpu_local_grad2 cuda status: %s\n",hipGetErrorString(code1)); #endif gpu_local_grad2(d_ur2,d_us2,d_totalh+lxyzdlelt,lx1[0],d_dxm1,d_dxtm1,nelt[0]); //use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 2st gpu_local_grad2 cuda status: %s\n",hipGetErrorString(code1)); #endif hipLaunchKernelGGL(( flux_div_mini_gpu_kernel2), dim3(gridSize), dim3(blockSize), 0, 0, d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh, lxyzd, d_ur1,d_us1, d_ut1, d_ur2,d_us2, d_ut2, d_ur3, d_us3, d_ut3,d_ud, ldd, d_jacmi, d_rxm1, d_sxm1, d_txm1, d_rym1, d_sym1, d_tym1, d_rzm1, d_szm1, d_tzm1,if3d[0]); //use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after flux_div_mini_gpu_kernel2 cuda status: %s\n",hipGetErrorString(code1)); #endif } /*for performance, comment out nekadd2, put add in the flux_div_mini_gpu_kernel1 or flux_div_mini_gpu_kernel2 gpu_nekadd2(glbblockSize1[0], d_res2,d_ud, ntot[0]);*/ #ifdef DEBUGPRINT hipDeviceSynchronize(); code1 = hipPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after nekadd2 cuda status: %s\n",hipGetErrorString(code1)); #endif //hipFree(d_totalh_temp); //use d_totalh to replace d_totalh_temp by Kk04/11 hipFree(d_ur1); hipFree(d_ur2); hipFree(d_ur3); hipFree(d_us1); hipFree(d_us2); hipFree(d_us3); hipFree(d_ut1); hipFree(d_ut2); hipFree(d_ut3); hipFree(d_ud); //destroy handle hipblasDestroy(handle); //added by Kk 04/26 #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); printf("CUDA: End entropy residual_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2)); #endif } __global__ void wavevisc_gpu_kernel(double *t,double *csound, double *vx, double *vy, double *vz, int ntot, double *wavespeed,int lxyz, int lx1, int ly1, int lz1, double c_max,int ltot, double *meshh ){ 
//deleted parameter vtranstemp since no use int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ wavespeed[id]= csound [id] +sqrt(vx[id]*vx[id]+vy[id]*vy[id]+vz[id]*vz[id] ) ;//sqrtf -> sqrt, corrected by Kk 02/05 // find max of wavespeed using reduction __syncthreads(); unsigned int i = lxyz/2; int len = lxyz; int e= id/(lxyz); int startofcurrentelement = e*lxyz; while(i != 0){ if(id-startofcurrentelement <= i){ wavespeed[id] = fmax(fabs(wavespeed[id]),fabs(wavespeed[startofcurrentelement + (id+i)%len]));//fmaxf->fmax, corrected by Kk 02/05, fabs(double) } __syncthreads(); //added by Kk 02/05/2019 since the latter one may not correct when len is odd len = (len+1)/2; i = len/2; /* commented by Kk 02/05/2019 len = i; i /= 2;*/ } double maxeig = wavespeed[e*lxyz]; /*if(id%lxyz == 0){ printf("maxeig in wavevisc %d %.15lf %.15lf \n", id/lxyz, maxeig, c_max); }*/ // find max of vtrans using reduction. But never used? check with Dr.Tania //i = lxyz/2; //int e= id/(lx1*ly1*lz1); //int startofcurrentelement = id-e; //while(i != 0){ // if(id-startofcurrentelement < i){ // vtranstmp[id] = fmaxf(vtranstmp[id], vtranstmp[id + i]); // } // __syncthreads(); // i /= 2; //} //int rhomax = vtranstmp[id-e]; t[2*ltot+id] = c_max*maxeig*meshh[e]; // if(id<10){ // printf("$$$ print from cuda maxeig = %lf t[2*ltot+id]= %lf meshh[e]=%lf \n",maxeig,t[2*ltot+id],meshh[e]); // } } } extern "C" void wavevisc_gpu_wrapper_(int *glbblockSize1,double *d_t, double *d_csound,double *d_vx, double *d_vy, double *d_vz, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_vtrans, double *c_max, double *d_meshh, int *irho ){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); printf("CUDA: Start wavevisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d, lz1= %d,c_max= %lf,irho= %d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_max[0],irho[0]); #endif int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; double *d_wavespeed; hipMalloc((void**)&d_wavespeed,nelt[0]*lxyz* sizeof(double)); /* comment vtranstemp here for performance by Kk 04/11, since no use of it double *d_vtranstemp; hipMalloc((void**)&d_vtranstemp,nelt[0]*lxyz* sizeof(double)); hipMemcpy(d_vtranstemp, &d_vtrans[(irho[0]-1)*lelt[0]*lxyz], nelt[0]*lxyz* sizeof(double), hipMemcpyDeviceToDevice);*/ int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( wavevisc_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_t,d_csound, d_vx, d_vy, d_vz,ntot,d_wavespeed, lxyz,lx1[0],ly1[0],lz1[0],c_max[0], ltot, d_meshh);//deleted parameter d_vtranstemp since no use hipFree(d_wavespeed); //hipFree(d_vtranstemp); //comment by Kk 04/11 #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); printf("CUDA: End Wavevisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2)); #endif } __global__ void max_to_trilin_gpu_kernel(double *t,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, double *xm1, double *ym1, double *zm1, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lxyz); double p000 = t[2*ltot+e*lxyz]; double p100 = t[2*ltot+e*lxyz+(lx1-1)]; double p010 = t[2*ltot+e*lxyz+(ly1-1)*lx1]; double p110 = t[2*ltot+e*lxyz+(ly1-1)*lx1+(lx1-1)]; double p001 = t[2*ltot+e*lxyz+(lz1-1)*lxy]; double p101 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(lx1-1)]; double p011 = 
t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1]; double p111 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+(lx1-1)]; double c1=p100-p000; double c2=p010-p000; double c3=p001-p000; double c4=p110-p010-p100+p000; double c5=p011-p001-p010+p000; double c6=p101-p001-p100+p000; double c7=p111-p011-p101-p110+p100+p001+p010-p000; double rdx=1.0/(xm1[e*lxyz+(lx1-1)]-xm1[e*lxyz]); // cubes only!!! double rdy=1.0/(ym1[e*lxyz+(ly1-1)*lx1]-ym1[e*lxyz]); double rdz=0.0; if(if3d){ rdz=1.0/(zm1[e*lxyz+(lz1-1)*lxy]-zm1[e*lxyz]); } int firstlx = id%lxyz; double deltax=rdx*(xm1[id]-xm1[e*lxyz]) ;//! cubes only!!! double deltay=rdy*(ym1[id]-ym1[e*lxyz]); double deltaz=0.0; if (if3d){ deltaz=rdz*(zm1[id]-zm1[e*lxyz]);} t[2*ltot+id] =p000+c1*deltax+c2*deltay+c3*deltaz+ c4*deltax*deltay+c5*deltay*deltaz+ c6*deltaz*deltax+c7*deltay*deltaz*deltax; /*if(id ==ntot-1){ printf("debug max_to_trilin: %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %d\n", p000, c1, c2, c3, c4, c5, c6, c7, rdx, rdy, deltax, deltay, deltaz, t[2*ltot+id], if3d); }*/ } } extern "C" void max_to_trilin_gpu_wrapper_(int *glbblockSize1,double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_xm1, double *d_ym1, double *d_zm1, int *if3d ){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); // if (code1 != hipSuccess){ printf("CUDA: Start max_to_trilin_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],if3d[0]); #endif //} int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( max_to_trilin_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_xm1, d_ym1, d_zm1, if3d[0]); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); //if (code2 != hipSuccess){ printf("CUDA: End max_to_trilin_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2)); #endif //} } __global__ void resvisc_gpu_kernel1(double *res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double *meshh ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lx1*ly1*lz1); res2[id] = res2[id]*meshh[e]*meshh[e]; } } extern "C" void resvisc_gpu_wrapper1_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_meshh){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); //if (code1 != hipSuccess){ printf("CUDA: Start resvisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d,lz1 = %d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0]); #endif //} int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( resvisc_gpu_kernel1), dim3(gridSize), dim3(blockSize), 0, 0, d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_meshh); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); //if (code2 != hipSuccess){ printf("CUDA: End resvisc_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2)); #endif //} } __global__ void resvisc_gpu_kernel2(double 
*res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double c_sub_e, double maxdiff ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lx1*ly1*lz1); res2[id] = fabs(res2[id]); res2[id] = res2[id]*c_sub_e; // cmult if(maxdiff !=0){ double consta = 1/maxdiff; res2[id] = res2[id]*consta; } } } extern "C" void resvisc_gpu_wrapper2_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *c_sub_e, double *maxdiff){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); // if (code1 != hipSuccess){ printf("CUDA: Start resvisc_gpu_wrapper2 cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,c_sub_e=%lf,maxdiff= %.20lf, \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_sub_e[0],maxdiff[0]); #endif // } int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; int blockSize =glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( resvisc_gpu_kernel2), dim3(gridSize), dim3(blockSize), 0, 0, d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], c_sub_e[0], maxdiff[0]); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); // if (code2 != hipSuccess){ printf("CUDA: End resvisc_gpu_wrapper2 cuda status: %s\n",hipGetErrorString(code2)); #endif // } } __global__ void evnsmooth_gpu_kernel(double *res2, double *t, int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, int kstart, int kend, int jstart, int jend, int istart, int iend,int ldim , double rldim, double *rtmp, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lx1*ly1*lz1); if(t[2*ltot+id] <= res2[id]){ res2[id] = t[2*ltot+id];// wavevisc and resvisc are really res2 and t. but the dimensions are different. As I understand this will start from 0 and works well. Need to check with Dr.Tania . adeesha } //global syncthread is needed here. check with Dr.Tania. adeesha. rtmp[id] = res2[id]; int ix= id % lx1; int iy= (id/lx1)%ly1; int iz = (id / (lx1*ly1))%lz1; if((kstart<=iz && iz<=kend)&& (jstart<= iy && iy<= jend) && (istart<=ix && ix<=iend)){ int izm,izp; if(if3d){ int km1=iz-1; int kp1=iz+1; int izm=km1; if (km1 < 0){ izm=kp1;} // Guermond symmetry izp=kp1; if (kp1 > (lz1-1)){ izp=km1;} // Guermond symmetry } else{ izm=iz; izp=iz; } int jm1=iy-1; int jp1=iy+1; int iym=jm1; if (jm1 < 0){ iym=jp1;}// Guermond symmetry int iyp=jp1; if (jp1 > (ly1-1)){ iyp=jm1;} // Guermond symmetry int im1=ix-1; int ip1=ix+1; int ixm=im1; if (im1 < 0){ ixm=ip1;} // Guermond symmetry int ixp=ip1; if (ip1 > (lx1-1)) {ixp=im1 ;} // Guermond symmetry double x0 = res2[e*lxyz+iz*lxy+iy*lx1+ix]; double x1 = res2[e*lxyz+iz*lxy+iy*lx1+ixm]; double x2 = res2[e*lxyz+iz*lxy+iy*lx1+ixp]; double x3 = res2[e*lxyz+iz*lxy+iym*lx1+ix]; double x4 = res2[e*lxyz+iz*lxy+iyp*lx1+ix]; double x5,x6; if (if3d){ x5 = res2[e*lxyz+izm*lxy+iy*lx1+ixp]; x6 = res2[e*lxyz+izp*lxy+iy*lx1+ixp]; } else { x5=0.0; x6=0.0; } rtmp[id]=0.25*(2.0*ldim*x0+x1+x2+x3+x4+x5+x6)*rldim;// check whether this is same as rtmp [id]. 
adeesha } res2[id]=rtmp[id]; } } extern "C" void evnsmooth_gpu_wrapper_(int *glbblockSize1,double *d_res2, double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1,int *kstart, int *kend, int *jstart, int *jend, int *istart, int *iend, int *ldim , double *rldim, int *if3d ){ #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code1 = hipPeekAtLastError(); // if (code1 != hipSuccess){ printf("CUDA: Start evnsmooth_gpu_wrapper cuda status: %s\n",hipGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt =%d ,lelt=%d,lx1=%d,ly1=%d,lz1=%d,kstart=%d,kend=%d,jstart=%d,jend=%d,istart=%d,iend=%d,ldim=%d ,rldim=%lf,if3d=%d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],kstart[0],kend[0],jstart[0],jend[0],istart[0],iend[0],ldim[0] ,rldim[0],if3d[0]); #endif // } int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; double *d_rtmp; hipMalloc((void**)&d_rtmp,nelt[0]*lxyz* sizeof(double)); int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); hipLaunchKernelGGL(( evnsmooth_gpu_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_res2,d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0],(kstart[0]-1),(kend[0]-1),(jstart[0]-1),(jend[0]-1),(istart[0]-1),(iend[0]-1),ldim[0] ,rldim[0], d_rtmp, if3d[0] ); hipFree(d_rtmp); #ifdef DEBUGPRINT hipDeviceSynchronize(); hipError_t code2 = hipPeekAtLastError(); // if (code2 != hipSuccess){ printf("CUDA: End evnsmooth_gpu_wrapper cuda status: %s\n",hipGetErrorString(code2)); #endif // } }
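// compute_entropy_gpu_kernel above evaluates, at every grid point, the entropy density
//   S = rgam * rho * log(p / rho^gmaref)
// with the density clamped from below by ntol so the logarithm stays finite. A serial
// host-side sketch of the same per-point formula; the function name is illustrative.
#include <cmath>
#include <algorithm>

inline double entropy_density(double rho, double p,
                              double rgam, double gmaref, double ntol) {
  rho = std::max(rho, ntol);                               // guard against vanishing density
  return rgam * rho * std::log(p / std::pow(rho, gmaref));
}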
0c88261708ec15cbbbc8d712bb0a90f022676e3e.cu
#include <stdio.h> #include "cuda_helpers.h" #include <cublas_v2.h> //added by kk 04/26/2019 for cublasDgemm handle //#define DEBUGPRINT 0 __global__ void compute_entropy_gpu_kernel(double *tlag, double *pr, double *vtrans,int ntot, int irho, double ntol , double rgam, double gmaref,int ltot ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ double rho= fmax(vtrans[ltot*(irho-1)+id],ntol); tlag[id]=rgam*rho*log(pr[id]/(pow(rho,gmaref) )); } } extern "C" void compute_entropy_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_pr, double *d_vtrans,int *ntot, int *irho, double *ntol , double *rgam, double *gmaref, int *ltot){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); // if (code1 != cudaSuccess){ printf("CUDA: Start compute_entropy_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values ntot = %d, irho = %d, ntol = %lf, rgam = %lf, gmaref = %lf \n",ntot[0],irho[0],ntol[0],rgam[0],gmaref[0] ); #endif //} int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot[0]/blockSize); compute_entropy_gpu_kernel<<<gridSize, blockSize>>>(d_tlag,d_pr,d_vtrans,ntot[0],irho[0],ntol[0],rgam[0],gmaref[0],ltot[0]); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); //if (code2 != cudaSuccess){ printf("CUDA: End compute_engropy_wrapper cuda status: %s\n",cudaGetErrorString(code1)); #endif //} } __global__ void entropy_residual_flux_gpu_kernel(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzdlelt, double *vx, double *vy, double *vz, int if3d ){//lxyzd -> lxyzdlelt by Kk 03/16 int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ /*for performance, put *rdt to one equation by Kk 04/11 if(stage==1){ res2[id]=tlag[id]-tlag[ltot*lorder+id] ; } else{ res2[id]=tlag[id]-tlag[ltot+id] ; } res2[id] = res2[id]*rdt;*/ if(stage==1){ res2[id]=(tlag[id]-tlag[ltot*lorder+id])*rdt; } else{ res2[id]=(tlag[id]-tlag[ltot+id])*rdt; } // evaluate_entropy_flux(e) totalh[id]= vx[id]*tlag[id]; //totalh[lxyzd+id] = vy[id]*tlag[id]; //if(if3d){totalh[lxyzd*2+id] = vz[id]*tlag[id];} totalh[lxyzdlelt+id] = vy[id]*tlag[id]; //lxyzd -> lxyzdlelt by Kk 03/16 if(if3d){totalh[lxyzdlelt*2+id] = vz[id]*tlag[id];} //flux_div_mini(e) } } __global__ void flux_div_mini_gpu_kernel1(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *ur1,double *us1, double *ut1, double *ur2, double *us2, double *ut2, double *ur3, double *us3, double *ut3,double *ud, int ldd, double *jacmi, double *rxm1, double *sxm1, double *txm1, double *rym1, double *sym1, double *tym1,double *rzm1, double *szm1, double *tzm1, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; int i= id % ldd; if(id<ntot){ //something is wrong because ur us ut has only [i]. I think it should be [id] because I added *lelt later. Check again. 
adeesha // ur us ut [i] -> ur us ut[id] by Kk 03/22 ud[id] = jacmi[id] *( rxm1[id]*ur1[id]+ sxm1[id]*us1[id]+txm1[id]*ut1[id]); ud[id] = ud[id]+ jacmi[id] *( rym1[id]*ur2[id]+ sym1[id]*us2[id]+txm1[id]*ut2[id]); ud[id] = ud[id] + jacmi[id] *( rzm1[id]*ur3[id]+ szm1[id]*us3[id]+tzm1[id]*ut3[id]); //added by Kk 04/11 for performance res2[id] = res2[id] + ud[id]; } } __global__ void flux_div_mini_gpu_kernel2(double *tlag, double *res2,int ntot, double rdt, int stage, int lorder, int ltot, double *totalh, int lxyzd, double *ur1,double *us1, double *ut1, double *ur2, double *us2, double *ut2, double *ur3, double *us3, double *ut3,double *ud, int ldd, double *jacmi, double *rxm1, double *sxm1, double *txm1, double *rym1, double *sym1, double *tym1,double *rzm1, double *szm1, double *tzm1, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; int i= id % ldd; if(id<ntot){ //something is wrong because ur us ut has only [i]. I think it should be [id] because I added *lelt later. Check again. adeesha // ur us ut [i] -> ur us ut[id] by Kk 03/22 ud[id] = jacmi[id] *(rxm1[id]*ur1[id]+ sxm1[id]*us1[id]); ud[id] = ud[id]+ jacmi[id]*(rym1[id]*ur2[id]+ sym1[id]*us2[id]); //added by Kk 04/11 for performance res2[id] = res2[id] + ud[id]; } } //mxm multiplication __global__ void mxm1(double *a, int n1, double *b, int n2, double *c, int n3, int nelt, int aSize, int bSize, int cSize, int extraEq){ //calculate c(n1,n3) = a(n1,n2) X b(n2,n3) in c //in fortran the original calculation was // c(n3,n1) = b(n3,n2) X a(n2,n1) // a,b,cSize are single element size //extraEq, in case of a matrix has equation as an index int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<nelt*n1*n3){ int e = id/(n1*n3); int rc = id%(n1*n3); int i = rc/n3; int j = rc%n3; int cid = e*cSize + rc; int aid = e*aSize + extraEq + i*n2; int bid = e*bSize + j; c[cid] = 0; for(int k = 0; k<n2; k++) c[cid]+=a[aid+k]*b[bid+k*n3]; } } extern "C" void entropy_residual_gpu_wrapper_(int *glbblockSize1,double *d_tlag, double *d_res2,int *ntot, double *rdt, int *stage, int *lorder,int *ltot, int *lxd, int *lyd, int *lzd, double *d_vx, double *d_vy, double *d_vz, int *lx1, int *ly1, int *lz1, double *d_jacmi, double *d_rxm1, double *d_sxm1, double *d_txm1, double *d_rym1, double *d_sym1, double *d_tym1,double *d_rzm1, double *d_szm1, double *d_tzm1,int *if3d,int *nelt, double *d_dxm1, double *d_dxtm1, int *lelt, double *d_totalh){//added parameter lelt by Kk 03/16; added d_totalh to parameter to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); // if (code1 != cudaSuccess){ printf("CUDA: Start entropy_residual_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start entropy_residual_gpu_wrapper values rdt = %lf, stage = %d, lorder= %d,ltot = %d,lxd = %d, lyd = %d, lzd = %d, lx1 = %d,ly1 = %d,lz1 = %d,if3d = %d,nelt = %d \n",rdt[0], stage[0],lorder[0],ltot[0],lxd[0], lyd[0],lzd[0],lx1[0],ly1[0],lz1[0],if3d[0],nelt[0]); #endif //} //double *d_totalh_temp; // Anyway d_totalh seems not needed. check with Dr.Tania. 
adeesha; use d_totalh to replace d_totalh_temp by Kk04/11 double *d_ur1; double *d_us1; double *d_ut1; double *d_ur2; double *d_us2; double *d_ut2; double *d_ur3; double *d_us3; double *d_ut3; double *d_ud; int lxyzd = lxd[0]*lyd[0]*lzd[0]; int lxyzdlelt = lxd[0]*lyd[0]*lzd[0]*lelt[0]; int ldd = lx1[0]*ly1[0]*lz1[0]; //cudaMalloc((void**)&d_totalh_temp,3*lxyzd *nelt[0]* sizeof(double)); //cudaMalloc((void**)&d_totalh_temp,3*lxyzd *lelt[0]* sizeof(double));//note here changed to lelt by Kk, check if correct; use d_totalh to replace d_totalh_temp by Kk04/11 cudaMalloc((void**)&d_ur1,ldd * nelt[0]*sizeof(double)); //nelt[0] added later. need to double check. cudaMalloc((void**)&d_us1,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_ut1,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_ur2,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_us2,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_ut2,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_ur3,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_us3,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_ut3,ldd * nelt[0]*sizeof(double)); cudaMalloc((void**)&d_ud,nelt[0]*ldd * sizeof(double)); //cudaMemset(d_totalh_temp, 0.0, 3*lxyzd*lelt[0]*sizeof(double)); //nelt[0] -> lelt[0] by Kk 03/22; use d_totalh to replace d_totalh_temp by Kk04/11 /* comment here for performance, duplicate with below, by Kk04/11 cudaMemset(d_ur1, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_us1, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ut1, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ur2, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_us2, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ut2, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ur3, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_us3, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ut3, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ud, 0.0, ldd*nelt[0]*sizeof(double));*/ int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot[0]/blockSize); //ntot[0] = lxyz * nelt //create handle for gpu_local_grad3 kk 04/18 cublasHandle_t handle; cublasCreate(&handle); //lxyzd -> lxyzdlelt by Kk 03/16 //entropy_residual_flux_gpu_kernel<<<gridSize, blockSize>>>(d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh_temp, lxyzd, d_vx, d_vy, d_vz, if3d[0]); entropy_residual_flux_gpu_kernel<<<gridSize, blockSize>>>(d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh, lxyzdlelt, d_vx, d_vy, d_vz, if3d[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after kernel 1cuda status: %s\n",cudaGetErrorString(code1)); #endif //flux_div_mini(e) cudaMemset(d_ur1, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_us1, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ut1, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ur2, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_us2, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ut2, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ur3, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_us3, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ut3, 0.0, ldd*nelt[0]*sizeof(double)); cudaMemset(d_ud, 0.0, ldd*nelt[0]*sizeof(double)); if(if3d[0]){ gpu_local_grad3(handle, d_ur1,d_us1,d_ut1,d_totalh,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 1st gpu_local_grad3 cuda 
status: %s\n",cudaGetErrorString(code1)); #endif gpu_local_grad3(handle, d_ur2,d_us2,d_ut2,d_totalh+lxyzdlelt,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 2st gpu_local_grad3 cuda status: %s\n",cudaGetErrorString(code1)); #endif gpu_local_grad3(handle, d_ur3,d_us3,d_ut3,d_totalh+lxyzdlelt*2,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 3st gpu_local_grad3 cuda status: %s\n",cudaGetErrorString(code1)); #endif flux_div_mini_gpu_kernel1<<<gridSize, blockSize>>>(d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh, lxyzd, d_ur1,d_us1, d_ut1, d_ur2,d_us2, d_ut2, d_ur3, d_us3, d_ut3,d_ud, ldd, d_jacmi, d_rxm1, d_sxm1, d_txm1, d_rym1, d_sym1, d_tym1, d_rzm1, d_szm1, d_tzm1,if3d[0]); //use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after flux_div_mini_gpu_kernel1 cuda status: %s\n",cudaGetErrorString(code1)); #endif } else{ gpu_local_grad2(d_ur1,d_us1,d_totalh,lx1[0],d_dxm1,d_dxtm1,nelt[0]);//use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 1st gpu_local_grad2 cuda status: %s\n",cudaGetErrorString(code1)); #endif gpu_local_grad2(d_ur2,d_us2,d_totalh+lxyzdlelt,lx1[0],d_dxm1,d_dxtm1,nelt[0]); //use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after 2st gpu_local_grad2 cuda status: %s\n",cudaGetErrorString(code1)); #endif flux_div_mini_gpu_kernel2<<<gridSize, blockSize>>>(d_tlag,d_res2,ntot[0],rdt[0],stage[0],lorder[0], ltot[0], d_totalh, lxyzd, d_ur1,d_us1, d_ut1, d_ur2,d_us2, d_ut2, d_ur3, d_us3, d_ut3,d_ud, ldd, d_jacmi, d_rxm1, d_sxm1, d_txm1, d_rym1, d_sym1, d_tym1, d_rzm1, d_szm1, d_tzm1,if3d[0]); //use d_totalh to replace d_totalh_temp by Kk04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after flux_div_mini_gpu_kernel2 cuda status: %s\n",cudaGetErrorString(code1)); #endif } /*for performance, comment out nekadd2, put add in the flux_div_mini_gpu_kernel1 or flux_div_mini_gpu_kernel2 gpu_nekadd2(glbblockSize1[0], d_res2,d_ud, ntot[0]);*/ #ifdef DEBUGPRINT cudaDeviceSynchronize(); code1 = cudaPeekAtLastError(); printf("CUDA: entropy_residual_gpu_wrapper after nekadd2 cuda status: %s\n",cudaGetErrorString(code1)); #endif //cudaFree(d_totalh_temp); //use d_totalh to replace d_totalh_temp by Kk04/11 cudaFree(d_ur1); cudaFree(d_ur2); cudaFree(d_ur3); cudaFree(d_us1); cudaFree(d_us2); cudaFree(d_us3); cudaFree(d_ut1); cudaFree(d_ut2); cudaFree(d_ut3); cudaFree(d_ud); //destroy handle cublasDestroy(handle); //added by Kk 04/26 #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); printf("CUDA: End entropy residual_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2)); #endif } __global__ void wavevisc_gpu_kernel(double *t,double *csound, double *vx, double *vy, double *vz, int ntot, double *wavespeed,int lxyz, int lx1, int ly1, int lz1, double c_max,int ltot, double *meshh ){ //deleted parameter vtranstemp since no 
use int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ wavespeed[id]= csound [id] +sqrt(vx[id]*vx[id]+vy[id]*vy[id]+vz[id]*vz[id] ) ;//sqrtf -> sqrt, corrected by Kk 02/05 // find max of wavespeed using reduction __syncthreads(); unsigned int i = lxyz/2; int len = lxyz; int e= id/(lxyz); int startofcurrentelement = e*lxyz; while(i != 0){ if(id-startofcurrentelement <= i){ wavespeed[id] = fmax(fabs(wavespeed[id]),fabs(wavespeed[startofcurrentelement + (id+i)%len]));//fmaxf->fmax, corrected by Kk 02/05, fabs(double) } __syncthreads(); //added by Kk 02/05/2019 since the latter one may not correct when len is odd len = (len+1)/2; i = len/2; /* commented by Kk 02/05/2019 len = i; i /= 2;*/ } double maxeig = wavespeed[e*lxyz]; /*if(id%lxyz == 0){ printf("maxeig in wavevisc %d %.15lf %.15lf \n", id/lxyz, maxeig, c_max); }*/ // find max of vtrans using reduction. But never used? check with Dr.Tania //i = lxyz/2; //int e= id/(lx1*ly1*lz1); //int startofcurrentelement = id-e; //while(i != 0){ // if(id-startofcurrentelement < i){ // vtranstmp[id] = fmaxf(vtranstmp[id], vtranstmp[id + i]); // } // __syncthreads(); // i /= 2; //} //int rhomax = vtranstmp[id-e]; t[2*ltot+id] = c_max*maxeig*meshh[e]; // if(id<10){ // printf("$$$ print from cuda maxeig = %lf t[2*ltot+id]= %lf meshh[e]=%lf \n",maxeig,t[2*ltot+id],meshh[e]); // } } } extern "C" void wavevisc_gpu_wrapper_(int *glbblockSize1,double *d_t, double *d_csound,double *d_vx, double *d_vy, double *d_vz, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_vtrans, double *c_max, double *d_meshh, int *irho ){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); printf("CUDA: Start wavevisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d, lz1= %d,c_max= %lf,irho= %d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_max[0],irho[0]); #endif int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; double *d_wavespeed; cudaMalloc((void**)&d_wavespeed,nelt[0]*lxyz* sizeof(double)); /* comment vtranstemp here for performance by Kk 04/11, since no use of it double *d_vtranstemp; cudaMalloc((void**)&d_vtranstemp,nelt[0]*lxyz* sizeof(double)); cudaMemcpy(d_vtranstemp, &d_vtrans[(irho[0]-1)*lelt[0]*lxyz], nelt[0]*lxyz* sizeof(double), cudaMemcpyDeviceToDevice);*/ int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); wavevisc_gpu_kernel<<<gridSize, blockSize>>>(d_t,d_csound, d_vx, d_vy, d_vz,ntot,d_wavespeed, lxyz,lx1[0],ly1[0],lz1[0],c_max[0], ltot, d_meshh);//deleted parameter d_vtranstemp since no use cudaFree(d_wavespeed); //cudaFree(d_vtranstemp); //comment by Kk 04/11 #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); printf("CUDA: End Wavevisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2)); #endif } __global__ void max_to_trilin_gpu_kernel(double *t,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, double *xm1, double *ym1, double *zm1, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lxyz); double p000 = t[2*ltot+e*lxyz]; double p100 = t[2*ltot+e*lxyz+(lx1-1)]; double p010 = t[2*ltot+e*lxyz+(ly1-1)*lx1]; double p110 = t[2*ltot+e*lxyz+(ly1-1)*lx1+(lx1-1)]; double p001 = t[2*ltot+e*lxyz+(lz1-1)*lxy]; double p101 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(lx1-1)]; double p011 = t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1]; double p111 = 
t[2*ltot+e*lxyz+(lz1-1)*lxy+(ly1-1)*lx1+(lx1-1)]; double c1=p100-p000; double c2=p010-p000; double c3=p001-p000; double c4=p110-p010-p100+p000; double c5=p011-p001-p010+p000; double c6=p101-p001-p100+p000; double c7=p111-p011-p101-p110+p100+p001+p010-p000; double rdx=1.0/(xm1[e*lxyz+(lx1-1)]-xm1[e*lxyz]); // cubes only!!! double rdy=1.0/(ym1[e*lxyz+(ly1-1)*lx1]-ym1[e*lxyz]); double rdz=0.0; if(if3d){ rdz=1.0/(zm1[e*lxyz+(lz1-1)*lxy]-zm1[e*lxyz]); } int firstlx = id%lxyz; double deltax=rdx*(xm1[id]-xm1[e*lxyz]) ;//! cubes only!!! double deltay=rdy*(ym1[id]-ym1[e*lxyz]); double deltaz=0.0; if (if3d){ deltaz=rdz*(zm1[id]-zm1[e*lxyz]);} t[2*ltot+id] =p000+c1*deltax+c2*deltay+c3*deltaz+ c4*deltax*deltay+c5*deltay*deltaz+ c6*deltaz*deltax+c7*deltay*deltaz*deltax; /*if(id ==ntot-1){ printf("debug max_to_trilin: %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %.30lf, %d\n", p000, c1, c2, c3, c4, c5, c6, c7, rdx, rdy, deltax, deltay, deltaz, t[2*ltot+id], if3d); }*/ } } extern "C" void max_to_trilin_gpu_wrapper_(int *glbblockSize1,double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_xm1, double *d_ym1, double *d_zm1, int *if3d ){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); // if (code1 != cudaSuccess){ printf("CUDA: Start max_to_trilin_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,if3d=%d \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],if3d[0]); #endif //} int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); max_to_trilin_gpu_kernel<<<gridSize, blockSize>>>(d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_xm1, d_ym1, d_zm1, if3d[0]); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); //if (code2 != cudaSuccess){ printf("CUDA: End max_to_trilin_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2)); #endif //} } __global__ void resvisc_gpu_kernel1(double *res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double *meshh ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lx1*ly1*lz1); res2[id] = res2[id]*meshh[e]*meshh[e]; } } extern "C" void resvisc_gpu_wrapper1_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *d_meshh){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); //if (code1 != cudaSuccess){ printf("CUDA: Start resvisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt= %d,lelt= %d,lx1= %d,ly1= %d,lz1 = %d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0]); #endif //} int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); resvisc_gpu_kernel1<<<gridSize, blockSize>>>(d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], d_meshh); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); //if (code2 != cudaSuccess){ printf("CUDA: End resvisc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2)); #endif //} } __global__ void resvisc_gpu_kernel2(double *res2,int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy,double c_sub_e, double maxdiff ){ int id = 
blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lx1*ly1*lz1); res2[id] = fabs(res2[id]); res2[id] = res2[id]*c_sub_e; // cmult if(maxdiff !=0){ double consta = 1/maxdiff; res2[id] = res2[id]*consta; } } } extern "C" void resvisc_gpu_wrapper2_(int *glbblockSize1,double *d_res2, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1, double *c_sub_e, double *maxdiff){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); // if (code1 != cudaSuccess){ printf("CUDA: Start resvisc_gpu_wrapper2 cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt=%d,lelt=%d,lx1=%d,ly1=%d,lz1=%d,c_sub_e=%lf,maxdiff= %.20lf, \n",nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],c_sub_e[0],maxdiff[0]); #endif // } int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; int blockSize =glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); resvisc_gpu_kernel2<<<gridSize, blockSize>>>(d_res2,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0], c_sub_e[0], maxdiff[0]); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); // if (code2 != cudaSuccess){ printf("CUDA: End resvisc_gpu_wrapper2 cuda status: %s\n",cudaGetErrorString(code2)); #endif // } } __global__ void evnsmooth_gpu_kernel(double *res2, double *t, int ntot,int lxyz, int lx1, int ly1, int lz1,int ltot, int lxy, int kstart, int kend, int jstart, int jend, int istart, int iend,int ldim , double rldim, double *rtmp, int if3d ){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<ntot){ int e= id/(lx1*ly1*lz1); if(t[2*ltot+id] <= res2[id]){ res2[id] = t[2*ltot+id];// wavevisc and resvisc are really res2 and t. but the dimensions are different. As I understand this will start from 0 and works well. Need to check with Dr.Tania . adeesha } //global syncthread is needed here. check with Dr.Tania. adeesha. rtmp[id] = res2[id]; int ix= id % lx1; int iy= (id/lx1)%ly1; int iz = (id / (lx1*ly1))%lz1; if((kstart<=iz && iz<=kend)&& (jstart<= iy && iy<= jend) && (istart<=ix && ix<=iend)){ int izm,izp; if(if3d){ int km1=iz-1; int kp1=iz+1; izm=km1; // assign the outer izm declared above (was shadowed by a second declaration, leaving the outer izm uninitialized) if (km1 < 0){ izm=kp1;} // Guermond symmetry izp=kp1; if (kp1 > (lz1-1)){ izp=km1;} // Guermond symmetry } else{ izm=iz; izp=iz; } int jm1=iy-1; int jp1=iy+1; int iym=jm1; if (jm1 < 0){ iym=jp1;}// Guermond symmetry int iyp=jp1; if (jp1 > (ly1-1)){ iyp=jm1;} // Guermond symmetry int im1=ix-1; int ip1=ix+1; int ixm=im1; if (im1 < 0){ ixm=ip1;} // Guermond symmetry int ixp=ip1; if (ip1 > (lx1-1)) {ixp=im1 ;} // Guermond symmetry double x0 = res2[e*lxyz+iz*lxy+iy*lx1+ix]; double x1 = res2[e*lxyz+iz*lxy+iy*lx1+ixm]; double x2 = res2[e*lxyz+iz*lxy+iy*lx1+ixp]; double x3 = res2[e*lxyz+iz*lxy+iym*lx1+ix]; double x4 = res2[e*lxyz+iz*lxy+iyp*lx1+ix]; double x5,x6; if (if3d){ x5 = res2[e*lxyz+izm*lxy+iy*lx1+ixp]; x6 = res2[e*lxyz+izp*lxy+iy*lx1+ixp]; } else { x5=0.0; x6=0.0; } rtmp[id]=0.25*(2.0*ldim*x0+x1+x2+x3+x4+x5+x6)*rldim;// check whether this is same as rtmp [id]. 
adeesha } res2[id]=rtmp[id]; } } extern "C" void evnsmooth_gpu_wrapper_(int *glbblockSize1,double *d_res2, double *d_t, int *nelt, int *lelt, int *lx1, int *ly1, int *lz1,int *kstart, int *kend, int *jstart, int *jend, int *istart, int *iend, int *ldim , double *rldim, int *if3d ){ #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code1 = cudaPeekAtLastError(); // if (code1 != cudaSuccess){ printf("CUDA: Start evnsmooth_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1)); printf("CUDA: Start compute_entropy_gpu_wrapper values nelt =%d ,lelt=%d,lx1=%d,ly1=%d,lz1=%d,kstart=%d,kend=%d,jstart=%d,jend=%d,istart=%d,iend=%d,ldim=%d ,rldim=%lf,if3d=%d,\n", nelt[0],lelt[0],lx1[0],ly1[0],lz1[0],kstart[0],kend[0],jstart[0],jend[0],istart[0],iend[0],ldim[0] ,rldim[0],if3d[0]); #endif // } int ntot = nelt[0]*lx1[0]*ly1[0]*lz1[0]; int lxyz = lx1[0]*ly1[0]*lz1[0]; int ltot = lelt[0]*lxyz; double *d_rtmp; cudaMalloc((void**)&d_rtmp,nelt[0]*lxyz* sizeof(double)); int blockSize = glbblockSize1[0], gridSize; gridSize = (int)ceil((float)ntot/blockSize); evnsmooth_gpu_kernel<<<gridSize, blockSize>>>(d_res2,d_t,ntot,lxyz,lx1[0],ly1[0],lz1[0], ltot,lx1[0]*ly1[0],(kstart[0]-1),(kend[0]-1),(jstart[0]-1),(jend[0]-1),(istart[0]-1),(iend[0]-1),ldim[0] ,rldim[0], d_rtmp, if3d[0] ); cudaFree(d_rtmp); #ifdef DEBUGPRINT cudaDeviceSynchronize(); cudaError_t code2 = cudaPeekAtLastError(); // if (code2 != cudaSuccess){ printf("CUDA: End evnsmooth_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2)); #endif // } }
78f3154fb6d3b8fc2182458562fd7d6f96009b02.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "warmup.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( warmup), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
78f3154fb6d3b8fc2182458562fd7d6f96009b02.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "warmup.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); warmup<<<gridBlock,threadBlock>>>(out,in,nx,ny); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { warmup<<<gridBlock,threadBlock>>>(out,in,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { warmup<<<gridBlock,threadBlock>>>(out,in,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f2ebfb746f113ee6a3ea731a1ecd29378035ebb4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * This file is part of sublabel_relax. * * Copyright 2016 Thomas Möllenhoff <thomas dot moellenhoff at in dot tum dot de> * and Emanuel Laude <emanuel dot laude at in dot tum dot de> (Technical University of Munich) * * sublabel_relax is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * prost is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with sublabel_relax. If not, see <http://www.gnu.org/licenses/>. */ #include "prox_ind_epi_polyhedral_1d.hpp" #include "prost/prox/helper.hpp" #include "prost/prox/vector.hpp" #include "prost/config.hpp" #include "prost/exception.hpp" namespace prost { template<typename T> __global__ void ProxIndEpiPolyhedral1DKernel( T *d_res, const T *d_arg, const T *d_alpha, const T *d_beta, const T *d_pt_x, const T *d_pt_y, const size_t *d_count, const size_t *d_index, size_t count, bool interleaved) { size_t tx = threadIdx.x + blockDim.x * blockIdx.x; if(tx < count) { Vector<const T> arg(count, 2, interleaved, tx, d_arg); Vector<T> res(count, 2, interleaved, tx, d_res); // temporary result T result[2]; // read data from global memory // get v = (x0, y0) and alpha,beta and count,index T alpha = d_alpha[tx]; T beta = d_beta[tx]; size_t count_local = d_count[tx]; size_t index = d_index[tx]; T v[2] = { arg[0], arg[1] }; // compute vector normal to slope for feasibility-check T n_slope[2]; n_slope[0] = alpha; n_slope[1] = -1; T x1 = d_pt_x[index]; T y1 = d_pt_y[index]; T p[2]; p[0] = x1; p[1] = y1; bool feasible_left = helper::IsPointInHalfspace<T>(v, p, n_slope, 2); T n_halfspace[2]; n_halfspace[0] = 1; n_halfspace[1] = alpha; bool halfspace_left = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); bool projected = false; if(!feasible_left && halfspace_left) { // point is not feasible wrt to 0-th piece and // lies in rectangle => projection is the // respective half space projection T t = x1*n_slope[0] + y1*n_slope[1]; helper::ProjectHalfspace<T>(v, n_slope, t, result, 2); projected = true; } if(!projected) { for(size_t i = 0; i < count_local-1; i++) { // read "kink" at i+1 T x2 = d_pt_x[index+i+1]; T y2 = d_pt_y[index+i+1]; // compute slope T c = (y2-y1) / (x2-x1); // compute vector normal to slope n_slope[0] = c; n_slope[1] = -1; // check whether point v is feasible wrt i-th piece bool feasible_right = helper::IsPointInHalfspace<T>(v, p, n_slope, 2); n_halfspace[0] = -1; n_halfspace[1] = -c; bool halfspace_right = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); p[0] = x2; p[1] = y2; if(!feasible_left || !feasible_right) { // point is not feasible wrt to i-th piece or (i-1)-th piece if(!halfspace_left && !halfspace_right) { // point lies in (i-1)-th normal cone => projection is onto the "kink" result[0] = x1; result[1] = y1; projected = true; break; } // compute inverse normal -n s.t. 
the two normals n and -n // together with the two kinks define a rectangle n_halfspace[0] = -n_halfspace[0]; n_halfspace[1] = -n_halfspace[1]; // check whether point lies in i-th halfspace halfspace_left = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); if(halfspace_right && halfspace_left) { // point lies in i-th rectangle => projection is the // respective half space projection T t = x1*n_slope[0] + y1*n_slope[1]; helper::ProjectHalfspace<T>(v, n_slope, t, result, 2); projected = true; break; } } // hand over variables for next iteration x1 = x2; y1 = y2; feasible_left = feasible_right; } } if(!projected) { // compute vector normal to slope n_slope[0] = beta; n_slope[1] = -1; // check whether point v is feasible wrt i-th piece bool feasible_right = helper::IsPointInHalfspace<T>(v, p, n_slope, 2); n_halfspace[0] = -1; n_halfspace[1] = -beta; bool halfspace_right = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); if(!feasible_left || !feasible_right) { // point is not feasible wrt to i-th piece or (i-1)-th piece if(!halfspace_left && !halfspace_right) { // point lies in last normal cone => projection is the last "kink" result[0] = x1; result[1] = y1; projected = true; } else if(halfspace_right) { // point lies in last rectangle => projection is the // respective half space projection T t = x1*n_slope[0] + y1*n_slope[1]; helper::ProjectHalfspace<T>(v, n_slope, t, result, 2); projected = true; } } } // point has not been projected. That means we output the original point if(!projected) { result[0] = v[0]; result[1] = v[1]; } // write result to global memory res[0] = result[0]; res[1] = result[1]; } // if(tx < count) } template<typename T> ProxIndEpiPolyhedral1D<T>::ProxIndEpiPolyhedral1D( size_t index, size_t count, bool interleaved, const vector<T>& pt_x, const vector<T>& pt_y, const vector<T>& alpha, const vector<T>& beta, const vector<size_t>& count_vec, const vector<size_t>& index_vec) : ProxSeparableSum<T>(index, count, 2, interleaved, false), host_pt_x_(pt_x), host_pt_y_(pt_y), host_alpha_(alpha), host_beta_(beta), host_count_(count_vec), host_index_(index_vec) { } template<typename T> void ProxIndEpiPolyhedral1D<T>::Initialize() { if(host_pt_x_.empty() || host_pt_y_.empty() || host_alpha_.empty() || host_beta_.empty() || host_index_.empty() || host_count_.empty()) throw Exception("ProxIndEpiPolyhedral1D: empty data array!"); if(host_index_.size() != this->count() || host_count_.size() != this->count()) throw Exception("count doesn't match size of indices/counts array!"); // Test convexity for(size_t i = 0; i < this->count_; i++) { T slope_left = host_alpha_[i]; for(size_t j = host_index_[i]; j < host_index_[i] + host_count_[i] - 1; j++) { T slope_right = (host_pt_y_[j+1]-host_pt_y_[j]) / (host_pt_x_[j+1]-host_pt_x_[j]); if(slope_right < slope_left) { throw Exception("Non-convex energy passed to ProxIndEpiPolyhedral1D"); } slope_left = slope_right; } if(host_beta_[i] < slope_left) { throw Exception("Non-convex energy passed to ProxIndEpiPolyhedral1D"); } } // copy and allocate data on GPU try { dev_pt_x_ = host_pt_x_; dev_pt_y_ = host_pt_y_; dev_alpha_ = host_alpha_; dev_beta_ = host_beta_; dev_count_ = host_count_; dev_index_ = host_index_; } catch(std::bad_alloc& e) { throw Exception("Out of memory."); } } template<typename T> size_t ProxIndEpiPolyhedral1D<T>::gpu_mem_amount() const { return (host_pt_x_.size() + host_pt_y_.size() + host_alpha_.size() + host_beta_.size()) * sizeof(T) + (host_count_.size() + host_index_.size()) * sizeof(size_t); } template<typename T> void 
ProxIndEpiPolyhedral1D<T>::EvalLocal( const typename device_vector<T>::iterator& result_beg, const typename device_vector<T>::iterator& result_end, const typename device_vector<T>::const_iterator& arg_beg, const typename device_vector<T>::const_iterator& arg_end, const typename device_vector<T>::const_iterator& tau_beg, const typename device_vector<T>::const_iterator& tau_end, T tau, bool invert_tau) { dim3 block(kBlockSizeCUDA, 1, 1); dim3 grid((this->count_ + block.x - 1) / block.x, 1, 1); hipLaunchKernelGGL(( ProxIndEpiPolyhedral1DKernel<T>) , dim3(grid), dim3(block), 0, 0, thrust::raw_pointer_cast(&(*result_beg)), thrust::raw_pointer_cast(&(*arg_beg)), thrust::raw_pointer_cast(dev_alpha_.data()), thrust::raw_pointer_cast(dev_beta_.data()), thrust::raw_pointer_cast(dev_pt_x_.data()), thrust::raw_pointer_cast(dev_pt_y_.data()), thrust::raw_pointer_cast(dev_count_.data()), thrust::raw_pointer_cast(dev_index_.data()), this->count_, this->interleaved_); } template class ProxIndEpiPolyhedral1D<float>; template class ProxIndEpiPolyhedral1D<double>; } // namespace prost
f2ebfb746f113ee6a3ea731a1ecd29378035ebb4.cu
/** * This file is part of sublabel_relax. * * Copyright 2016 Thomas Möllenhoff <thomas dot moellenhoff at in dot tum dot de> * and Emanuel Laude <emanuel dot laude at in dot tum dot de> (Technical University of Munich) * * sublabel_relax is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * prost is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with sublabel_relax. If not, see <http://www.gnu.org/licenses/>. */ #include "prox_ind_epi_polyhedral_1d.hpp" #include "prost/prox/helper.hpp" #include "prost/prox/vector.hpp" #include "prost/config.hpp" #include "prost/exception.hpp" namespace prost { template<typename T> __global__ void ProxIndEpiPolyhedral1DKernel( T *d_res, const T *d_arg, const T *d_alpha, const T *d_beta, const T *d_pt_x, const T *d_pt_y, const size_t *d_count, const size_t *d_index, size_t count, bool interleaved) { size_t tx = threadIdx.x + blockDim.x * blockIdx.x; if(tx < count) { Vector<const T> arg(count, 2, interleaved, tx, d_arg); Vector<T> res(count, 2, interleaved, tx, d_res); // temporary result T result[2]; // read data from global memory // get v = (x0, y0) and alpha,beta and count,index T alpha = d_alpha[tx]; T beta = d_beta[tx]; size_t count_local = d_count[tx]; size_t index = d_index[tx]; T v[2] = { arg[0], arg[1] }; // compute vector normal to slope for feasibility-check T n_slope[2]; n_slope[0] = alpha; n_slope[1] = -1; T x1 = d_pt_x[index]; T y1 = d_pt_y[index]; T p[2]; p[0] = x1; p[1] = y1; bool feasible_left = helper::IsPointInHalfspace<T>(v, p, n_slope, 2); T n_halfspace[2]; n_halfspace[0] = 1; n_halfspace[1] = alpha; bool halfspace_left = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); bool projected = false; if(!feasible_left && halfspace_left) { // point is not feasible wrt to 0-th piece and // lies in rectangle => projection is the // respective half space projection T t = x1*n_slope[0] + y1*n_slope[1]; helper::ProjectHalfspace<T>(v, n_slope, t, result, 2); projected = true; } if(!projected) { for(size_t i = 0; i < count_local-1; i++) { // read "kink" at i+1 T x2 = d_pt_x[index+i+1]; T y2 = d_pt_y[index+i+1]; // compute slope T c = (y2-y1) / (x2-x1); // compute vector normal to slope n_slope[0] = c; n_slope[1] = -1; // check whether point v is feasible wrt i-th piece bool feasible_right = helper::IsPointInHalfspace<T>(v, p, n_slope, 2); n_halfspace[0] = -1; n_halfspace[1] = -c; bool halfspace_right = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); p[0] = x2; p[1] = y2; if(!feasible_left || !feasible_right) { // point is not feasible wrt to i-th piece or (i-1)-th piece if(!halfspace_left && !halfspace_right) { // point lies in (i-1)-th normal cone => projection is onto the "kink" result[0] = x1; result[1] = y1; projected = true; break; } // compute inverse normal -n s.t. 
the two normals n and -n // together with the two kinks define a rectangle n_halfspace[0] = -n_halfspace[0]; n_halfspace[1] = -n_halfspace[1]; // check whether point lies in i-th halfspace halfspace_left = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); if(halfspace_right && halfspace_left) { // point lies in i-th rectangle => projection is the // respective half space projection T t = x1*n_slope[0] + y1*n_slope[1]; helper::ProjectHalfspace<T>(v, n_slope, t, result, 2); projected = true; break; } } // hand over variables for next iteration x1 = x2; y1 = y2; feasible_left = feasible_right; } } if(!projected) { // compute vector normal to slope n_slope[0] = beta; n_slope[1] = -1; // check whether point v is feasible wrt i-th piece bool feasible_right = helper::IsPointInHalfspace<T>(v, p, n_slope, 2); n_halfspace[0] = -1; n_halfspace[1] = -beta; bool halfspace_right = helper::IsPointInHalfspace<T>(v, p, n_halfspace, 2); if(!feasible_left || !feasible_right) { // point is not feasible wrt to i-th piece or (i-1)-th piece if(!halfspace_left && !halfspace_right) { // point lies in last normal cone => projection is the last "kink" result[0] = x1; result[1] = y1; projected = true; } else if(halfspace_right) { // point lies in last rectangle => projection is the // respective half space projection T t = x1*n_slope[0] + y1*n_slope[1]; helper::ProjectHalfspace<T>(v, n_slope, t, result, 2); projected = true; } } } // point has not been projected. That means we output the original point if(!projected) { result[0] = v[0]; result[1] = v[1]; } // write result to global memory res[0] = result[0]; res[1] = result[1]; } // if(tx < count) } template<typename T> ProxIndEpiPolyhedral1D<T>::ProxIndEpiPolyhedral1D( size_t index, size_t count, bool interleaved, const vector<T>& pt_x, const vector<T>& pt_y, const vector<T>& alpha, const vector<T>& beta, const vector<size_t>& count_vec, const vector<size_t>& index_vec) : ProxSeparableSum<T>(index, count, 2, interleaved, false), host_pt_x_(pt_x), host_pt_y_(pt_y), host_alpha_(alpha), host_beta_(beta), host_count_(count_vec), host_index_(index_vec) { } template<typename T> void ProxIndEpiPolyhedral1D<T>::Initialize() { if(host_pt_x_.empty() || host_pt_y_.empty() || host_alpha_.empty() || host_beta_.empty() || host_index_.empty() || host_count_.empty()) throw Exception("ProxIndEpiPolyhedral1D: empty data array!"); if(host_index_.size() != this->count() || host_count_.size() != this->count()) throw Exception("count doesn't match size of indices/counts array!"); // Test convexity for(size_t i = 0; i < this->count_; i++) { T slope_left = host_alpha_[i]; for(size_t j = host_index_[i]; j < host_index_[i] + host_count_[i] - 1; j++) { T slope_right = (host_pt_y_[j+1]-host_pt_y_[j]) / (host_pt_x_[j+1]-host_pt_x_[j]); if(slope_right < slope_left) { throw Exception("Non-convex energy passed to ProxIndEpiPolyhedral1D"); } slope_left = slope_right; } if(host_beta_[i] < slope_left) { throw Exception("Non-convex energy passed to ProxIndEpiPolyhedral1D"); } } // copy and allocate data on GPU try { dev_pt_x_ = host_pt_x_; dev_pt_y_ = host_pt_y_; dev_alpha_ = host_alpha_; dev_beta_ = host_beta_; dev_count_ = host_count_; dev_index_ = host_index_; } catch(std::bad_alloc& e) { throw Exception("Out of memory."); } } template<typename T> size_t ProxIndEpiPolyhedral1D<T>::gpu_mem_amount() const { return (host_pt_x_.size() + host_pt_y_.size() + host_alpha_.size() + host_beta_.size()) * sizeof(T) + (host_count_.size() + host_index_.size()) * sizeof(size_t); } template<typename T> void 
ProxIndEpiPolyhedral1D<T>::EvalLocal( const typename device_vector<T>::iterator& result_beg, const typename device_vector<T>::iterator& result_end, const typename device_vector<T>::const_iterator& arg_beg, const typename device_vector<T>::const_iterator& arg_end, const typename device_vector<T>::const_iterator& tau_beg, const typename device_vector<T>::const_iterator& tau_end, T tau, bool invert_tau) { dim3 block(kBlockSizeCUDA, 1, 1); dim3 grid((this->count_ + block.x - 1) / block.x, 1, 1); ProxIndEpiPolyhedral1DKernel<T> <<<grid, block>>>(thrust::raw_pointer_cast(&(*result_beg)), thrust::raw_pointer_cast(&(*arg_beg)), thrust::raw_pointer_cast(dev_alpha_.data()), thrust::raw_pointer_cast(dev_beta_.data()), thrust::raw_pointer_cast(dev_pt_x_.data()), thrust::raw_pointer_cast(dev_pt_y_.data()), thrust::raw_pointer_cast(dev_count_.data()), thrust::raw_pointer_cast(dev_index_.data()), this->count_, this->interleaved_); } template class ProxIndEpiPolyhedral1D<float>; template class ProxIndEpiPolyhedral1D<double>; } // namespace prost
7e5d8980666e5d0decf0dace9bab3bb0cb9fa1f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/scatter.h> #include <thrust/execution_policy.h> #include <algorithm> #ifdef THRUST_TEST_DEVICE_SIDE template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3> __global__ void scatter_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 result) { thrust::scatter(exec, first, last, map_first, result); } template<typename ExecutionPolicy> void TestScatterDevice(ExecutionPolicy exec) { size_t n = 1000; const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<int> h_input(n, 1); thrust::device_vector<int> d_input(n, 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) { h_map[i] = h_map[i] % output_size; } thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<int> h_output(output_size, 0); thrust::device_vector<int> d_output(output_size, 0); thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin()); hipLaunchKernelGGL(( scatter_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_map.begin(), d_output.begin()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); ASSERT_EQUAL(h_output, d_output); } void TestScatterDeviceSeq() { TestScatterDevice(thrust::seq); } DECLARE_UNITTEST(TestScatterDeviceSeq); void TestScatterDeviceDevice() { TestScatterDevice(thrust::device); } DECLARE_UNITTEST(TestScatterDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Function> __global__ void scatter_if_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 stencil_first, Iterator4 result, Function f) { thrust::scatter_if(exec, first, last, map_first, stencil_first, result, f); } template<typename T> struct is_even_scatter_if { __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template<typename ExecutionPolicy> void TestScatterIfDevice(ExecutionPolicy exec) { size_t n = 1000; const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<int> h_input(n, 1); thrust::device_vector<int> d_input(n, 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) { h_map[i] = h_map[i] % output_size; } thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<int> h_output(output_size, 0); thrust::device_vector<int> d_output(output_size, 0); thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>()); hipLaunchKernelGGL(( scatter_if_kernel), dim3(1),dim3(1), 0, 0, exec, d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>()); hipError_t const err = hipDeviceSynchronize(); ASSERT_EQUAL(hipSuccess, err); ASSERT_EQUAL(h_output, d_output); } void TestScatterIfDeviceSeq() { TestScatterIfDevice(thrust::seq); } DECLARE_UNITTEST(TestScatterIfDeviceSeq); void TestScatterIfDeviceDevice() { TestScatterIfDevice(thrust::device); } DECLARE_UNITTEST(TestScatterIfDeviceDevice); #endif void TestScatterCudaStreams() { typedef thrust::device_vector<int> Vector; Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector map[0] = 6; map[1] = 3; map[2] = 1; 
map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; hipStream_t s; hipStreamCreate(&s); thrust::scatter(thrust::hip::par.on(s), src.begin(), src.end(), map.begin(), dst.begin()); hipStreamSynchronize(s); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 4); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); hipStreamDestroy(s); } DECLARE_UNITTEST(TestScatterCudaStreams); void TestScatterIfCudaStreams() { typedef thrust::device_vector<int> Vector; Vector flg(5); // predicate array Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; hipStream_t s; hipStreamCreate(&s); thrust::scatter_if(thrust::hip::par.on(s), src.begin(), src.end(), map.begin(), flg.begin(), dst.begin()); hipStreamSynchronize(s); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 0); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); hipStreamDestroy(s); } DECLARE_UNITTEST(TestScatterIfCudaStreams);
7e5d8980666e5d0decf0dace9bab3bb0cb9fa1f3.cu
#include <unittest/unittest.h> #include <thrust/scatter.h> #include <thrust/execution_policy.h> #include <algorithm> #ifdef THRUST_TEST_DEVICE_SIDE template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3> __global__ void scatter_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 result) { thrust::scatter(exec, first, last, map_first, result); } template<typename ExecutionPolicy> void TestScatterDevice(ExecutionPolicy exec) { size_t n = 1000; const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<int> h_input(n, 1); thrust::device_vector<int> d_input(n, 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) { h_map[i] = h_map[i] % output_size; } thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<int> h_output(output_size, 0); thrust::device_vector<int> d_output(output_size, 0); thrust::scatter(h_input.begin(), h_input.end(), h_map.begin(), h_output.begin()); scatter_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_map.begin(), d_output.begin()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); ASSERT_EQUAL(h_output, d_output); } void TestScatterDeviceSeq() { TestScatterDevice(thrust::seq); } DECLARE_UNITTEST(TestScatterDeviceSeq); void TestScatterDeviceDevice() { TestScatterDevice(thrust::device); } DECLARE_UNITTEST(TestScatterDeviceDevice); template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Function> __global__ void scatter_if_kernel(ExecutionPolicy exec, Iterator1 first, Iterator1 last, Iterator2 map_first, Iterator3 stencil_first, Iterator4 result, Function f) { thrust::scatter_if(exec, first, last, map_first, stencil_first, result, f); } template<typename T> struct is_even_scatter_if { __host__ __device__ bool operator()(const T i) const { return (i % 2) == 0; } }; template<typename ExecutionPolicy> void TestScatterIfDevice(ExecutionPolicy exec) { size_t n = 1000; const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<int> h_input(n, 1); thrust::device_vector<int> d_input(n, 1); thrust::host_vector<unsigned int> h_map = unittest::random_integers<unsigned int>(n); for(size_t i = 0; i < n; i++) { h_map[i] = h_map[i] % output_size; } thrust::device_vector<unsigned int> d_map = h_map; thrust::host_vector<int> h_output(output_size, 0); thrust::device_vector<int> d_output(output_size, 0); thrust::scatter_if(h_input.begin(), h_input.end(), h_map.begin(), h_map.begin(), h_output.begin(), is_even_scatter_if<unsigned int>()); scatter_if_kernel<<<1,1>>>(exec, d_input.begin(), d_input.end(), d_map.begin(), d_map.begin(), d_output.begin(), is_even_scatter_if<unsigned int>()); cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); ASSERT_EQUAL(h_output, d_output); } void TestScatterIfDeviceSeq() { TestScatterIfDevice(thrust::seq); } DECLARE_UNITTEST(TestScatterIfDeviceSeq); void TestScatterIfDeviceDevice() { TestScatterIfDevice(thrust::device); } DECLARE_UNITTEST(TestScatterIfDeviceDevice); #endif void TestScatterCudaStreams() { typedef thrust::device_vector<int> Vector; Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; 
dst[6] = 0; dst[7] = 0; cudaStream_t s; cudaStreamCreate(&s); thrust::scatter(thrust::cuda::par.on(s), src.begin(), src.end(), map.begin(), dst.begin()); cudaStreamSynchronize(s); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 2); ASSERT_EQUAL(dst[2], 4); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestScatterCudaStreams); void TestScatterIfCudaStreams() { typedef thrust::device_vector<int> Vector; Vector flg(5); // predicate array Vector map(5); // scatter indices Vector src(5); // source vector Vector dst(8); // destination vector flg[0] = 0; flg[1] = 1; flg[2] = 0; flg[3] = 1; flg[4] = 0; map[0] = 6; map[1] = 3; map[2] = 1; map[3] = 7; map[4] = 2; src[0] = 0; src[1] = 1; src[2] = 2; src[3] = 3; src[4] = 4; dst[0] = 0; dst[1] = 0; dst[2] = 0; dst[3] = 0; dst[4] = 0; dst[5] = 0; dst[6] = 0; dst[7] = 0; cudaStream_t s; cudaStreamCreate(&s); thrust::scatter_if(thrust::cuda::par.on(s), src.begin(), src.end(), map.begin(), flg.begin(), dst.begin()); cudaStreamSynchronize(s); ASSERT_EQUAL(dst[0], 0); ASSERT_EQUAL(dst[1], 0); ASSERT_EQUAL(dst[2], 0); ASSERT_EQUAL(dst[3], 1); ASSERT_EQUAL(dst[4], 0); ASSERT_EQUAL(dst[5], 0); ASSERT_EQUAL(dst[6], 0); ASSERT_EQUAL(dst[7], 3); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestScatterIfCudaStreams);
a08342511e7a0acac99eee89b8aa6389bae8a7a7.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> #include <ATen/AccumulateType.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void logaddexp_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND( ScalarType::BFloat16, iter.dtype(), "logaddexp_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t { if (::isinf(static_cast<accscalar_t>(a)) && a == b) { return a; } else { scalar_t m = ::max(a, b); return m + ::log((scalar_t)(1.0) + ::exp(-::abs(a - b))); } }); }); } void logaddexp2_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND( ScalarType::BFloat16, iter.dtype(), "logaddexp2_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t { if (::isinf(static_cast<accscalar_t>(a)) && a == b) { return a; } else { scalar_t m = ::max(a, b); return m + ::log2((scalar_t)(1.0) + ::pow((scalar_t)(2.0), -::abs(a - b))); } }); }); } REGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_cuda); REGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_cuda); }} // namespace at::native
a08342511e7a0acac99eee89b8aa6389bae8a7a7.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> #include <ATen/AccumulateType.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void logaddexp_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND( ScalarType::BFloat16, iter.dtype(), "logaddexp_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t { if (::isinf(static_cast<accscalar_t>(a)) && a == b) { return a; } else { scalar_t m = ::max(a, b); return m + ::log((scalar_t)(1.0) + ::exp(-::abs(a - b))); } }); }); } void logaddexp2_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_FLOATING_TYPES_AND( ScalarType::BFloat16, iter.dtype(), "logaddexp2_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; gpu_kernel(iter, [] GPU_LAMBDA (scalar_t a, scalar_t b) -> scalar_t { if (::isinf(static_cast<accscalar_t>(a)) && a == b) { return a; } else { scalar_t m = ::max(a, b); return m + ::log2((scalar_t)(1.0) + ::pow((scalar_t)(2.0), -::abs(a - b))); } }); }); } REGISTER_DISPATCH(logaddexp_stub, &logaddexp_kernel_cuda); REGISTER_DISPATCH(logaddexp2_stub, &logaddexp2_kernel_cuda); }} // namespace at::native
2abab553134fbac6ce195177d09d716d261ff143.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017-2022 XGBoost contributors */ #include <gtest/gtest.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <xgboost/base.h> #include <random> #include <string> #include <vector> #include "../../../src/common/common.h" #include "../../../src/data/sparse_page_source.h" #include "../../../src/tree/constraints.cuh" #include "../../../src/tree/updater_gpu_common.cuh" #include "../../../src/tree/updater_gpu_hist.cu" #include "../filesystem.h" // dmlc::TemporaryDirectory #include "../helpers.h" #include "../histogram_helpers.h" #include "xgboost/context.h" #include "xgboost/json.h" namespace xgboost { namespace tree { TEST(GpuHist, DeviceHistogram) { // Ensures that node allocates correctly after reaching `kStopGrowingSize`. dh::safe_cuda(hipSetDevice(0)); constexpr size_t kNBins = 128; constexpr int kNNodes = 4; constexpr size_t kStopGrowing = kNNodes * kNBins * 2u; DeviceHistogramStorage<kStopGrowing> histogram; histogram.Init(0, kNBins); for (int i = 0; i < kNNodes; ++i) { histogram.AllocateHistograms({i}); } histogram.Reset(); ASSERT_EQ(histogram.Data().size(), kStopGrowing); // Use allocated memory but do not erase nidx_map. for (int i = 0; i < kNNodes; ++i) { histogram.AllocateHistograms({i}); } for (int i = 0; i < kNNodes; ++i) { ASSERT_TRUE(histogram.HistogramExists(i)); } // Add two new nodes histogram.AllocateHistograms({kNNodes}); histogram.AllocateHistograms({kNNodes + 1}); // Old cached nodes should still exist for (int i = 0; i < kNNodes; ++i) { ASSERT_TRUE(histogram.HistogramExists(i)); } // Should be deleted ASSERT_FALSE(histogram.HistogramExists(kNNodes)); // Most recent node should exist ASSERT_TRUE(histogram.HistogramExists(kNNodes + 1)); // Add same node again - should fail EXPECT_ANY_THROW(histogram.AllocateHistograms({kNNodes + 1});); } std::vector<GradientPairPrecise> GetHostHistGpair() { // 24 bins, 3 bins for each feature (column). 
std::vector<GradientPairPrecise> hist_gpair = { {0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f}, {2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f}, {1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f}, {2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f}, {1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f}, {1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f}, {0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f}, {2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f} }; return hist_gpair; } template <typename GradientSumT> void TestBuildHist(bool use_shared_memory_histograms) { int const kNRows = 16, kNCols = 8; TrainParam param; std::vector<std::pair<std::string, std::string>> args { {"max_depth", "6"}, {"max_leaves", "0"}, }; param.Init(args); auto page = BuildEllpackPage(kNRows, kNCols); BatchParam batch_param{}; Context ctx{CreateEmptyGenericParam(0)}; GPUHistMakerDevice<GradientSumT> maker(&ctx, page.get(), {}, kNRows, param, kNCols, kNCols, batch_param); xgboost::SimpleLCG gen; xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f); HostDeviceVector<GradientPair> gpair(kNRows); for (auto &gp : gpair.HostVector()) { bst_float grad = dist(&gen); bst_float hess = dist(&gen); gp = GradientPair(grad, hess); } gpair.SetDevice(0); thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector()); maker.row_partitioner.reset(new RowPartitioner(0, kNRows)); maker.hist.AllocateHistograms({0}); maker.gpair = gpair.DeviceSpan(); maker.quantiser.reset(new GradientQuantiser(maker.gpair)); BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0), maker.feature_groups->DeviceAccessor(0), gpair.DeviceSpan(), maker.row_partitioner->GetRows(0), maker.hist.GetNodeHistogram(0), *maker.quantiser, !use_shared_memory_histograms); DeviceHistogramStorage<>& d_hist = maker.hist; auto node_histogram = d_hist.GetNodeHistogram(0); // d_hist.data stored in float, not gradient pair thrust::host_vector<GradientPairInt64> h_result (node_histogram.size()); dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(), hipMemcpyDeviceToHost)); std::vector<GradientPairPrecise> solution = GetHostHistGpair(); for (size_t i = 0; i < h_result.size(); ++i) { auto result = maker.quantiser->ToFloatingPoint(h_result[i]); EXPECT_NEAR(result.GetGrad(), solution[i].GetGrad(), 0.01f); EXPECT_NEAR(result.GetHess(), solution[i].GetHess(), 0.01f); } } TEST(GpuHist, BuildHistGlobalMem) { TestBuildHist<GradientPairPrecise>(false); } TEST(GpuHist, BuildHistSharedMem) { TestBuildHist<GradientPairPrecise>(true); } HistogramCutsWrapper GetHostCutMatrix () { HistogramCutsWrapper cmat; cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24}); cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f}); // 24 cut fields, 3 cut fields for each feature (column). // Each row of the cut represents the cuts for a data column. 
cmat.SetValues({0.30f, 0.67f, 1.64f, 0.32f, 0.77f, 1.95f, 0.29f, 0.70f, 1.80f, 0.32f, 0.75f, 1.85f, 0.18f, 0.59f, 1.69f, 0.25f, 0.74f, 2.00f, 0.26f, 0.74f, 1.98f, 0.26f, 0.71f, 1.83f}); return cmat; } inline GradientQuantiser DummyRoundingFactor() { thrust::device_vector<GradientPair> gpair(1); gpair[0] = {1000.f, 1000.f}; // Tests should not exceed sum of 1000 return GradientQuantiser(dh::ToSpan(gpair)); } void TestHistogramIndexImpl() { // Test if the compressed histogram index matches when using a sparse // dmatrix with and without using external memory int constexpr kNRows = 1000, kNCols = 10; // Build 2 matrices and build a histogram maker with that Context ctx(CreateEmptyGenericParam(0)); tree::GPUHistMaker hist_maker{&ctx, ObjInfo{ObjInfo::kRegression}}, hist_maker_ext{&ctx, ObjInfo{ObjInfo::kRegression}}; std::unique_ptr<DMatrix> hist_maker_dmat( CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true)); dmlc::TemporaryDirectory tempdir; std::unique_ptr<DMatrix> hist_maker_ext_dmat( CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir)); std::vector<std::pair<std::string, std::string>> training_params = { {"max_depth", "10"}, {"max_leaves", "0"} }; hist_maker.Configure(training_params); hist_maker.InitDataOnce(hist_maker_dmat.get()); hist_maker_ext.Configure(training_params); hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get()); // Extract the device maker from the histogram makers and from that its compressed // histogram index const auto &maker = hist_maker.maker; auto grad = GenerateRandomGradients(kNRows); grad.SetDevice(0); maker->Reset(&grad, hist_maker_dmat.get(), kNCols); std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector()); const auto &maker_ext = hist_maker_ext.maker; maker_ext->Reset(&grad, hist_maker_ext_dmat.get(), kNCols); std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector()); ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins()); ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size()); } TEST(GpuHist, TestHistogramIndex) { TestHistogramIndexImpl(); } void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, size_t gpu_page_size, RegTree* tree, HostDeviceVector<bst_float>* preds, float subsample = 1.0f, const std::string& sampling_method = "uniform", int max_bin = 2) { if (gpu_page_size > 0) { // Loop over the batches and count the records int64_t batch_count = 0; int64_t row_count = 0; for (const auto& batch : dmat->GetBatches<EllpackPage>({0, max_bin})) { EXPECT_LT(batch.Size(), dmat->Info().num_row_); batch_count++; row_count += batch.Size(); } EXPECT_GE(batch_count, 2); EXPECT_EQ(row_count, dmat->Info().num_row_); } Args args{ {"max_depth", "2"}, {"max_bin", std::to_string(max_bin)}, {"min_child_weight", "0.0"}, {"reg_alpha", "0"}, {"reg_lambda", "0"}, {"subsample", std::to_string(subsample)}, {"sampling_method", sampling_method}, }; Context ctx(CreateEmptyGenericParam(0)); tree::GPUHistMaker hist_maker{&ctx,ObjInfo{ObjInfo::kRegression}}; hist_maker.Configure(args); std::vector<HostDeviceVector<bst_node_t>> position(1); hist_maker.Update(gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position}, {tree}); auto cache = linalg::VectorView<float>{preds->DeviceSpan(), {preds->Size()}, 0}; hist_maker.UpdatePredictionCache(dmat, cache); } TEST(GpuHist, UniformSampling) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr float kSubsample = 0.9999; common::GlobalRandom().seed(1994); // 
Create an in-memory DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true)); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows); // Build another tree using sampling. RegTree tree_sampling; HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "uniform", kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_sampling_h = preds_sampling.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8); } } TEST(GpuHist, GradientBasedSampling) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr float kSubsample = 0.9999; common::GlobalRandom().seed(1994); // Create an in-memory DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true)); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows); // Build another tree using sampling. RegTree tree_sampling; HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "gradient_based", kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_sampling_h = preds_sampling.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3); } } TEST(GpuHist, ExternalMemory) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr size_t kPageSize = 1024; dmlc::TemporaryDirectory tmpdir; // Create a DMatrix with multiple batches. std::unique_ptr<DMatrix> dmat_ext( CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache")); // Create a single batch DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache")); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows); // Build another tree using multiple ELLPACK pages. RegTree tree_ext; HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0); UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_ext_h = preds_ext.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6); } } TEST(GpuHist, ExternalMemoryWithSampling) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr size_t kPageSize = 1024; constexpr float kSubsample = 0.5; const std::string kSamplingMethod = "gradient_based"; common::GlobalRandom().seed(0); dmlc::TemporaryDirectory tmpdir; // Create a single batch DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache")); // Create a DMatrix with multiple batches. std::unique_ptr<DMatrix> dmat_ext( CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache")); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. 
auto rng = common::GlobalRandom(); RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod, kRows); // Build another tree using multiple ELLPACK pages. common::GlobalRandom() = rng; RegTree tree_ext; HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0); UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, kSubsample, kSamplingMethod, kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_ext_h = preds_ext.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3); } } TEST(GpuHist, ConfigIO) { Context ctx(CreateEmptyGenericParam(0)); std::unique_ptr<TreeUpdater> updater{ TreeUpdater::Create("grow_gpu_hist", &ctx, ObjInfo{ObjInfo::kRegression})}; updater->Configure(Args{}); Json j_updater { Object() }; updater->SaveConfig(&j_updater); ASSERT_TRUE(IsA<Object>(j_updater["gpu_hist_train_param"])); ASSERT_TRUE(IsA<Object>(j_updater["train_param"])); updater->LoadConfig(j_updater); Json j_updater_roundtrip { Object() }; updater->SaveConfig(&j_updater_roundtrip); ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["gpu_hist_train_param"])); ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["train_param"])); ASSERT_EQ(j_updater, j_updater_roundtrip); } TEST(GpuHist, MaxDepth) { Context ctx(CreateEmptyGenericParam(0)); size_t constexpr kRows = 16; size_t constexpr kCols = 4; auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix(); auto learner = std::unique_ptr<Learner>(Learner::Create({p_mat})); learner->SetParam("max_depth", "32"); learner->Configure(); ASSERT_THROW({learner->UpdateOneIter(0, p_mat);}, dmlc::Error); } } // namespace tree } // namespace xgboost
2abab553134fbac6ce195177d09d716d261ff143.cu
/*! * Copyright 2017-2022 XGBoost contributors */ #include <gtest/gtest.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <xgboost/base.h> #include <random> #include <string> #include <vector> #include "../../../src/common/common.h" #include "../../../src/data/sparse_page_source.h" #include "../../../src/tree/constraints.cuh" #include "../../../src/tree/updater_gpu_common.cuh" #include "../../../src/tree/updater_gpu_hist.cu" #include "../filesystem.h" // dmlc::TemporaryDirectory #include "../helpers.h" #include "../histogram_helpers.h" #include "xgboost/context.h" #include "xgboost/json.h" namespace xgboost { namespace tree { TEST(GpuHist, DeviceHistogram) { // Ensures that node allocates correctly after reaching `kStopGrowingSize`. dh::safe_cuda(cudaSetDevice(0)); constexpr size_t kNBins = 128; constexpr int kNNodes = 4; constexpr size_t kStopGrowing = kNNodes * kNBins * 2u; DeviceHistogramStorage<kStopGrowing> histogram; histogram.Init(0, kNBins); for (int i = 0; i < kNNodes; ++i) { histogram.AllocateHistograms({i}); } histogram.Reset(); ASSERT_EQ(histogram.Data().size(), kStopGrowing); // Use allocated memory but do not erase nidx_map. for (int i = 0; i < kNNodes; ++i) { histogram.AllocateHistograms({i}); } for (int i = 0; i < kNNodes; ++i) { ASSERT_TRUE(histogram.HistogramExists(i)); } // Add two new nodes histogram.AllocateHistograms({kNNodes}); histogram.AllocateHistograms({kNNodes + 1}); // Old cached nodes should still exist for (int i = 0; i < kNNodes; ++i) { ASSERT_TRUE(histogram.HistogramExists(i)); } // Should be deleted ASSERT_FALSE(histogram.HistogramExists(kNNodes)); // Most recent node should exist ASSERT_TRUE(histogram.HistogramExists(kNNodes + 1)); // Add same node again - should fail EXPECT_ANY_THROW(histogram.AllocateHistograms({kNNodes + 1});); } std::vector<GradientPairPrecise> GetHostHistGpair() { // 24 bins, 3 bins for each feature (column). 
std::vector<GradientPairPrecise> hist_gpair = { {0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f}, {2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f}, {1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f}, {2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f}, {1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f}, {1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f}, {0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f}, {2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f} }; return hist_gpair; } template <typename GradientSumT> void TestBuildHist(bool use_shared_memory_histograms) { int const kNRows = 16, kNCols = 8; TrainParam param; std::vector<std::pair<std::string, std::string>> args { {"max_depth", "6"}, {"max_leaves", "0"}, }; param.Init(args); auto page = BuildEllpackPage(kNRows, kNCols); BatchParam batch_param{}; Context ctx{CreateEmptyGenericParam(0)}; GPUHistMakerDevice<GradientSumT> maker(&ctx, page.get(), {}, kNRows, param, kNCols, kNCols, batch_param); xgboost::SimpleLCG gen; xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f); HostDeviceVector<GradientPair> gpair(kNRows); for (auto &gp : gpair.HostVector()) { bst_float grad = dist(&gen); bst_float hess = dist(&gen); gp = GradientPair(grad, hess); } gpair.SetDevice(0); thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector()); maker.row_partitioner.reset(new RowPartitioner(0, kNRows)); maker.hist.AllocateHistograms({0}); maker.gpair = gpair.DeviceSpan(); maker.quantiser.reset(new GradientQuantiser(maker.gpair)); BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0), maker.feature_groups->DeviceAccessor(0), gpair.DeviceSpan(), maker.row_partitioner->GetRows(0), maker.hist.GetNodeHistogram(0), *maker.quantiser, !use_shared_memory_histograms); DeviceHistogramStorage<>& d_hist = maker.hist; auto node_histogram = d_hist.GetNodeHistogram(0); // d_hist.data stored in float, not gradient pair thrust::host_vector<GradientPairInt64> h_result (node_histogram.size()); dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(), cudaMemcpyDeviceToHost)); std::vector<GradientPairPrecise> solution = GetHostHistGpair(); for (size_t i = 0; i < h_result.size(); ++i) { auto result = maker.quantiser->ToFloatingPoint(h_result[i]); EXPECT_NEAR(result.GetGrad(), solution[i].GetGrad(), 0.01f); EXPECT_NEAR(result.GetHess(), solution[i].GetHess(), 0.01f); } } TEST(GpuHist, BuildHistGlobalMem) { TestBuildHist<GradientPairPrecise>(false); } TEST(GpuHist, BuildHistSharedMem) { TestBuildHist<GradientPairPrecise>(true); } HistogramCutsWrapper GetHostCutMatrix () { HistogramCutsWrapper cmat; cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24}); cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f}); // 24 cut fields, 3 cut fields for each feature (column). // Each row of the cut represents the cuts for a data column. 
cmat.SetValues({0.30f, 0.67f, 1.64f, 0.32f, 0.77f, 1.95f, 0.29f, 0.70f, 1.80f, 0.32f, 0.75f, 1.85f, 0.18f, 0.59f, 1.69f, 0.25f, 0.74f, 2.00f, 0.26f, 0.74f, 1.98f, 0.26f, 0.71f, 1.83f}); return cmat; } inline GradientQuantiser DummyRoundingFactor() { thrust::device_vector<GradientPair> gpair(1); gpair[0] = {1000.f, 1000.f}; // Tests should not exceed sum of 1000 return GradientQuantiser(dh::ToSpan(gpair)); } void TestHistogramIndexImpl() { // Test if the compressed histogram index matches when using a sparse // dmatrix with and without using external memory int constexpr kNRows = 1000, kNCols = 10; // Build 2 matrices and build a histogram maker with that Context ctx(CreateEmptyGenericParam(0)); tree::GPUHistMaker hist_maker{&ctx, ObjInfo{ObjInfo::kRegression}}, hist_maker_ext{&ctx, ObjInfo{ObjInfo::kRegression}}; std::unique_ptr<DMatrix> hist_maker_dmat( CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true)); dmlc::TemporaryDirectory tempdir; std::unique_ptr<DMatrix> hist_maker_ext_dmat( CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir)); std::vector<std::pair<std::string, std::string>> training_params = { {"max_depth", "10"}, {"max_leaves", "0"} }; hist_maker.Configure(training_params); hist_maker.InitDataOnce(hist_maker_dmat.get()); hist_maker_ext.Configure(training_params); hist_maker_ext.InitDataOnce(hist_maker_ext_dmat.get()); // Extract the device maker from the histogram makers and from that its compressed // histogram index const auto &maker = hist_maker.maker; auto grad = GenerateRandomGradients(kNRows); grad.SetDevice(0); maker->Reset(&grad, hist_maker_dmat.get(), kNCols); std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector()); const auto &maker_ext = hist_maker_ext.maker; maker_ext->Reset(&grad, hist_maker_ext_dmat.get(), kNCols); std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector()); ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins()); ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size()); } TEST(GpuHist, TestHistogramIndex) { TestHistogramIndexImpl(); } void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, size_t gpu_page_size, RegTree* tree, HostDeviceVector<bst_float>* preds, float subsample = 1.0f, const std::string& sampling_method = "uniform", int max_bin = 2) { if (gpu_page_size > 0) { // Loop over the batches and count the records int64_t batch_count = 0; int64_t row_count = 0; for (const auto& batch : dmat->GetBatches<EllpackPage>({0, max_bin})) { EXPECT_LT(batch.Size(), dmat->Info().num_row_); batch_count++; row_count += batch.Size(); } EXPECT_GE(batch_count, 2); EXPECT_EQ(row_count, dmat->Info().num_row_); } Args args{ {"max_depth", "2"}, {"max_bin", std::to_string(max_bin)}, {"min_child_weight", "0.0"}, {"reg_alpha", "0"}, {"reg_lambda", "0"}, {"subsample", std::to_string(subsample)}, {"sampling_method", sampling_method}, }; Context ctx(CreateEmptyGenericParam(0)); tree::GPUHistMaker hist_maker{&ctx,ObjInfo{ObjInfo::kRegression}}; hist_maker.Configure(args); std::vector<HostDeviceVector<bst_node_t>> position(1); hist_maker.Update(gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position}, {tree}); auto cache = linalg::VectorView<float>{preds->DeviceSpan(), {preds->Size()}, 0}; hist_maker.UpdatePredictionCache(dmat, cache); } TEST(GpuHist, UniformSampling) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr float kSubsample = 0.9999; common::GlobalRandom().seed(1994); // 
Create an in-memory DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true)); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows); // Build another tree using sampling. RegTree tree_sampling; HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "uniform", kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_sampling_h = preds_sampling.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8); } } TEST(GpuHist, GradientBasedSampling) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr float kSubsample = 0.9999; common::GlobalRandom().seed(1994); // Create an in-memory DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true)); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows); // Build another tree using sampling. RegTree tree_sampling; HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "gradient_based", kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_sampling_h = preds_sampling.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3); } } TEST(GpuHist, ExternalMemory) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr size_t kPageSize = 1024; dmlc::TemporaryDirectory tmpdir; // Create a DMatrix with multiple batches. std::unique_ptr<DMatrix> dmat_ext( CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache")); // Create a single batch DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache")); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows); // Build another tree using multiple ELLPACK pages. RegTree tree_ext; HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0); UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_ext_h = preds_ext.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6); } } TEST(GpuHist, ExternalMemoryWithSampling) { constexpr size_t kRows = 4096; constexpr size_t kCols = 2; constexpr size_t kPageSize = 1024; constexpr float kSubsample = 0.5; const std::string kSamplingMethod = "gradient_based"; common::GlobalRandom().seed(0); dmlc::TemporaryDirectory tmpdir; // Create a single batch DMatrix. std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache")); // Create a DMatrix with multiple batches. std::unique_ptr<DMatrix> dmat_ext( CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache")); auto gpair = GenerateRandomGradients(kRows); // Build a tree using the in-memory DMatrix. 
auto rng = common::GlobalRandom(); RegTree tree; HostDeviceVector<bst_float> preds(kRows, 0.0, 0); UpdateTree(&gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod, kRows); // Build another tree using multiple ELLPACK pages. common::GlobalRandom() = rng; RegTree tree_ext; HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0); UpdateTree(&gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, kSubsample, kSamplingMethod, kRows); // Make sure the predictions are the same. auto preds_h = preds.ConstHostVector(); auto preds_ext_h = preds_ext.ConstHostVector(); for (size_t i = 0; i < kRows; i++) { ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3); } } TEST(GpuHist, ConfigIO) { Context ctx(CreateEmptyGenericParam(0)); std::unique_ptr<TreeUpdater> updater{ TreeUpdater::Create("grow_gpu_hist", &ctx, ObjInfo{ObjInfo::kRegression})}; updater->Configure(Args{}); Json j_updater { Object() }; updater->SaveConfig(&j_updater); ASSERT_TRUE(IsA<Object>(j_updater["gpu_hist_train_param"])); ASSERT_TRUE(IsA<Object>(j_updater["train_param"])); updater->LoadConfig(j_updater); Json j_updater_roundtrip { Object() }; updater->SaveConfig(&j_updater_roundtrip); ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["gpu_hist_train_param"])); ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["train_param"])); ASSERT_EQ(j_updater, j_updater_roundtrip); } TEST(GpuHist, MaxDepth) { Context ctx(CreateEmptyGenericParam(0)); size_t constexpr kRows = 16; size_t constexpr kCols = 4; auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix(); auto learner = std::unique_ptr<Learner>(Learner::Create({p_mat})); learner->SetParam("max_depth", "32"); learner->Configure(); ASSERT_THROW({learner->UpdateOneIter(0, p_mat);}, dmlc::Error); } } // namespace tree } // namespace xgboost
75ef7a83abfbb7df542fc2a3ce12d1b4c673a9e0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2016 Athanassios Kintsakis. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Athanassios Kintsakis * contact: [email protected] */ #include <stdio.h> #include <stdlib.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <time.h> /* WNN and PSO parameters that also affect CUDA shared memory size */ #define particle_num 1024 #define inputs 24 //how many previous sequential inputs should be used to forecast #define hidd_neurons 2 #define particle_dimension ((3*inputs+1)*hidd_neurons) #define threadnum 256 //threadnum multiplied by hours_each equals size of training set #define hours_each 4 #define off 8 //how many values forward to predict, a value of 0 means 1 value forward, a value of 2 means 2 values forward etc #define training_time_hours (inputs+hours_each*threadnum+off) /* */ /* Calculate Fitness Function for all particles Kernel */ __global__ void calculate_particle_fitnesses_kernel(float* particles, float* lbest, float* set, float* particle_fitness) { int start_set = inputs + threadIdx.x*hours_each; int end_set = start_set + hours_each; __shared__ float particle_data[particle_dimension]; __shared__ float training_set[training_time_hours]; __shared__ float thread_error[threadnum]; __shared__ float total_thread_error; __shared__ int signal_value; thread_error[threadIdx.x] = 0; int i; if (particle_dimension < threadnum) { particle_data[threadIdx.x] = particles[blockIdx.x * particle_dimension + threadIdx.x]; } else { int particle_dimension_each = particle_dimension / threadnum; int extras = particle_dimension % threadnum; for (i = 0; i < particle_dimension_each; i++) { particle_data[threadIdx.x * particle_dimension_each + i] = particles[blockIdx.x * particle_dimension + threadIdx.x * particle_dimension_each + i]; } if (threadIdx.x == 0 && extras > 0) { for (i = 0; i < extras; i++) { particle_data[threadnum * particle_dimension_each + i] = particles[blockIdx.x * particle_dimension + threadnum * particle_dimension_each + i]; } } } __syncthreads(); if (training_time_hours < threadnum) { training_set[threadIdx.x] = particles[threadIdx.x]; } else { int training_time_hours_each = training_time_hours / threadnum; int training_time_hours_extras = training_time_hours % threadnum; for (i = 0; i < training_time_hours_each; i++) { training_set[i * threadnum + threadIdx.x] = set[i * threadnum + threadIdx.x]; } if (threadIdx.x == 0 && training_time_hours_extras > 0) { for (i = 0; i < training_time_hours_extras; i++) { training_set[threadnum * training_time_hours_each + i] = set[threadnum * training_time_hours_each + i]; } } } __syncthreads(); int n_offset = 3*inputs+1; for (int training_set_position = start_set; training_set_position < end_set; training_set_position++) { int input_scale_back = inputs; float llwnn_output = 0; for (int j = 0; j < hidd_neurons; j++) { float linear_factor = 0; input_scale_back = inputs; for (int k = 0; k < inputs; k++) { linear_factor = linear_factor + 
particle_data[ j * n_offset + k + 1 ] * training_set[training_set_position + k - input_scale_back]; } linear_factor = linear_factor + particle_data[ j * n_offset + 0 ]; float total_wavelet_factor = 0; for(int k = 0; k < inputs; k++) { float a = (float)abs(particle_data[ j * n_offset + inputs + 1 ]); if (a == 0) { a = 0.00000000001f; } float b = particle_data[ j * n_offset + inputs + inputs + 1 ]; float x = training_set[training_set_position + k - input_scale_back]; float in = (x - b) / (a); total_wavelet_factor = total_wavelet_factor + (float) pow(a, -0.5f) *((-(in * in) / 2.0f) * (float) exp(-(in * in) / 2.0f)); } llwnn_output = llwnn_output + linear_factor * total_wavelet_factor; } thread_error[threadIdx.x] = thread_error[threadIdx.x]+(training_set[training_set_position + off] - llwnn_output)*(training_set[training_set_position + off] - llwnn_output); } atomicAdd(&total_thread_error, thread_error[threadIdx.x]); __syncthreads(); if (threadIdx.x == 0) { signal_value = 0; float local_fitness = sqrt(total_thread_error / (float) (threadnum * hours_each)); if (local_fitness < particle_fitness[blockIdx.x]) { particle_fitness[blockIdx.x] = local_fitness; signal_value = 1; } } __syncthreads(); if (threadIdx.x < particle_dimension && signal_value == 1) { lbest[blockIdx.x * particle_dimension + threadIdx.x] = particle_data[threadIdx.x]; } } /* Find gbest particle Kernel */ __global__ void find_gbest_kernel(float *particle_best_positions, float *particle_fitnesses, float *gbest, float *current_gbest_fitness) { __shared__ float particle_mins[particle_num]; __shared__ int particle_mins_pos[particle_num]; __shared__ int signal_value; int thr = particle_num / 2; particle_mins[threadIdx.x] = particle_fitnesses[threadIdx.x]; particle_mins[thr + threadIdx.x] = particle_fitnesses[thr + threadIdx.x]; particle_mins_pos[threadIdx.x] = threadIdx.x; particle_mins_pos[thr + threadIdx.x] = thr + threadIdx.x; while (thr >= 1) { if (threadIdx.x < particle_num) { if (particle_mins[threadIdx.x] > particle_mins[thr + threadIdx.x]) { particle_mins[threadIdx.x] = particle_mins[threadIdx.x + thr]; particle_mins_pos[threadIdx.x] = particle_mins_pos[threadIdx.x + thr]; } } thr = thr / 2; __syncthreads(); } __syncthreads(); if (threadIdx.x == 0) { signal_value = 0; if (particle_mins[0] < current_gbest_fitness[0]) { current_gbest_fitness[0] = particle_mins[0]; signal_value = 1; //!!!! Disable the print for normal usage !!!! 
Use only for debug //printf("fitness: %f \n", current_gbest_fitness[0]); } } __syncthreads(); if (threadIdx.x < particle_dimension && signal_value == 1) { gbest[threadIdx.x] = particle_best_positions[particle_mins_pos[0] * particle_dimension + threadIdx.x]; } } /* Mark the bottom 25% particles, those with the highest fitness values Kernel */ __global__ void mark_worst_kernel(float *particle_fitnesses, float *particle_keep) { __shared__ float particle_mins[particle_num]; __shared__ int particle_mins_pos[particle_num]; int thr = particle_num / 2; particle_mins[threadIdx.x] = particle_fitnesses[threadIdx.x]; particle_mins[threadIdx.x + thr] = particle_fitnesses[thr + threadIdx.x]; particle_mins_pos[threadIdx.x] = threadIdx.x; particle_mins_pos[threadIdx.x + thr] = thr + threadIdx.x; while (thr >= particle_num / 4) { if (threadIdx.x < particle_num) { if (particle_mins[threadIdx.x] < particle_mins[threadIdx.x + thr]) { particle_mins[threadIdx.x] = particle_mins[threadIdx.x + thr]; particle_mins_pos[threadIdx.x] = particle_mins_pos[threadIdx.x + thr]; } } thr = thr / 2; __syncthreads(); } __syncthreads(); if (threadIdx.x < particle_num / 4) { particle_keep[particle_mins_pos[threadIdx.x]] = 1; } } /* Update positions of all particles Kernel */ __global__ void update_positions_kernel(float *particle_positions, float *particle_velocities, float *particle_lbest, float *particle_gbest, float *particle_keep, hiprandState_t* globalState, int k, int iterations_total, float b) //, hiprandState_t *my_curandstate) { hiprandState_t localState = globalState[blockIdx.x * blockDim.x + threadIdx.x]; int particle_dimension1 = particle_dimension; float Dmax1 = 1000000.0f; float Xmax1 = 1000000.0f; if (particle_keep[blockIdx.x] == 0) { float factor1 = (float) hiprand_uniform(&localState) * 2.0f; float factor2 = (float) hiprand_uniform(&localState) * 2.0f; globalState[blockIdx.x * blockDim.x + threadIdx.x] = localState; particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = 0.729f * particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] + (particle_lbest[blockIdx.x * particle_dimension1 + threadIdx.x] - particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]) * factor1 + (particle_gbest[threadIdx.x] - particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]) * factor2; if (particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] > Dmax1) { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = -Dmax1; } else if (particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] < -Dmax1) { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = +Dmax1; } particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] + particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x]; if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] > Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = Xmax1; } else if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] <= -Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = -Xmax1; } } else { float q = 0.5f; float temp2 = (float) hiprand_uniform(&localState); float temp1 = (float) hiprand_uniform(&localState); float temp3 = (float) hiprand_uniform(&localState); if (k > iterations_total / 3) { if (temp3 <= 0.5f) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) (particle_gbest[threadIdx.x] + (float) 
hiprand_uniform(&localState) * 500.0f * (float) (k / iterations_total)); particle_lbest[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]; } else { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) (particle_gbest[threadIdx.x] - (float) hiprand_uniform(&localState) * 500.0f * (float) (k / iterations_total)); particle_lbest[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]; } } globalState[blockIdx.x * blockDim.x + threadIdx.x] = localState; if (temp2 <= 0.5f) { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) ((1.0f / b) * (float) log(temp1 / (1.0f - q))); } else { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) (-(1.0f / b) * (float) log((1.0f - temp1) / q)); } particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] * 500.0f; particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] + particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x]; if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] > Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = Xmax1; } else if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] < -Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = -Xmax1; } } } /* Init hiprand states Kernel */ __global__ void setup_kernel(hiprandState_t * state, unsigned long seed) { int id = blockIdx.x * blockDim.x + threadIdx.x; hiprand_init(seed, id, 0, &state[id]); } int main(int argc, char **argv) { /* PSO Algorithm training parameters */ int num_iterations = atoi(argv[1]); float Dmax1 = 1000000.0; float b = 0.4; float b0 = 0.05; clock_t t; /* */ if (particle_dimension >= 1024) { printf("invalid particle_num size, maximum value is 1024 and your value was %d. Please reduce number of inputs or hidden neurons. 
\n", particle_dimension); return 1; } FILE *myFile; myFile = fopen(argv[2], "r"); if (myFile == NULL) { printf("Error opening input file \n"); exit(1); } hiprandState_t* devStates; hipMalloc(&devStates, particle_num * threadnum * sizeof ( hiprandState_t)); //initializes the hiprand kernles setup_kernel << < particle_num, particle_dimension >>> (devStates, (unsigned) time(NULL)); /* Read training set file into array */ float *host_training_set = (float *) malloc((training_time_hours + (training_time_hours * 0.3)) * sizeof (float)); //test set size = 0.3 * training set size //float *host_test_set = (float *) malloc(training_time_hours + training_time_hours * 0.3 * sizeof (float)); if (myFile == NULL) { printf("Error Reading File\n"); exit(0); } int i; for (i = 0; i < training_time_hours + (training_time_hours * 0.3); i++) { fscanf(myFile, "%f,", &host_training_set[i]); } fclose(myFile); /* */ /* Initialiaze variables, cuda mallocs and cuda mem copies */ float *host_gbest_fitness = (float *) malloc(1 * sizeof (float)); host_gbest_fitness[0] = 100000000.0; float *dev_gbest_fitness; hipMalloc(&dev_gbest_fitness, 1 * sizeof (float)); hipMemcpy(dev_gbest_fitness, host_gbest_fitness, 1 * sizeof (float), hipMemcpyHostToDevice); float *host_particles_present_position = (float *) malloc(particle_num * particle_dimension * sizeof (float)); float *host_particles_localbestpos = (float *) malloc(particle_num * particle_dimension * sizeof (float)); float *host_particles_velocity = (float *) malloc(particle_num * particle_dimension * sizeof (float)); //randomize initial weights for (i = 0; i < particle_num * particle_dimension; i++) { host_particles_present_position[i] = ((float) rand() / RAND_MAX) * Dmax1; if ((float) rand() / RAND_MAX < 0.5) { host_particles_present_position[i] = -host_particles_present_position[i]; } host_particles_localbestpos[i] = host_particles_present_position[i]; host_particles_velocity[i] = ((float) rand() / RAND_MAX) * Dmax1; if ((float) rand() / RAND_MAX < 0.5) { host_particles_velocity[i] = -host_particles_velocity[i]; } } int *host_particle_keep = (int *) malloc(particle_num * sizeof (int)); float *host_particles_fitness = (float *) malloc(particle_num * sizeof (float)); float *host_globalbestpos = (float *) malloc(particle_dimension * sizeof (float)); for (i = 0; i < particle_num; i++) { host_particles_fitness[i] = 100000000.0; host_particle_keep[i] = 0; } float *dev_particles_present_position, *dev_particles_localbestpos, *dev_particles_velocity; float *dev_training_set, *dev_particle_keep, *dev_particles_fitness, *dev_globalbestpos; hipMalloc(&dev_particles_present_position, particle_num * particle_dimension * sizeof (float)); hipMalloc(&dev_particles_localbestpos, particle_num * particle_dimension * sizeof (float)); hipMalloc(&dev_particles_velocity, particle_num * particle_dimension * sizeof (float)); hipMalloc(&dev_particles_fitness, particle_num * sizeof (float)); hipMalloc(&dev_training_set, training_time_hours * sizeof (float)); hipMalloc(&dev_globalbestpos, particle_dimension * sizeof (float)); hipMalloc(&dev_particle_keep, particle_num * sizeof (int)); hipMemcpy(dev_particles_present_position, host_particles_present_position, particle_num * particle_dimension * sizeof (float), hipMemcpyHostToDevice); hipMemcpy(dev_particles_localbestpos, host_particles_localbestpos, particle_num * particle_dimension * sizeof (float), hipMemcpyHostToDevice); hipMemcpy(dev_particles_velocity, host_particles_velocity, particle_num * particle_dimension * sizeof (float), 
hipMemcpyHostToDevice); hipMemcpy(dev_particles_fitness, host_particles_fitness, particle_num * sizeof (float), hipMemcpyHostToDevice); hipMemcpy(dev_training_set, host_training_set, training_time_hours * sizeof (float), hipMemcpyHostToDevice); hipMemcpy(dev_particle_keep, host_particle_keep, particle_num * sizeof (int), hipMemcpyHostToDevice); hipMemcpy(dev_globalbestpos, host_globalbestpos, particle_dimension * sizeof (float), hipMemcpyHostToDevice); /* */ //Training algorithm commences t = clock(); for (int i = 0; i < num_iterations; i++) { calculate_particle_fitnesses_kernel << <particle_num, threadnum>>>(dev_particles_present_position, dev_particles_localbestpos, dev_training_set, dev_particles_fitness); if (i > 1) { b = (((b - b0) * (num_iterations - i)) / num_iterations) + b0; } find_gbest_kernel << <1, particle_num / 2 >> >(dev_particles_localbestpos, dev_particles_fitness, dev_globalbestpos, dev_gbest_fitness); mark_worst_kernel << <1, particle_num / 2 >> >(dev_particles_fitness, dev_particle_keep); update_positions_kernel << <particle_num, particle_dimension>>>(dev_particles_present_position, dev_particles_velocity, dev_particles_localbestpos, dev_globalbestpos, dev_particle_keep, devStates, i, num_iterations, b); //, hiprandState_t *my_curandstate) } hipMemcpy(host_globalbestpos, dev_globalbestpos, particle_dimension * sizeof (float), hipMemcpyDeviceToHost); hipMemcpy(host_gbest_fitness, dev_gbest_fitness, 1 * sizeof (float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("CUDA RMSE on training set: %f \n", host_gbest_fitness[0]); t = clock() - t; printf("Training time took %d clicks (%f seconds).\n", t, ((float) t) / CLOCKS_PER_SEC); //Training algorithm completed //freeing memory hipFree(dev_particles_present_position); hipFree(dev_particles_localbestpos); hipFree(dev_particles_velocity); hipFree(dev_particles_fitness); hipFree(dev_training_set); /* Applying model on test data. By default testdata size is equal to 0.3 * training_data size and is located immediatelly after the training data, in the time series. 
Change it below with the Start and Stop indexes */ /* Predicted and Actual values are written to file results.txt */ FILE *f = fopen("results.txt", "w"); if (f == NULL) { printf("Error opening file results.txt \n"); exit(1); } int testSetStartPosition = training_time_hours; int testSetStopPosition = training_time_hours + training_time_hours * 0.3 - off; int n_offset = 3*inputs+1; float testSetRMSE = 0; float s = 2.0f; for (int SetCurrentPosition = testSetStartPosition; SetCurrentPosition < testSetStopPosition; SetCurrentPosition++) { float llwnn_output = 0; for (int j = 0; j < hidd_neurons; j++) { float linear_factor = 0; int input_scale_back = inputs; for (int k = 0; k < inputs; k++) { linear_factor = linear_factor + host_globalbestpos[ j * n_offset + k + 1 ] * host_training_set[SetCurrentPosition + k - input_scale_back]; } linear_factor = linear_factor + host_globalbestpos[ j * n_offset + 0 ]; float total_wavelet_factor = 0; for(int k = 0; k < inputs; k++) { float a = (float)abs(host_globalbestpos[ j * n_offset + inputs + 1 ]); if (a == 0) { a = 0.00000000001f; } float b = host_globalbestpos[ j * n_offset + inputs + inputs + 1 ]; float x = host_training_set[SetCurrentPosition + k - input_scale_back]; float in = (x - b) / (a); total_wavelet_factor = total_wavelet_factor + (float) pow(a, -0.5f) *((-(in * in) / 2.0f) * (float) exp(-(in * in) / 2.0f)); } llwnn_output = llwnn_output + linear_factor*total_wavelet_factor; } testSetRMSE = testSetRMSE + (llwnn_output - host_training_set[SetCurrentPosition + off]) * (llwnn_output - host_training_set[SetCurrentPosition + off]); fprintf(f, "%f %f \n",host_training_set[SetCurrentPosition + off], llwnn_output); } fclose(f); testSetRMSE = sqrt(testSetRMSE / (float) (testSetStopPosition - testSetStartPosition)); printf("Calculate RMSE on test set: %f \n", testSetRMSE); printf("\n"); printf("\n"); return 0; }
75ef7a83abfbb7df542fc2a3ce12d1b4c673a9e0.cu
/* * Copyright 2016 Athanassios Kintsakis. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Athanassios Kintsakis * contact: [email protected] */ #include <stdio.h> #include <stdlib.h> #include <curand.h> #include <curand_kernel.h> #include <time.h> /* WNN and PSO parameters that also affect CUDA shared memory size */ #define particle_num 1024 #define inputs 24 //how many previous sequential inputs should be used to forecast #define hidd_neurons 2 #define particle_dimension ((3*inputs+1)*hidd_neurons) #define threadnum 256 //threadnum multiplied by hours_each equals size of training set #define hours_each 4 #define off 8 //how many values forward to predict, a value of 0 means 1 value forward, a value of 2 means 2 values forward etc #define training_time_hours (inputs+hours_each*threadnum+off) /* */ /* Calculate Fitness Function for all particles Kernel */ __global__ void calculate_particle_fitnesses_kernel(float* particles, float* lbest, float* set, float* particle_fitness) { int start_set = inputs + threadIdx.x*hours_each; int end_set = start_set + hours_each; __shared__ float particle_data[particle_dimension]; __shared__ float training_set[training_time_hours]; __shared__ float thread_error[threadnum]; __shared__ float total_thread_error; __shared__ int signal_value; thread_error[threadIdx.x] = 0; int i; if (particle_dimension < threadnum) { particle_data[threadIdx.x] = particles[blockIdx.x * particle_dimension + threadIdx.x]; } else { int particle_dimension_each = particle_dimension / threadnum; int extras = particle_dimension % threadnum; for (i = 0; i < particle_dimension_each; i++) { particle_data[threadIdx.x * particle_dimension_each + i] = particles[blockIdx.x * particle_dimension + threadIdx.x * particle_dimension_each + i]; } if (threadIdx.x == 0 && extras > 0) { for (i = 0; i < extras; i++) { particle_data[threadnum * particle_dimension_each + i] = particles[blockIdx.x * particle_dimension + threadnum * particle_dimension_each + i]; } } } __syncthreads(); if (training_time_hours < threadnum) { training_set[threadIdx.x] = particles[threadIdx.x]; } else { int training_time_hours_each = training_time_hours / threadnum; int training_time_hours_extras = training_time_hours % threadnum; for (i = 0; i < training_time_hours_each; i++) { training_set[i * threadnum + threadIdx.x] = set[i * threadnum + threadIdx.x]; } if (threadIdx.x == 0 && training_time_hours_extras > 0) { for (i = 0; i < training_time_hours_extras; i++) { training_set[threadnum * training_time_hours_each + i] = set[threadnum * training_time_hours_each + i]; } } } __syncthreads(); int n_offset = 3*inputs+1; for (int training_set_position = start_set; training_set_position < end_set; training_set_position++) { int input_scale_back = inputs; float llwnn_output = 0; for (int j = 0; j < hidd_neurons; j++) { float linear_factor = 0; input_scale_back = inputs; for (int k = 0; k < inputs; k++) { linear_factor = linear_factor + particle_data[ j * n_offset + k + 1 ] * training_set[training_set_position + k - input_scale_back]; } 
linear_factor = linear_factor + particle_data[ j * n_offset + 0 ]; float total_wavelet_factor = 0; for(int k = 0; k < inputs; k++) { float a = (float)abs(particle_data[ j * n_offset + inputs + 1 ]); if (a == 0) { a = 0.00000000001f; } float b = particle_data[ j * n_offset + inputs + inputs + 1 ]; float x = training_set[training_set_position + k - input_scale_back]; float in = (x - b) / (a); total_wavelet_factor = total_wavelet_factor + (float) pow(a, -0.5f) *((-(in * in) / 2.0f) * (float) exp(-(in * in) / 2.0f)); } llwnn_output = llwnn_output + linear_factor * total_wavelet_factor; } thread_error[threadIdx.x] = thread_error[threadIdx.x]+(training_set[training_set_position + off] - llwnn_output)*(training_set[training_set_position + off] - llwnn_output); } atomicAdd(&total_thread_error, thread_error[threadIdx.x]); __syncthreads(); if (threadIdx.x == 0) { signal_value = 0; float local_fitness = sqrt(total_thread_error / (float) (threadnum * hours_each)); if (local_fitness < particle_fitness[blockIdx.x]) { particle_fitness[blockIdx.x] = local_fitness; signal_value = 1; } } __syncthreads(); if (threadIdx.x < particle_dimension && signal_value == 1) { lbest[blockIdx.x * particle_dimension + threadIdx.x] = particle_data[threadIdx.x]; } } /* Find gbest particle Kernel */ __global__ void find_gbest_kernel(float *particle_best_positions, float *particle_fitnesses, float *gbest, float *current_gbest_fitness) { __shared__ float particle_mins[particle_num]; __shared__ int particle_mins_pos[particle_num]; __shared__ int signal_value; int thr = particle_num / 2; particle_mins[threadIdx.x] = particle_fitnesses[threadIdx.x]; particle_mins[thr + threadIdx.x] = particle_fitnesses[thr + threadIdx.x]; particle_mins_pos[threadIdx.x] = threadIdx.x; particle_mins_pos[thr + threadIdx.x] = thr + threadIdx.x; while (thr >= 1) { if (threadIdx.x < particle_num) { if (particle_mins[threadIdx.x] > particle_mins[thr + threadIdx.x]) { particle_mins[threadIdx.x] = particle_mins[threadIdx.x + thr]; particle_mins_pos[threadIdx.x] = particle_mins_pos[threadIdx.x + thr]; } } thr = thr / 2; __syncthreads(); } __syncthreads(); if (threadIdx.x == 0) { signal_value = 0; if (particle_mins[0] < current_gbest_fitness[0]) { current_gbest_fitness[0] = particle_mins[0]; signal_value = 1; //!!!! Disable the print for normal usage !!!! 
Use only for debug //printf("fitness: %f \n", current_gbest_fitness[0]); } } __syncthreads(); if (threadIdx.x < particle_dimension && signal_value == 1) { gbest[threadIdx.x] = particle_best_positions[particle_mins_pos[0] * particle_dimension + threadIdx.x]; } } /* Mark the bottom 25% particles, those with the highest fitness values Kernel */ __global__ void mark_worst_kernel(float *particle_fitnesses, float *particle_keep) { __shared__ float particle_mins[particle_num]; __shared__ int particle_mins_pos[particle_num]; int thr = particle_num / 2; particle_mins[threadIdx.x] = particle_fitnesses[threadIdx.x]; particle_mins[threadIdx.x + thr] = particle_fitnesses[thr + threadIdx.x]; particle_mins_pos[threadIdx.x] = threadIdx.x; particle_mins_pos[threadIdx.x + thr] = thr + threadIdx.x; while (thr >= particle_num / 4) { if (threadIdx.x < particle_num) { if (particle_mins[threadIdx.x] < particle_mins[threadIdx.x + thr]) { particle_mins[threadIdx.x] = particle_mins[threadIdx.x + thr]; particle_mins_pos[threadIdx.x] = particle_mins_pos[threadIdx.x + thr]; } } thr = thr / 2; __syncthreads(); } __syncthreads(); if (threadIdx.x < particle_num / 4) { particle_keep[particle_mins_pos[threadIdx.x]] = 1; } } /* Update positions of all particles Kernel */ __global__ void update_positions_kernel(float *particle_positions, float *particle_velocities, float *particle_lbest, float *particle_gbest, float *particle_keep, curandState* globalState, int k, int iterations_total, float b) //, curandState *my_curandstate) { curandState localState = globalState[blockIdx.x * blockDim.x + threadIdx.x]; int particle_dimension1 = particle_dimension; float Dmax1 = 1000000.0f; float Xmax1 = 1000000.0f; if (particle_keep[blockIdx.x] == 0) { float factor1 = (float) curand_uniform(&localState) * 2.0f; float factor2 = (float) curand_uniform(&localState) * 2.0f; globalState[blockIdx.x * blockDim.x + threadIdx.x] = localState; particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = 0.729f * particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] + (particle_lbest[blockIdx.x * particle_dimension1 + threadIdx.x] - particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]) * factor1 + (particle_gbest[threadIdx.x] - particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]) * factor2; if (particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] > Dmax1) { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = -Dmax1; } else if (particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] < -Dmax1) { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = +Dmax1; } particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] + particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x]; if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] > Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = Xmax1; } else if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] <= -Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = -Xmax1; } } else { float q = 0.5f; float temp2 = (float) curand_uniform(&localState); float temp1 = (float) curand_uniform(&localState); float temp3 = (float) curand_uniform(&localState); if (k > iterations_total / 3) { if (temp3 <= 0.5f) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) (particle_gbest[threadIdx.x] + (float) 
curand_uniform(&localState) * 500.0f * (float) (k / iterations_total)); particle_lbest[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]; } else { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) (particle_gbest[threadIdx.x] - (float) curand_uniform(&localState) * 500.0f * (float) (k / iterations_total)); particle_lbest[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x]; } } globalState[blockIdx.x * blockDim.x + threadIdx.x] = localState; if (temp2 <= 0.5f) { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) ((1.0f / b) * (float) log(temp1 / (1.0f - q))); } else { particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = (float) (-(1.0f / b) * (float) log((1.0f - temp1) / q)); } particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x] * 500.0f; particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] + particle_velocities[blockIdx.x * particle_dimension1 + threadIdx.x]; if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] > Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = Xmax1; } else if (particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] < -Xmax1) { particle_positions[blockIdx.x * particle_dimension1 + threadIdx.x] = -Xmax1; } } } /* Init curand states Kernel */ __global__ void setup_kernel(curandState * state, unsigned long seed) { int id = blockIdx.x * blockDim.x + threadIdx.x; curand_init(seed, id, 0, &state[id]); } int main(int argc, char **argv) { /* PSO Algorithm training parameters */ int num_iterations = atoi(argv[1]); float Dmax1 = 1000000.0; float b = 0.4; float b0 = 0.05; clock_t t; /* */ if (particle_dimension >= 1024) { printf("invalid particle_num size, maximum value is 1024 and your value was %d. Please reduce number of inputs or hidden neurons. 
\n", particle_dimension); return 1; } FILE *myFile; myFile = fopen(argv[2], "r"); if (myFile == NULL) { printf("Error opening input file \n"); exit(1); } curandState* devStates; cudaMalloc(&devStates, particle_num * threadnum * sizeof ( curandState)); //initializes the curand kernles setup_kernel << < particle_num, particle_dimension >>> (devStates, (unsigned) time(NULL)); /* Read training set file into array */ float *host_training_set = (float *) malloc((training_time_hours + (training_time_hours * 0.3)) * sizeof (float)); //test set size = 0.3 * training set size //float *host_test_set = (float *) malloc(training_time_hours + training_time_hours * 0.3 * sizeof (float)); if (myFile == NULL) { printf("Error Reading File\n"); exit(0); } int i; for (i = 0; i < training_time_hours + (training_time_hours * 0.3); i++) { fscanf(myFile, "%f,", &host_training_set[i]); } fclose(myFile); /* */ /* Initialiaze variables, cuda mallocs and cuda mem copies */ float *host_gbest_fitness = (float *) malloc(1 * sizeof (float)); host_gbest_fitness[0] = 100000000.0; float *dev_gbest_fitness; cudaMalloc(&dev_gbest_fitness, 1 * sizeof (float)); cudaMemcpy(dev_gbest_fitness, host_gbest_fitness, 1 * sizeof (float), cudaMemcpyHostToDevice); float *host_particles_present_position = (float *) malloc(particle_num * particle_dimension * sizeof (float)); float *host_particles_localbestpos = (float *) malloc(particle_num * particle_dimension * sizeof (float)); float *host_particles_velocity = (float *) malloc(particle_num * particle_dimension * sizeof (float)); //randomize initial weights for (i = 0; i < particle_num * particle_dimension; i++) { host_particles_present_position[i] = ((float) rand() / RAND_MAX) * Dmax1; if ((float) rand() / RAND_MAX < 0.5) { host_particles_present_position[i] = -host_particles_present_position[i]; } host_particles_localbestpos[i] = host_particles_present_position[i]; host_particles_velocity[i] = ((float) rand() / RAND_MAX) * Dmax1; if ((float) rand() / RAND_MAX < 0.5) { host_particles_velocity[i] = -host_particles_velocity[i]; } } int *host_particle_keep = (int *) malloc(particle_num * sizeof (int)); float *host_particles_fitness = (float *) malloc(particle_num * sizeof (float)); float *host_globalbestpos = (float *) malloc(particle_dimension * sizeof (float)); for (i = 0; i < particle_num; i++) { host_particles_fitness[i] = 100000000.0; host_particle_keep[i] = 0; } float *dev_particles_present_position, *dev_particles_localbestpos, *dev_particles_velocity; float *dev_training_set, *dev_particle_keep, *dev_particles_fitness, *dev_globalbestpos; cudaMalloc(&dev_particles_present_position, particle_num * particle_dimension * sizeof (float)); cudaMalloc(&dev_particles_localbestpos, particle_num * particle_dimension * sizeof (float)); cudaMalloc(&dev_particles_velocity, particle_num * particle_dimension * sizeof (float)); cudaMalloc(&dev_particles_fitness, particle_num * sizeof (float)); cudaMalloc(&dev_training_set, training_time_hours * sizeof (float)); cudaMalloc(&dev_globalbestpos, particle_dimension * sizeof (float)); cudaMalloc(&dev_particle_keep, particle_num * sizeof (int)); cudaMemcpy(dev_particles_present_position, host_particles_present_position, particle_num * particle_dimension * sizeof (float), cudaMemcpyHostToDevice); cudaMemcpy(dev_particles_localbestpos, host_particles_localbestpos, particle_num * particle_dimension * sizeof (float), cudaMemcpyHostToDevice); cudaMemcpy(dev_particles_velocity, host_particles_velocity, particle_num * particle_dimension * sizeof (float), 
cudaMemcpyHostToDevice); cudaMemcpy(dev_particles_fitness, host_particles_fitness, particle_num * sizeof (float), cudaMemcpyHostToDevice); cudaMemcpy(dev_training_set, host_training_set, training_time_hours * sizeof (float), cudaMemcpyHostToDevice); cudaMemcpy(dev_particle_keep, host_particle_keep, particle_num * sizeof (int), cudaMemcpyHostToDevice); cudaMemcpy(dev_globalbestpos, host_globalbestpos, particle_dimension * sizeof (float), cudaMemcpyHostToDevice); /* */ //Training algorithm commences t = clock(); for (int i = 0; i < num_iterations; i++) { calculate_particle_fitnesses_kernel << <particle_num, threadnum>>>(dev_particles_present_position, dev_particles_localbestpos, dev_training_set, dev_particles_fitness); if (i > 1) { b = (((b - b0) * (num_iterations - i)) / num_iterations) + b0; } find_gbest_kernel << <1, particle_num / 2 >> >(dev_particles_localbestpos, dev_particles_fitness, dev_globalbestpos, dev_gbest_fitness); mark_worst_kernel << <1, particle_num / 2 >> >(dev_particles_fitness, dev_particle_keep); update_positions_kernel << <particle_num, particle_dimension>>>(dev_particles_present_position, dev_particles_velocity, dev_particles_localbestpos, dev_globalbestpos, dev_particle_keep, devStates, i, num_iterations, b); //, curandState *my_curandstate) } cudaMemcpy(host_globalbestpos, dev_globalbestpos, particle_dimension * sizeof (float), cudaMemcpyDeviceToHost); cudaMemcpy(host_gbest_fitness, dev_gbest_fitness, 1 * sizeof (float), cudaMemcpyDeviceToHost); cudaThreadSynchronize(); printf("CUDA RMSE on training set: %f \n", host_gbest_fitness[0]); t = clock() - t; printf("Training time took %d clicks (%f seconds).\n", t, ((float) t) / CLOCKS_PER_SEC); //Training algorithm completed //freeing memory cudaFree(dev_particles_present_position); cudaFree(dev_particles_localbestpos); cudaFree(dev_particles_velocity); cudaFree(dev_particles_fitness); cudaFree(dev_training_set); /* Applying model on test data. By default testdata size is equal to 0.3 * training_data size and is located immediatelly after the training data, in the time series. 
Change it below with the Start and Stop indexes */ /* Predicted and Actual values are written to file results.txt */ FILE *f = fopen("results.txt", "w"); if (f == NULL) { printf("Error opening file results.txt \n"); exit(1); } int testSetStartPosition = training_time_hours; int testSetStopPosition = training_time_hours + training_time_hours * 0.3 - off; int n_offset = 3*inputs+1; float testSetRMSE = 0; float s = 2.0f; for (int SetCurrentPosition = testSetStartPosition; SetCurrentPosition < testSetStopPosition; SetCurrentPosition++) { float llwnn_output = 0; for (int j = 0; j < hidd_neurons; j++) { float linear_factor = 0; int input_scale_back = inputs; for (int k = 0; k < inputs; k++) { linear_factor = linear_factor + host_globalbestpos[ j * n_offset + k + 1 ] * host_training_set[SetCurrentPosition + k - input_scale_back]; } linear_factor = linear_factor + host_globalbestpos[ j * n_offset + 0 ]; float total_wavelet_factor = 0; for(int k = 0; k < inputs; k++) { float a = (float)abs(host_globalbestpos[ j * n_offset + inputs + 1 ]); if (a == 0) { a = 0.00000000001f; } float b = host_globalbestpos[ j * n_offset + inputs + inputs + 1 ]; float x = host_training_set[SetCurrentPosition + k - input_scale_back]; float in = (x - b) / (a); total_wavelet_factor = total_wavelet_factor + (float) pow(a, -0.5f) *((-(in * in) / 2.0f) * (float) exp(-(in * in) / 2.0f)); } llwnn_output = llwnn_output + linear_factor*total_wavelet_factor; } testSetRMSE = testSetRMSE + (llwnn_output - host_training_set[SetCurrentPosition + off]) * (llwnn_output - host_training_set[SetCurrentPosition + off]); fprintf(f, "%f %f \n",host_training_set[SetCurrentPosition + off], llwnn_output); } fclose(f); testSetRMSE = sqrt(testSetRMSE / (float) (testSetStopPosition - testSetStartPosition)); printf("Calculate RMSE on test set: %f \n", testSetRMSE); printf("\n"); printf("\n"); return 0; }
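In this pair hipify's changes are confined to API renames: the curand header, curandState and curand_* calls become their hiprand counterparts, cudaMalloc/cudaMemcpy become hipMalloc/hipMemcpy, and the deprecated cudaThreadSynchronize() is mapped to hipDeviceSynchronize(), while the triple-chevron kernel launches are left unchanged. A minimal self-contained sketch of the per-thread RNG pattern the file relies on (kernel names and sizes here are illustrative placeholders, not taken from the file):

#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>

__global__ void init_states(hiprandState_t *states, unsigned long seed) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    hiprand_init(seed, id, 0, &states[id]);      // curand equivalent: curand_init
}

__global__ void draw_uniform(hiprandState_t *states, float *out) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    hiprandState_t local = states[id];           // work on a register copy of the state
    out[id] = hiprand_uniform(&local);           // curand equivalent: curand_uniform
    states[id] = local;                          // write the advanced state back
}

int main() {
    const int n = 256;
    hiprandState_t *d_states;
    float *d_out;
    hipMalloc(&d_states, n * sizeof(hiprandState_t));
    hipMalloc(&d_out, n * sizeof(float));
    init_states<<<1, n>>>(d_states, 1234UL);
    draw_uniform<<<1, n>>>(d_states, d_out);
    hipDeviceSynchronize();
    hipFree(d_states);
    hipFree(d_out);
    return 0;
}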
a3a1f1e3bd0344baf9d35457d8ca0c721e8a0752.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/relu_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } template <typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> __global__ void ReLUForwardJv(const int n, const Dtype* bottom_data, const Dtype* bottom_jv_data, Dtype* top_jv_data, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { top_jv_data[index] = bottom_jv_data[index] * ((bottom_data[index] > 0) + (bottom_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; } } template <typename Dtype> void ReLULayer<Dtype>::ForwardJv_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_jv_data = bottom[0]->gpu_diff(); Dtype* top_jv_data = top[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLUForwardJv<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom_jv_data, top_jv_data, negative_slope); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FORWARDJV(ReLULayer); INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
a3a1f1e3bd0344baf9d35457d8ca0c721e8a0752.cu
#include <algorithm> #include <vector> #include "caffe/layers/relu_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } template <typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> __global__ void ReLUForwardJv(const int n, const Dtype* bottom_data, const Dtype* bottom_jv_data, Dtype* top_jv_data, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { top_jv_data[index] = bottom_jv_data[index] * ((bottom_data[index] > 0) + (bottom_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; } } template <typename Dtype> void ReLULayer<Dtype>::ForwardJv_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_jv_data = bottom[0]->gpu_diff(); Dtype* top_jv_data = top[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUForwardJv<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_jv_data, top_jv_data, negative_slope); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FORWARDJV(ReLULayer); INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
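In the ReLU pair above the main structural rewrite is the kernel launch: CUDA's triple-chevron form becomes an explicit hipLaunchKernelGGL call that takes the grid, block, shared-memory size and stream as ordinary arguments, followed by the kernel's own arguments. A minimal self-contained sketch of that correspondence (the placeholder kernel is not taken from either file):

#include <hip/hip_runtime.h>

__global__ void scale_kernel(float *data, float factor) {
    // one thread per element, matching the launch geometry below
    data[blockIdx.x * blockDim.x + threadIdx.x] *= factor;
}

int main() {
    float *d_data;
    hipMalloc(&d_data, 256 * sizeof(float));
    // CUDA source form:        scale_kernel<<<dim3(1), dim3(256), 0, 0>>>(d_data, 2.0f);
    // form emitted by hipify:  hipLaunchKernelGGL(kernel, grid, block, shared_mem, stream, args...)
    hipLaunchKernelGGL(scale_kernel, dim3(1), dim3(256), 0, 0, d_data, 2.0f);
    hipDeviceSynchronize();
    hipFree(d_data);
    return 0;
}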
e350830585a5ad8974e305981aac469f0033a599.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #define N 512 using namespace std; __global__ void add(int *a, int *b, int *c){ c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; } void random_ints(int* a, int n){ int i; for(i=0; i < n; i++){ a[i] = rand(); } } int main(void){ // input int *a, *b, *c; // input to device int *d_a, *d_b, *d_c; int size = N * sizeof(int); // malloc d_a hipMalloc((void**) &d_a, size); hipMalloc((void**) &d_b, size); hipMalloc((void**) &d_c, size); a = (int *) malloc(size); random_ints(a, N); b = (int *) malloc(size); random_ints(b, N); c = (int *) malloc(size); hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); // use N blocks. hipLaunchKernelGGL(( add), dim3(N), dim3(1), 0, 0, d_a, d_b, d_c); hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost); hipFree(d_a); hipFree(d_b); hipFree(d_c); free(a); free(b); free(c); return 0; }
e350830585a5ad8974e305981aac469f0033a599.cu
#include<iostream> #define N 512 using namespace std; __global__ void add(int *a, int *b, int *c){ c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; } void random_ints(int* a, int n){ int i; for(i=0; i < n; i++){ a[i] = rand(); } } int main(void){ // input int *a, *b, *c; // input to device int *d_a, *d_b, *d_c; int size = N * sizeof(int); // malloc d_a cudaMalloc((void**) &d_a, size); cudaMalloc((void**) &d_b, size); cudaMalloc((void**) &d_c, size); a = (int *) malloc(size); random_ints(a, N); b = (int *) malloc(size); random_ints(b, N); c = (int *) malloc(size); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); // use N blocks. add<<<N, 1>>>(d_a, d_b, d_c); cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); free(a); free(b); free(c); return 0; }
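Neither version of the vector-add example above checks the status codes returned by its allocations and copies. A minimal sketch of how the HIP variant could surface such failures; the helper name check_hip is an illustrative choice, not part of the original file:

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Print a readable message and abort if a HIP runtime call fails.
static void check_hip(hipError_t err, const char *what) {
    if (err != hipSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, hipGetErrorString(err));
        exit(1);
    }
}

int main() {
    int *d_a = nullptr;
    check_hip(hipMalloc((void **) &d_a, 512 * sizeof(int)), "hipMalloc");
    check_hip(hipMemset(d_a, 0, 512 * sizeof(int)), "hipMemset");
    check_hip(hipFree(d_a), "hipFree");
    return 0;
}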
9340c26465085e3a716eca3d6da10f345c7c4c99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zpipelinedgmres.cu, normal z -> s, Wed Jan 2 14:18:53 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define REAL #define BLOCK_SIZE 512 template< int n > __device__ void sum_reduce( /*int n,*/ int i, float* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } __global__ void magma_spipelined_correction( int n, int k, float * skp, float * r, float * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float zz= 0.0, tmp= 0.0; extern __shared__ float temp[]; temp[ i ] = ( i < k ) ? skp[ i ] * skp[ i ] : MAGMA_S_MAKE( 0.0, 0.0); __syncthreads(); if (i < 64) { temp[ i ] += temp[ i + 64 ]; } __syncthreads(); if (i < 32) { temp[ i ] += temp[ i + 32 ]; __syncthreads(); temp[ i ] += temp[ i + 16 ]; __syncthreads(); temp[ i ] += temp[ i + 8 ]; __syncthreads(); temp[ i ] += temp[ i + 4 ]; __syncthreads(); temp[ i ] += temp[ i + 2 ]; __syncthreads(); temp[ i ] += temp[ i + 1 ]; __syncthreads(); } if ( i == 0 ) { tmp = MAGMA_S_REAL( temp[ i ] ); zz = MAGMA_S_REAL( skp[(k)] ); skp[k] = MAGMA_S_MAKE( sqrt(zz-tmp),0.0 ); } } __global__ void magma_spipelined_copyscale( int n, int k, float * skp, float * r, float * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float rr=skp[k]; if ( i < n ) { v[i] = r[i] * 1.0 / rr; } } //----------------------------------------------------------------------------// __global__ void magma_spipelinedsnrm2_kernel( int m, float * da, int ldda, float * dxnorm ) { const int i = threadIdx.x; magmaFloat_ptr dx = da + blockIdx.x * ldda; __shared__ float sum[ 512 ]; float re, lsum; // get norm of dx lsum = 0; for( int j = i; j < m; j += 512 ) { #ifdef REAL re = dx[j]; lsum += re*re; #else re = MAGMA_S_REAL( dx[j] ); float im = MAGMA_S_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; sum_reduce< 512 >( i, sum ); if (i==0) dxnorm[blockIdx.x] = MAGMA_S_MAKE( sqrt(sum[0]), 0.0 ); } //----------------------------------------------------------------------------// __global__ void magma_spipelinesscale( int n, float * r, float * drnorm ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<n ) { r[i] = r[i] * 1.0 / drnorm[0]; } } /** Purpose ------- Computes the correction term of the pipelined GMRES according to P. 
Ghysels and scales and copies the new search direction Returns the vector v = r/ ( skp[k] - (sum_i=1^k skp[i]^2) ) . Arguments --------- @param[in] n int length of v_i @param[in] k int # skp entries v_i^T * r ( without r ) @param[in] r magmaFloat_ptr vector of length n @param[in] v magmaFloat_ptr vector of length n @param[in] skp magmaFloat_ptr array of parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_saux ********************************************************************/ extern "C" magma_int_t magma_scopyscale( magma_int_t n, magma_int_t k, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr skp, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( k, BLOCK_SIZE ) ); unsigned int Ms = Bs.x * sizeof( float ); dim3 Gs2( magma_ceildiv( n, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_spipelined_correction), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , n, k, skp, r, v ); hipLaunchKernelGGL(( magma_spipelined_copyscale), dim3(Gs2), dim3(Bs), 0, queue->cuda_stream() , n, k, skp, r, v ); return MAGMA_SUCCESS; } extern "C" magma_int_t magma_snrm2scale( magma_int_t m, magmaFloat_ptr r, magma_int_t lddr, magmaFloat_ptr drnorm, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( 512 ); hipLaunchKernelGGL(( magma_spipelinedsnrm2_kernel), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, r, lddr, drnorm ); dim3 Bs( BLOCK_SIZE ); dim3 Gs2( magma_ceildiv( m, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_spipelinesscale), dim3(Gs2), dim3(Bs), 0, queue->cuda_stream() , m, r, drnorm ); return MAGMA_SUCCESS; }
9340c26465085e3a716eca3d6da10f345c7c4c99.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zpipelinedgmres.cu, normal z -> s, Wed Jan 2 14:18:53 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define REAL #define BLOCK_SIZE 512 template< int n > __device__ void sum_reduce( /*int n,*/ int i, float* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } __global__ void magma_spipelined_correction( int n, int k, float * skp, float * r, float * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float zz= 0.0, tmp= 0.0; extern __shared__ float temp[]; temp[ i ] = ( i < k ) ? skp[ i ] * skp[ i ] : MAGMA_S_MAKE( 0.0, 0.0); __syncthreads(); if (i < 64) { temp[ i ] += temp[ i + 64 ]; } __syncthreads(); if (i < 32) { temp[ i ] += temp[ i + 32 ]; __syncthreads(); temp[ i ] += temp[ i + 16 ]; __syncthreads(); temp[ i ] += temp[ i + 8 ]; __syncthreads(); temp[ i ] += temp[ i + 4 ]; __syncthreads(); temp[ i ] += temp[ i + 2 ]; __syncthreads(); temp[ i ] += temp[ i + 1 ]; __syncthreads(); } if ( i == 0 ) { tmp = MAGMA_S_REAL( temp[ i ] ); zz = MAGMA_S_REAL( skp[(k)] ); skp[k] = MAGMA_S_MAKE( sqrt(zz-tmp),0.0 ); } } __global__ void magma_spipelined_copyscale( int n, int k, float * skp, float * r, float * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float rr=skp[k]; if ( i < n ) { v[i] = r[i] * 1.0 / rr; } } //----------------------------------------------------------------------------// __global__ void magma_spipelinedsnrm2_kernel( int m, float * da, int ldda, float * dxnorm ) { const int i = threadIdx.x; magmaFloat_ptr dx = da + blockIdx.x * ldda; __shared__ float sum[ 512 ]; float re, lsum; // get norm of dx lsum = 0; for( int j = i; j < m; j += 512 ) { #ifdef REAL re = dx[j]; lsum += re*re; #else re = MAGMA_S_REAL( dx[j] ); float im = MAGMA_S_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; sum_reduce< 512 >( i, sum ); if (i==0) dxnorm[blockIdx.x] = MAGMA_S_MAKE( sqrt(sum[0]), 0.0 ); } //----------------------------------------------------------------------------// __global__ void magma_spipelinesscale( int n, float * r, float * drnorm ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<n ) { r[i] = r[i] * 1.0 / drnorm[0]; } } /** Purpose ------- Computes the correction term of the pipelined GMRES according to P. Ghysels and scales and copies the new search direction Returns the vector v = r/ ( skp[k] - (sum_i=1^k skp[i]^2) ) . 
Arguments --------- @param[in] n int length of v_i @param[in] k int # skp entries v_i^T * r ( without r ) @param[in] r magmaFloat_ptr vector of length n @param[in] v magmaFloat_ptr vector of length n @param[in] skp magmaFloat_ptr array of parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_saux ********************************************************************/ extern "C" magma_int_t magma_scopyscale( magma_int_t n, magma_int_t k, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr skp, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( k, BLOCK_SIZE ) ); unsigned int Ms = Bs.x * sizeof( float ); dim3 Gs2( magma_ceildiv( n, BLOCK_SIZE ) ); magma_spipelined_correction<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( n, k, skp, r, v ); magma_spipelined_copyscale<<< Gs2, Bs, 0, queue->cuda_stream() >>> ( n, k, skp, r, v ); return MAGMA_SUCCESS; } extern "C" magma_int_t magma_snrm2scale( magma_int_t m, magmaFloat_ptr r, magma_int_t lddr, magmaFloat_ptr drnorm, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( 512 ); magma_spipelinedsnrm2_kernel<<< blocks, threads, 0, queue->cuda_stream() >>> ( m, r, lddr, drnorm ); dim3 Bs( BLOCK_SIZE ); dim3 Gs2( magma_ceildiv( m, BLOCK_SIZE ) ); magma_spipelinesscale<<< Gs2, Bs, 0, queue->cuda_stream() >>>( m, r, drnorm ); return MAGMA_SUCCESS; }
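From the kernel bodies above: magma_scopyscale first overwrites skp[k] with sqrt(skp[k] - sum_{i<k} skp[i]^2) and then forms v = r / skp[k], while magma_snrm2scale writes ||r||_2 into drnorm[0] and scales r by 1/drnorm[0]. A hedged host-side sketch, assuming dr, dv, dskp and dnorm are float device arrays already allocated (e.g. with magma_smalloc) and queue is an existing magma_queue_t; the sizes are illustrative only:

magma_int_t n = 10000;   // vector length (assumed)
magma_int_t k = 4;       // number of accumulated dot products (assumed)
magma_scopyscale( n, k, dr, dv, dskp, queue );  // dv = dr / sqrt(dskp[k] - sum_{i<k} dskp[i]^2)
magma_snrm2scale( n, dr, n, dnorm, queue );     // dnorm[0] = ||dr||_2, then dr scaled by 1/dnorm[0]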
copy_make_border.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, PtrStepSz<T> dst, int top, int left) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < dst.cols && y < dst.rows) dst.ptr(y)[x] = src(y - top, x - left); } template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher { static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, int top, int left, const typename VecTraits<T>::elem_type* borderValue, hipStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue)); BorderReader< PtrStep<T>, B<T> > brdSrc(src, brd); hipLaunchKernelGGL(( copyMakeBorder), dim3(grid), dim3(block), 0, stream, brdSrc, dst, top, left); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } }; template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const T* borderValue, hipStream_t stream) { typedef typename TypeVec<T, cn>::vec_type vec_type; typedef void (*caller_t)(const PtrStepSz<vec_type>& src, const PtrStepSz<vec_type>& dst, int top, int left, const T* borderValue, hipStream_t stream); static const caller_t callers[5] = { CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call, CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call, CopyMakeBorderDispatcher<BrdConstant, vec_type>::call, CopyMakeBorderDispatcher<BrdReflect, vec_type>::call, CopyMakeBorderDispatcher<BrdWrap, vec_type>::call }; callers[borderMode](PtrStepSz<vec_type>(src), PtrStepSz<vec_type>(dst), top, left, borderValue, stream); } template void copyMakeBorder_gpu<uchar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<uchar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<uchar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<uchar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<schar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<schar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<schar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<schar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<ushort, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<ushort, 2>(const PtrStepSzb& src, 
const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<ushort, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<ushort, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<short, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<short, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<short, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<short, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<int, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<int, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<int, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<int, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<float, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream); //template void copyMakeBorder_gpu<float, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<float, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream); template void copyMakeBorder_gpu<float, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, hipStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
copy_make_border.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" namespace cv { namespace gpu { namespace device { namespace imgproc { template <typename Ptr2D, typename T> __global__ void copyMakeBorder(const Ptr2D src, PtrStepSz<T> dst, int top, int left) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < dst.cols && y < dst.rows) dst.ptr(y)[x] = src(y - top, x - left); } template <template <typename> class B, typename T> struct CopyMakeBorderDispatcher { static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, int top, int left, const typename VecTraits<T>::elem_type* borderValue, cudaStream_t stream) { dim3 block(32, 8); dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); B<T> brd(src.rows, src.cols, VecTraits<T>::make(borderValue)); BorderReader< PtrStep<T>, B<T> > brdSrc(src, brd); copyMakeBorder<<<grid, block, 0, stream>>>(brdSrc, dst, top, left); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } }; template <typename T, int cn> void copyMakeBorder_gpu(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const T* borderValue, cudaStream_t stream) { typedef typename TypeVec<T, cn>::vec_type vec_type; typedef void (*caller_t)(const PtrStepSz<vec_type>& src, const PtrStepSz<vec_type>& dst, int top, int left, const T* borderValue, cudaStream_t stream); static const caller_t callers[5] = { CopyMakeBorderDispatcher<BrdReflect101, vec_type>::call, CopyMakeBorderDispatcher<BrdReplicate, vec_type>::call, CopyMakeBorderDispatcher<BrdConstant, vec_type>::call, CopyMakeBorderDispatcher<BrdReflect, vec_type>::call, CopyMakeBorderDispatcher<BrdWrap, vec_type>::call }; callers[borderMode](PtrStepSz<vec_type>(src), PtrStepSz<vec_type>(dst), top, left, borderValue, stream); } template void copyMakeBorder_gpu<uchar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<uchar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<uchar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<uchar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const uchar* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<schar, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<schar, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<schar, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<schar, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const schar* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<ushort, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<ushort, 2>(const PtrStepSzb& src, const PtrStepSzb& 
dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<ushort, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<ushort, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const ushort* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<short, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<short, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<short, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<short, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const short* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<int, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<int, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<int, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<int, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const int* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<float, 1>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream); //template void copyMakeBorder_gpu<float, 2>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<float, 3>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream); template void copyMakeBorder_gpu<float, 4>(const PtrStepSzb& src, const PtrStepSzb& dst, int top, int left, int borderMode, const float* borderValue, cudaStream_t stream); } // namespace imgproc }}} // namespace cv { namespace gpu { namespace device #endif /* CUDA_DISABLER */
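A hedged usage sketch for the dispatcher above. It assumes src and dst are device images already wrapped as PtrStepSzb views (for example, views of cv::gpu::GpuMat data), with dst sized (top + src.rows + bottom) by (left + src.cols + right); borderMode indexes the callers[] table, so 0 = reflect-101, 1 = replicate, 2 = constant, 3 = reflect, 4 = wrap:

using namespace cv::gpu::device::imgproc;
uchar borderValue[1] = { 0 };                 // read only by the constant-border path
copyMakeBorder_gpu<uchar, 1>(src, dst,
                             /*top=*/8, /*left=*/8,
                             /*borderMode=*/2,  // BrdConstant
                             borderValue, /*stream=*/0);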
28b6f1a05b8ae431dec54ff0b24edf95dc9b1866.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //blackScholesAnalyticEngineKernels.cu //Scott Grauer-Gray //Kernels for running black scholes using the analytic engine #ifndef BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU #define BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU //declarations for the kernels #include "blackScholesAnalyticEngineKernels.cuh" //needed for the constants in the error function #include "errorFunctConsts.cuh" //device kernel to retrieve the compound factor in interestRate __device__ float interestRateCompoundFactor(float t, yieldTermStruct currYieldTermStruct) { return (expf((currYieldTermStruct.forward)*t)); } //device kernel to retrieve the discount factor in interestRate __device__ float interestRateDiscountFactor(float t, yieldTermStruct currYieldTermStruct) { return 1.0f / interestRateCompoundFactor(t, currYieldTermStruct); } //device function to get the variance of the black volatility function __device__ float getBlackVolBlackVar(blackVolStruct volTS) { float vol = volTS.volatility; return vol*vol*volTS.timeYearFraction; } //device function to get the discount on a dividend yield __device__ float getDiscountOnDividendYield(float yearFraction, yieldTermStruct dividendYieldTermStruct) { float intDiscountFactor = interestRateDiscountFactor(yearFraction, dividendYieldTermStruct); return intDiscountFactor; } //device function to get the discount on the risk free rate __device__ float getDiscountOnRiskFreeRate(float yearFraction, yieldTermStruct riskFreeRateYieldTermStruct) { return interestRateDiscountFactor(yearFraction, riskFreeRateYieldTermStruct); } //device kernel to run the error function __device__ float errorFunct(normalDistStruct normDist, float x) { float R,S,P,Q,s,y,z,r, ax; ax = fabsf(x); if(ax < 0.84375f) { if(ax < 3.7252902984e-09f) { if (ax < FLT_MIN*16.0f) return 0.125f*(8.0f*x+ (ERROR_FUNCT_efx8)*x); /*avoid underflow */ return x + (ERROR_FUNCT_efx)*x; } z = x*x; r = ERROR_FUNCT_pp0+z*(ERROR_FUNCT_pp1+z*(ERROR_FUNCT_pp2+z*(ERROR_FUNCT_pp3+z*ERROR_FUNCT_pp4))); s = ERROR_FUNCT_one+z*(ERROR_FUNCT_qq1+z*(ERROR_FUNCT_qq2+z*(ERROR_FUNCT_qq3+z*(ERROR_FUNCT_qq4+z*ERROR_FUNCT_qq5)))); y = r/s; return x + x*y; } if(ax <1.25f) { s = ax-ERROR_FUNCT_one; P = ERROR_FUNCT_pa0+s*(ERROR_FUNCT_pa1+s*(ERROR_FUNCT_pa2+s*(ERROR_FUNCT_pa3+s*(ERROR_FUNCT_pa4+s*(ERROR_FUNCT_pa5+s*ERROR_FUNCT_pa6))))); Q = ERROR_FUNCT_one+s*(ERROR_FUNCT_qa1+s*(ERROR_FUNCT_qa2+s*(ERROR_FUNCT_qa3+s*(ERROR_FUNCT_qa4+s*(ERROR_FUNCT_qa5+s*ERROR_FUNCT_qa6))))); if(x>=0.0f) return ERROR_FUNCT_erx + P/Q; else return -1.0f*ERROR_FUNCT_erx - P/Q; } if (ax >= 6.0f) { if(x>=0.0f) return ERROR_FUNCT_one-ERROR_FUNCT_tiny; else return ERROR_FUNCT_tiny-ERROR_FUNCT_one; } /* Starts to lose accuracy when ax~5 */ s = ERROR_FUNCT_one/(ax*ax); if(ax < 2.85714285714285f) { /* |x| < 1/0.35 */ R = ERROR_FUNCT_ra0+s*(ERROR_FUNCT_ra1+s*(ERROR_FUNCT_ra2+s*(ERROR_FUNCT_ra3+s*(ERROR_FUNCT_ra4+s*(ERROR_FUNCT_ra5+s*(ERROR_FUNCT_ra6+s*ERROR_FUNCT_ra7)))))); S = ERROR_FUNCT_one+s*(ERROR_FUNCT_sa1+s*(ERROR_FUNCT_sa2+s*(ERROR_FUNCT_sa3+s*(ERROR_FUNCT_sa4+s*(ERROR_FUNCT_sa5+s*(ERROR_FUNCT_sa6+s*(ERROR_FUNCT_sa7+s*ERROR_FUNCT_sa8))))))); } else { /* |x| >= 1/0.35 */ R=ERROR_FUNCT_rb0+s*(ERROR_FUNCT_rb1+s*(ERROR_FUNCT_rb2+s*(ERROR_FUNCT_rb3+s*(ERROR_FUNCT_rb4+s*(ERROR_FUNCT_rb5+s*ERROR_FUNCT_rb6))))); S=ERROR_FUNCT_one+s*(ERROR_FUNCT_sb1+s*(ERROR_FUNCT_sb2+s*(ERROR_FUNCT_sb3+s*(ERROR_FUNCT_sb4+s*(ERROR_FUNCT_sb5+s*(ERROR_FUNCT_sb6+s*ERROR_FUNCT_sb7)))))); } r = expf( -ax*ax-0.5625f +R/S); 
if(x>=0.0f) return ERROR_FUNCT_one-r/ax; else return r/ax-ERROR_FUNCT_one; } //device kernel to run the operator function in cumulative normal distribution __device__ float cumNormDistOp(normalDistStruct normDist, float z) { z = (z - normDist.average) / normDist.sigma; float result = 0.5f * ( 1.0f + errorFunct(normDist, z*M_SQRT_2 ) ); return result; } //device kernel to run the gaussian function in the normal distribution __device__ float gaussianFunctNormDist(normalDistStruct normDist, float x) { float deltax = x - normDist.average; float exponent = -(deltax*deltax)/normDist.denominator; // debian alpha had some strange problem in the very-low range return exponent <= -690.0f ? 0.0f : // exp(x) < 1.0e-300 anyway normDist.normalizationFactor * expf(exponent); } //device kernel to retrieve the derivative in a cumulative normal distribution __device__ float cumNormDistDeriv(normalDistStruct normDist, float x) { float xn = (x - normDist.average) / normDist.sigma; return gaussianFunctNormDist(normDist, xn) / normDist.sigma; } //device function to initialize the cumulative normal distribution structure __device__ void initCumNormDist(normalDistStruct& currCumNormDist) { currCumNormDist.average = 0.0f; currCumNormDist.sigma = 1.0f; currCumNormDist.normalizationFactor = M_SQRT_2*M_1_SQRTPI/currCumNormDist.sigma; currCumNormDist.derNormalizationFactor = currCumNormDist.sigma*currCumNormDist.sigma; currCumNormDist.denominator = 2.0f*currCumNormDist.derNormalizationFactor; } //device function to initialize variable in the black calculator __device__ void initBlackCalcVars(blackCalcStruct& blackCalculator, payoffStruct payoff) { blackCalculator.d1 = log(blackCalculator.forward / blackCalculator.strike)/blackCalculator.stdDev + 0.5f*blackCalculator.stdDev; blackCalculator.d2 = blackCalculator.d1 - blackCalculator.stdDev; //initialize the cumulative normal distribution structure normalDistStruct currCumNormDist; initCumNormDist(currCumNormDist); blackCalculator.cum_d1 = cumNormDistOp(currCumNormDist, blackCalculator.d1); blackCalculator.cum_d2 = cumNormDistOp(currCumNormDist, blackCalculator.d2); blackCalculator.n_d1 = cumNormDistDeriv(currCumNormDist, blackCalculator.d1); blackCalculator.n_d2 = cumNormDistDeriv(currCumNormDist, blackCalculator.d2); blackCalculator.x = payoff.strike; blackCalculator.DxDstrike = 1.0f; // the following one will probably disappear as soon as // super-share will be properly handled blackCalculator.DxDs = 0.0f; // this part is always executed. // in case of plain-vanilla payoffs, it is also the only part // which is executed. 
switch (payoff.type) { case CALL: blackCalculator.alpha = blackCalculator.cum_d1;// N(d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n(d1) blackCalculator.beta = -1.0f*blackCalculator.cum_d2;// -N(d2) blackCalculator.DbetaDd2 = -1.0f*blackCalculator.n_d2;// -n(d2) break; case PUT: blackCalculator.alpha = -1.0f+blackCalculator.cum_d1;// -N(-d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n( d1) blackCalculator.beta = 1.0f-blackCalculator.cum_d2;// N(-d2) blackCalculator.DbetaDd2 = -1.0f* blackCalculator.n_d2;// -n( d2) break; } } //device function to initialize the black calculator __device__ void initBlackCalculator(blackCalcStruct& blackCalc, payoffStruct payoff, float forwardPrice, float stdDev, float riskFreeDiscount) { blackCalc.strike = payoff.strike; blackCalc.forward = forwardPrice; blackCalc.stdDev = stdDev; blackCalc.discount = riskFreeDiscount; blackCalc.variance = stdDev * stdDev; initBlackCalcVars(blackCalc, payoff); } //device function to retrieve the output resulting value __device__ float getResultVal(blackCalcStruct blackCalculator) { float result = blackCalculator.discount * (blackCalculator.forward * blackCalculator.alpha + blackCalculator.x * blackCalculator.beta); return result; } //global function to retrieve the output value for an option __global__ void getOutValOption(const optionInputStruct* options, float* outputVals, int numVals) { int optionNum = blockIdx.x * blockDim.x + threadIdx.x; //check if within current options if (optionNum < numVals) { const optionInputStruct threadOption = options[optionNum]; payoffStruct currPayoff; currPayoff.type = threadOption.type; currPayoff.strike = threadOption.strike; yieldTermStruct qTS; qTS.timeYearFraction = threadOption.t; qTS.forward = threadOption.q; yieldTermStruct rTS; rTS.timeYearFraction = threadOption.t; rTS.forward = threadOption.r; blackVolStruct volTS; volTS.timeYearFraction = threadOption.t; volTS.volatility = threadOption.vol; blackScholesMertStruct stochProcess; stochProcess.x0 = threadOption.spot; stochProcess.dividendTS = qTS; stochProcess.riskFreeTS = rTS; stochProcess.blackVolTS = volTS; optionStruct currOption; currOption.payoff = currPayoff; currOption.yearFractionTime = threadOption.t; currOption.pricingEngine = stochProcess; float variance = getBlackVolBlackVar(currOption.pricingEngine.blackVolTS); float dividendDiscount = getDiscountOnDividendYield(currOption.yearFractionTime, currOption.pricingEngine.dividendTS); float riskFreeDiscount = getDiscountOnRiskFreeRate(currOption.yearFractionTime, currOption.pricingEngine.riskFreeTS); float spot = currOption.pricingEngine.x0; float forwardPrice = spot * dividendDiscount / riskFreeDiscount; //declare the blackCalcStruct blackCalcStruct blackCalc; //initialize the calculator initBlackCalculator(blackCalc, currOption.payoff, forwardPrice, sqrt(variance), riskFreeDiscount); //retrieve the results values float resultVal = getResultVal(blackCalc); //write the resulting value to global memory outputVals[optionNum] = resultVal; } } #endif //BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU
28b6f1a05b8ae431dec54ff0b24edf95dc9b1866.cu
//blackScholesAnalyticEngineKernels.cu //Scott Grauer-Gray //Kernels for running black scholes using the analytic engine #ifndef BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU #define BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU //declarations for the kernels #include "blackScholesAnalyticEngineKernels.cuh" //needed for the constants in the error function #include "errorFunctConsts.cuh" //device kernel to retrieve the compound factor in interestRate __device__ float interestRateCompoundFactor(float t, yieldTermStruct currYieldTermStruct) { return (expf((currYieldTermStruct.forward)*t)); } //device kernel to retrieve the discount factor in interestRate __device__ float interestRateDiscountFactor(float t, yieldTermStruct currYieldTermStruct) { return 1.0f / interestRateCompoundFactor(t, currYieldTermStruct); } //device function to get the variance of the black volatility function __device__ float getBlackVolBlackVar(blackVolStruct volTS) { float vol = volTS.volatility; return vol*vol*volTS.timeYearFraction; } //device function to get the discount on a dividend yield __device__ float getDiscountOnDividendYield(float yearFraction, yieldTermStruct dividendYieldTermStruct) { float intDiscountFactor = interestRateDiscountFactor(yearFraction, dividendYieldTermStruct); return intDiscountFactor; } //device function to get the discount on the risk free rate __device__ float getDiscountOnRiskFreeRate(float yearFraction, yieldTermStruct riskFreeRateYieldTermStruct) { return interestRateDiscountFactor(yearFraction, riskFreeRateYieldTermStruct); } //device kernel to run the error function __device__ float errorFunct(normalDistStruct normDist, float x) { float R,S,P,Q,s,y,z,r, ax; ax = fabsf(x); if(ax < 0.84375f) { if(ax < 3.7252902984e-09f) { if (ax < FLT_MIN*16.0f) return 0.125f*(8.0f*x+ (ERROR_FUNCT_efx8)*x); /*avoid underflow */ return x + (ERROR_FUNCT_efx)*x; } z = x*x; r = ERROR_FUNCT_pp0+z*(ERROR_FUNCT_pp1+z*(ERROR_FUNCT_pp2+z*(ERROR_FUNCT_pp3+z*ERROR_FUNCT_pp4))); s = ERROR_FUNCT_one+z*(ERROR_FUNCT_qq1+z*(ERROR_FUNCT_qq2+z*(ERROR_FUNCT_qq3+z*(ERROR_FUNCT_qq4+z*ERROR_FUNCT_qq5)))); y = r/s; return x + x*y; } if(ax <1.25f) { s = ax-ERROR_FUNCT_one; P = ERROR_FUNCT_pa0+s*(ERROR_FUNCT_pa1+s*(ERROR_FUNCT_pa2+s*(ERROR_FUNCT_pa3+s*(ERROR_FUNCT_pa4+s*(ERROR_FUNCT_pa5+s*ERROR_FUNCT_pa6))))); Q = ERROR_FUNCT_one+s*(ERROR_FUNCT_qa1+s*(ERROR_FUNCT_qa2+s*(ERROR_FUNCT_qa3+s*(ERROR_FUNCT_qa4+s*(ERROR_FUNCT_qa5+s*ERROR_FUNCT_qa6))))); if(x>=0.0f) return ERROR_FUNCT_erx + P/Q; else return -1.0f*ERROR_FUNCT_erx - P/Q; } if (ax >= 6.0f) { if(x>=0.0f) return ERROR_FUNCT_one-ERROR_FUNCT_tiny; else return ERROR_FUNCT_tiny-ERROR_FUNCT_one; } /* Starts to lose accuracy when ax~5 */ s = ERROR_FUNCT_one/(ax*ax); if(ax < 2.85714285714285f) { /* |x| < 1/0.35 */ R = ERROR_FUNCT_ra0+s*(ERROR_FUNCT_ra1+s*(ERROR_FUNCT_ra2+s*(ERROR_FUNCT_ra3+s*(ERROR_FUNCT_ra4+s*(ERROR_FUNCT_ra5+s*(ERROR_FUNCT_ra6+s*ERROR_FUNCT_ra7)))))); S = ERROR_FUNCT_one+s*(ERROR_FUNCT_sa1+s*(ERROR_FUNCT_sa2+s*(ERROR_FUNCT_sa3+s*(ERROR_FUNCT_sa4+s*(ERROR_FUNCT_sa5+s*(ERROR_FUNCT_sa6+s*(ERROR_FUNCT_sa7+s*ERROR_FUNCT_sa8))))))); } else { /* |x| >= 1/0.35 */ R=ERROR_FUNCT_rb0+s*(ERROR_FUNCT_rb1+s*(ERROR_FUNCT_rb2+s*(ERROR_FUNCT_rb3+s*(ERROR_FUNCT_rb4+s*(ERROR_FUNCT_rb5+s*ERROR_FUNCT_rb6))))); S=ERROR_FUNCT_one+s*(ERROR_FUNCT_sb1+s*(ERROR_FUNCT_sb2+s*(ERROR_FUNCT_sb3+s*(ERROR_FUNCT_sb4+s*(ERROR_FUNCT_sb5+s*(ERROR_FUNCT_sb6+s*ERROR_FUNCT_sb7)))))); } r = expf( -ax*ax-0.5625f +R/S); if(x>=0.0f) return ERROR_FUNCT_one-r/ax; else return r/ax-ERROR_FUNCT_one; } //device kernel 
to run the operator function in cumulative normal distribution __device__ float cumNormDistOp(normalDistStruct normDist, float z) { z = (z - normDist.average) / normDist.sigma; float result = 0.5f * ( 1.0f + errorFunct(normDist, z*M_SQRT_2 ) ); return result; } //device kernel to run the gaussian function in the normal distribution __device__ float gaussianFunctNormDist(normalDistStruct normDist, float x) { float deltax = x - normDist.average; float exponent = -(deltax*deltax)/normDist.denominator; // debian alpha had some strange problem in the very-low range return exponent <= -690.0f ? 0.0f : // exp(x) < 1.0e-300 anyway normDist.normalizationFactor * expf(exponent); } //device kernel to retrieve the derivative in a cumulative normal distribution __device__ float cumNormDistDeriv(normalDistStruct normDist, float x) { float xn = (x - normDist.average) / normDist.sigma; return gaussianFunctNormDist(normDist, xn) / normDist.sigma; } //device function to initialize the cumulative normal distribution structure __device__ void initCumNormDist(normalDistStruct& currCumNormDist) { currCumNormDist.average = 0.0f; currCumNormDist.sigma = 1.0f; currCumNormDist.normalizationFactor = M_SQRT_2*M_1_SQRTPI/currCumNormDist.sigma; currCumNormDist.derNormalizationFactor = currCumNormDist.sigma*currCumNormDist.sigma; currCumNormDist.denominator = 2.0f*currCumNormDist.derNormalizationFactor; } //device function to initialize variable in the black calculator __device__ void initBlackCalcVars(blackCalcStruct& blackCalculator, payoffStruct payoff) { blackCalculator.d1 = log(blackCalculator.forward / blackCalculator.strike)/blackCalculator.stdDev + 0.5f*blackCalculator.stdDev; blackCalculator.d2 = blackCalculator.d1 - blackCalculator.stdDev; //initialize the cumulative normal distribution structure normalDistStruct currCumNormDist; initCumNormDist(currCumNormDist); blackCalculator.cum_d1 = cumNormDistOp(currCumNormDist, blackCalculator.d1); blackCalculator.cum_d2 = cumNormDistOp(currCumNormDist, blackCalculator.d2); blackCalculator.n_d1 = cumNormDistDeriv(currCumNormDist, blackCalculator.d1); blackCalculator.n_d2 = cumNormDistDeriv(currCumNormDist, blackCalculator.d2); blackCalculator.x = payoff.strike; blackCalculator.DxDstrike = 1.0f; // the following one will probably disappear as soon as // super-share will be properly handled blackCalculator.DxDs = 0.0f; // this part is always executed. // in case of plain-vanilla payoffs, it is also the only part // which is executed. 
switch (payoff.type) { case CALL: blackCalculator.alpha = blackCalculator.cum_d1;// N(d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n(d1) blackCalculator.beta = -1.0f*blackCalculator.cum_d2;// -N(d2) blackCalculator.DbetaDd2 = -1.0f*blackCalculator.n_d2;// -n(d2) break; case PUT: blackCalculator.alpha = -1.0f+blackCalculator.cum_d1;// -N(-d1) blackCalculator.DalphaDd1 = blackCalculator.n_d1;// n( d1) blackCalculator.beta = 1.0f-blackCalculator.cum_d2;// N(-d2) blackCalculator.DbetaDd2 = -1.0f* blackCalculator.n_d2;// -n( d2) break; } } //device function to initialize the black calculator __device__ void initBlackCalculator(blackCalcStruct& blackCalc, payoffStruct payoff, float forwardPrice, float stdDev, float riskFreeDiscount) { blackCalc.strike = payoff.strike; blackCalc.forward = forwardPrice; blackCalc.stdDev = stdDev; blackCalc.discount = riskFreeDiscount; blackCalc.variance = stdDev * stdDev; initBlackCalcVars(blackCalc, payoff); } //device function to retrieve the output resulting value __device__ float getResultVal(blackCalcStruct blackCalculator) { float result = blackCalculator.discount * (blackCalculator.forward * blackCalculator.alpha + blackCalculator.x * blackCalculator.beta); return result; } //global function to retrieve the output value for an option __global__ void getOutValOption(const optionInputStruct* options, float* outputVals, int numVals) { int optionNum = blockIdx.x * blockDim.x + threadIdx.x; //check if within current options if (optionNum < numVals) { const optionInputStruct threadOption = options[optionNum]; payoffStruct currPayoff; currPayoff.type = threadOption.type; currPayoff.strike = threadOption.strike; yieldTermStruct qTS; qTS.timeYearFraction = threadOption.t; qTS.forward = threadOption.q; yieldTermStruct rTS; rTS.timeYearFraction = threadOption.t; rTS.forward = threadOption.r; blackVolStruct volTS; volTS.timeYearFraction = threadOption.t; volTS.volatility = threadOption.vol; blackScholesMertStruct stochProcess; stochProcess.x0 = threadOption.spot; stochProcess.dividendTS = qTS; stochProcess.riskFreeTS = rTS; stochProcess.blackVolTS = volTS; optionStruct currOption; currOption.payoff = currPayoff; currOption.yearFractionTime = threadOption.t; currOption.pricingEngine = stochProcess; float variance = getBlackVolBlackVar(currOption.pricingEngine.blackVolTS); float dividendDiscount = getDiscountOnDividendYield(currOption.yearFractionTime, currOption.pricingEngine.dividendTS); float riskFreeDiscount = getDiscountOnRiskFreeRate(currOption.yearFractionTime, currOption.pricingEngine.riskFreeTS); float spot = currOption.pricingEngine.x0; float forwardPrice = spot * dividendDiscount / riskFreeDiscount; //declare the blackCalcStruct blackCalcStruct blackCalc; //initialize the calculator initBlackCalculator(blackCalc, currOption.payoff, forwardPrice, sqrt(variance), riskFreeDiscount); //retrieve the results values float resultVal = getResultVal(blackCalc); //write the resulting value to global memory outputVals[optionNum] = resultVal; } } #endif //BLACK_SCHOLES_ANALYTIC_ENGINE_KERNELS_CU
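A hedged host-side launch sketch for getOutValOption. The optionInputStruct fields read by the kernel (type, strike, spot, q, r, t, vol) are declared in the included headers; the option count and 256-thread block size here are assumptions, not values fixed by this file:

int numVals = 1 << 20;                                    // number of options (assumed)
optionInputStruct *dOptions; float *dOut;
float *hostOut = (float *)malloc(numVals * sizeof(float));
cudaMalloc(&dOptions, numVals * sizeof(optionInputStruct));
cudaMalloc(&dOut, numVals * sizeof(float));
// ... cudaMemcpy the host-side option array into dOptions ...
int threads = 256;
int blocks = (numVals + threads - 1) / threads;
getOutValOption<<<blocks, threads>>>(dOptions, dOut, numVals);
cudaMemcpy(hostOut, dOut, numVals * sizeof(float), cudaMemcpyDeviceToHost);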
17dbd95d75aece60f11d293573933cc80a0d7e2c.hip
// !!! This is a file automatically generated by hipify!!! /* This code has the assumption that the source vertices are sorted in the input file Also, the vertices are 0 indexed */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 void safe_call(hipError_t ret, int line) { if(ret!=hipSuccess) { printf("Error at line %d : %s\n",line,hipGetErrorString(ret)); exit(-1); } } typedef struct __graph { int V; int *adj_prefix_sum; int *adj; } graph_t; __device__ bool d_over; __global__ void reset() { d_over = false; } // Print the graph __global__ void temp_kernel(graph_t * graph) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id == 0) { int j; for(j=0; j<graph->adj_prefix_sum[graph->V-1]; j++) printf("%d ",graph->adj[j]); printf("\n"); } } __global__ void init(int * vertices, int starting_vertex, int num_vertices) { int v = blockDim.x*blockIdx.x + threadIdx.x; if (v==starting_vertex) vertices[v] = 0; else if(v < num_vertices) vertices[v] = -1; } __global__ void bfs(const graph_t * graph, int * vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < graph->V) { if(vertices[id] == current_depth) { int i; if(id == 0) i = 0; else i = graph->adj_prefix_sum[id-1]; for(; i < graph->adj_prefix_sum[id]; i++) { if(vertices[graph->adj[i]] == -1) { vertices[graph->adj[i]] = current_depth+1; d_over = true; } } } } } int main(int argc, char * argv[]) { static char * filename; if(argc>2) { printf("./a.out <filename>\n"); exit(-1); } else if(argc==2) { filename = argv[1]; } else { filename = "../data/input.txt"; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(hipSetDevice(0)); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j; fscanf(fp,"%d %d",&num_vertices,&num_edges); graph_t *graph_host; CUDA_SAFE_CALL(hipMallocManaged((void **)&graph_host, sizeof(graph_t))); graph_host->V = num_vertices; CUDA_SAFE_CALL(hipMallocManaged((void **)&(graph_host->adj_prefix_sum), num_vertices*sizeof(int))); CUDA_SAFE_CALL(hipMallocManaged((void **)&(graph_host->adj), num_edges*sizeof(int *))); /* for(i=0; i<num_vertices; i++) { int edges_per_vertex; fscanf(fp,"%d",&edges_per_vertex); if(i>0) { graph_host->adj_prefix_sum[i] = graph_host->adj_prefix_sum[i-1]+edges_per_vertex; j = graph_host->adj_prefix_sum[i-1]; } else { graph_host->adj_prefix_sum[i] = edges_per_vertex; j = 0; } for(; j<graph_host->adj_prefix_sum[i]; j++) { fscanf(fp,"%d",&graph_host->adj[j]); } } */ /* It has been assumed that the source vertices are in sorted order */ int * temp_adj = (int *) malloc(num_vertices*sizeof(int)); int s,d,c=0,ps=0,jt; for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); if(ps == s) { temp_adj[c] = d; c++; } else { //printf("%d %d %d\n",i,ps,s); if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = temp_adj[j-jt]; } temp_adj[0] = d; c=1; while((++ps)<s) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } } } if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = 
temp_adj[j-jt]; } while((++ps)<num_vertices) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } /***************************************************** XXX: GPU does not know the size of each adjacency list. For that, a new struct containing size of list and list has to be created and passed to GPU memory. Too much hassle. OR Create 1-D array in the graph itself which contains the size of each list. *****************************************************/ //temp_kernel<<<1,1>>>(graph_host); int num_of_blocks = 1; int num_of_threads_per_block = num_vertices; if(num_vertices>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_vertices/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } int * vertices_host; CUDA_SAFE_CALL(hipMallocManaged((void **)&vertices_host, num_vertices*sizeof(int))); dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); hipEvent_t start,end; float diff; double time = 0; CUDA_SAFE_CALL(hipEventCreate(&start)); CUDA_SAFE_CALL(hipEventCreate(&end)); hipLaunchKernelGGL(( init), dim3(grid),dim3(threads), 0, 0, vertices_host, 0, num_vertices); bool stop; int k=0; do { stop = false; CUDA_SAFE_CALL(hipMemcpyToSymbol(d_over, &stop, sizeof(bool),0, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUDA_SAFE_CALL(hipEventRecord(start,0)); hipLaunchKernelGGL(( bfs), dim3(grid), dim3(threads), 0, 0, graph_host, vertices_host, k); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUDA_SAFE_CALL(hipEventRecord(end,0)); CUDA_SAFE_CALL(hipEventSynchronize(end)); CUDA_SAFE_CALL(hipEventElapsedTime(&diff, start, end)); time += diff*1.0e-3; CUDA_SAFE_CALL(hipMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, hipMemcpyDeviceToHost)); k++; }while(stop); printf("Number of iterations : %d\n",k); for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,vertices_host[i]); } printf("Time: %f ms\n",time); CUDA_SAFE_CALL(hipFree(vertices_host)); CUDA_SAFE_CALL(hipFree(graph_host->adj)); CUDA_SAFE_CALL(hipFree(graph_host->adj_prefix_sum)); CUDA_SAFE_CALL(hipFree(graph_host)); CUDA_SAFE_CALL(hipEventDestroy(start)); CUDA_SAFE_CALL(hipEventDestroy(end)); return 0; }
17dbd95d75aece60f11d293573933cc80a0d7e2c.cu
/* This code has the assumption that the source vertices are sorted in the input file Also, the vertices are 0 indexed */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 void safe_call(cudaError_t ret, int line) { if(ret!=cudaSuccess) { printf("Error at line %d : %s\n",line,cudaGetErrorString(ret)); exit(-1); } } typedef struct __graph { int V; int *adj_prefix_sum; int *adj; } graph_t; __device__ bool d_over; __global__ void reset() { d_over = false; } // Print the graph __global__ void temp_kernel(graph_t * graph) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id == 0) { int j; for(j=0; j<graph->adj_prefix_sum[graph->V-1]; j++) printf("%d ",graph->adj[j]); printf("\n"); } } __global__ void init(int * vertices, int starting_vertex, int num_vertices) { int v = blockDim.x*blockIdx.x + threadIdx.x; if (v==starting_vertex) vertices[v] = 0; else if(v < num_vertices) vertices[v] = -1; } __global__ void bfs(const graph_t * graph, int * vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < graph->V) { if(vertices[id] == current_depth) { int i; if(id == 0) i = 0; else i = graph->adj_prefix_sum[id-1]; for(; i < graph->adj_prefix_sum[id]; i++) { if(vertices[graph->adj[i]] == -1) { vertices[graph->adj[i]] = current_depth+1; d_over = true; } } } } } int main(int argc, char * argv[]) { static char * filename; if(argc>2) { printf("./a.out <filename>\n"); exit(-1); } else if(argc==2) { filename = argv[1]; } else { filename = "../data/input.txt"; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(cudaSetDevice(0)); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j; fscanf(fp,"%d %d",&num_vertices,&num_edges); graph_t *graph_host; CUDA_SAFE_CALL(cudaMallocManaged((void **)&graph_host, sizeof(graph_t))); graph_host->V = num_vertices; CUDA_SAFE_CALL(cudaMallocManaged((void **)&(graph_host->adj_prefix_sum), num_vertices*sizeof(int))); CUDA_SAFE_CALL(cudaMallocManaged((void **)&(graph_host->adj), num_edges*sizeof(int *))); /* for(i=0; i<num_vertices; i++) { int edges_per_vertex; fscanf(fp,"%d",&edges_per_vertex); if(i>0) { graph_host->adj_prefix_sum[i] = graph_host->adj_prefix_sum[i-1]+edges_per_vertex; j = graph_host->adj_prefix_sum[i-1]; } else { graph_host->adj_prefix_sum[i] = edges_per_vertex; j = 0; } for(; j<graph_host->adj_prefix_sum[i]; j++) { fscanf(fp,"%d",&graph_host->adj[j]); } } */ /* It has been assumed that the source vertices are in sorted order */ int * temp_adj = (int *) malloc(num_vertices*sizeof(int)); int s,d,c=0,ps=0,jt; for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); if(ps == s) { temp_adj[c] = d; c++; } else { //printf("%d %d %d\n",i,ps,s); if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = temp_adj[j-jt]; } temp_adj[0] = d; c=1; while((++ps)<s) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } } } if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = temp_adj[j-jt]; } while((++ps)<num_vertices) { 
graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } /***************************************************** XXX: GPU does not know the size of each adjacency list. For that, a new struct containing size of list and list has to be created and passed to GPU memory. Too much hassle. OR Create 1-D array in the graph itself which contains the size of each list. *****************************************************/ //temp_kernel<<<1,1>>>(graph_host); int num_of_blocks = 1; int num_of_threads_per_block = num_vertices; if(num_vertices>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_vertices/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } int * vertices_host; CUDA_SAFE_CALL(cudaMallocManaged((void **)&vertices_host, num_vertices*sizeof(int))); dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); cudaEvent_t start,end; float diff; double time = 0; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&end)); init<<<grid,threads>>> (vertices_host, 0, num_vertices); bool stop; int k=0; do { stop = false; CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &stop, sizeof(bool),0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); CUDA_SAFE_CALL(cudaEventRecord(start,0)); bfs<<<grid, threads>>> (graph_host, vertices_host, k); CUDA_SAFE_CALL(cudaDeviceSynchronize()); CUDA_SAFE_CALL(cudaEventRecord(end,0)); CUDA_SAFE_CALL(cudaEventSynchronize(end)); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff, start, end)); time += diff*1.0e-3; CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost)); k++; }while(stop); printf("Number of iterations : %d\n",k); for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,vertices_host[i]); } printf("Time: %f ms\n",time); CUDA_SAFE_CALL(cudaFree(vertices_host)); CUDA_SAFE_CALL(cudaFree(graph_host->adj)); CUDA_SAFE_CALL(cudaFree(graph_host->adj_prefix_sum)); CUDA_SAFE_CALL(cudaFree(graph_host)); CUDA_SAFE_CALL(cudaEventDestroy(start)); CUDA_SAFE_CALL(cudaEventDestroy(end)); return 0; }
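For reference, main() above expects a 0-indexed edge list sorted by source vertex: the first line gives num_vertices and num_edges, then one "src dst" pair per edge. A hypothetical ../data/input.txt in that format:

4 5
0 1
0 2
1 2
2 3
3 0

With this graph the program runs BFS from vertex 0 and prints distances 0, 1, 1 and 2 for vertices 0 through 3, followed by the iteration count and accumulated kernel time.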
aded35a16c09a4ac1ef7f379512545db810a404e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a hipDeviceSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. Sample code * is the easiest way to demonstrate: * #include "cuPrintf.hip" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "CudaPrintf.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_11_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. 
// NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if (!printfBufferPtr) { return NULL; } // Thread/block restriction check if ((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) { return NULL; } if ((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) { return NULL; } // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if (thread_buf_len < (CUPRINTF_MAX_LEN * 2)) { return NULL; } // Now address our section of the buffer. The first item is a header. 
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if (hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if (offset >= hdr.thread_buf_len) { offset = CUPRINTF_MAX_LEN; } // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. // __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if (ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if (!dest || !src || (dest >= end)) { return NULL; } // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while (n--) { if (dest >= end) // Overflow check { break; } len++; *dest++ = *src; if (*src++ == '\0') { break; } } // Now write out the padding bytes, and we have our length. while ((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. 
I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. // __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if (!ptr || !arg) { return NULL; } // strncpy does all our work. We just terminate. if ((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) { *ptr = 0; } return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if (!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) { return NULL; } // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? 
(int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. 
// We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if (((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) { restrictRules.threadid = threadid; } int block_count = gridDim.x * gridDim.y; if (((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) { restrictRules.blockid = blockid; } } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while (p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if (*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. int arglen = *(int *)data; if (arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch (specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if (arglen == 4) // Float vs. Double thing { fprintf(printf_fp, format, *((float *)data)); } else { fprintf(printf_fp, format, *((double *)data)); } break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, "%s", format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. 
// static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while (bufptr != endptr) { // Wrap ourselves at the end-of-buffer if (bufptr == bufend) { bufptr = bufstart; } // Adjust our start pointer to within the circular buffer and copy a block. hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if ((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if (headings) { fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); } if (hdr->fmtoffset == 0) { fprintf(printf_fp, "printf buffer overflow\n"); } else if (!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) { break; } printf_count++; // Clear if asked if (clear) { hipMemset(bufptr, 0, CUPRINTF_MAX_LEN); } // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" hipError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen; if ((bufferLen % CUPRINTF_MAX_LEN) > 0) { bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); } printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if (hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess) { return hipErrorInitializationError; } hipMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return hipSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if (!printfbuf_start || !printfbuf_device) { return; } hipFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. 
// // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if (!printfbuf_start || !printfbuf_device || !printf_fp) { return hipErrorMissingConfiguration; } // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if (magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while (blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost); // We get our block-size-step from the very first header if (hdr.thread_buf_len != 0) { blocklen = hdr.thread_buf_len; } // No magic number means no printfs from this thread if (hdr.magic != CUPRINTF_SM10_MAGIC) { if (blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! } blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if (hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if (sync_printfs) { doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); } doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if (magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". 
if (sync_printfs) { doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); } doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if (sync_printfs) { hipMemset(printfbuf_device, 0, printfbuf_len); } return hipSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
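The sm_10 fallback in getNextPrintfBufPtr() above carves the global print buffer into equal per-thread slices and wraps a pre-incremented offset inside each slice, reserving slot 0 of the slice for the per-thread header. What follows is a minimal host-side sketch of that arithmetic only, assuming a 256-byte record size like CUPRINTF_MAX_LEN; the function and variable names are illustrative and are not part of the original file.

#include <cassert>
#include <cstdio>

static const int kMaxLen = 256; // mirrors CUPRINTF_MAX_LEN (a power of two)

// Mirrors the sm_10 branch above: split the whole buffer into equal
// per-thread slices, rounded down to a multiple of kMaxLen; slot 0 of each
// slice holds the per-thread header, so the first printf record lands at
// offset kMaxLen and the offset wraps back to kMaxLen, never to 0.
static int nextOffset(int buffer_len, int thread_count, int current_offset)
{
    int thread_buf_len = buffer_len / thread_count;
    thread_buf_len &= ~(kMaxLen - 1);        // round down (works because kMaxLen is a power of two)
    assert(thread_buf_len >= 2 * kMaxLen);   // must fit the header plus at least one record

    int offset = current_offset + kMaxLen;   // pre-increment, as the device code does
    if (offset >= thread_buf_len)            // wrap, skipping the header slot
        offset = kMaxLen;
    return offset;
}

int main()
{
    // With a 1 MB buffer and 1024 threads each thread gets a 1024-byte slice:
    // one header slot plus three printf records before the offset wraps.
    int off = 0;
    for (int i = 0; i < 5; i++)
    {
        off = nextOffset(1 << 20, 1024, off);
        printf("record %d written at per-thread offset %d\n", i, off);
    }
    return 0;
}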
aded35a16c09a4ac1ef7f379512545db810a404e.cu
/* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a cudaDeviceSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. Sample code * is the easiest way to demonstrate: * #include "cuPrintf.cu" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "CudaPrintf.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_11_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. 
// NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if (!printfBufferPtr) { return NULL; } // Thread/block restriction check if ((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) { return NULL; } if ((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) { return NULL; } // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if (thread_buf_len < (CUPRINTF_MAX_LEN * 2)) { return NULL; } // Now address our section of the buffer. The first item is a header. 
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if (hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if (offset >= hdr.thread_buf_len) { offset = CUPRINTF_MAX_LEN; } // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. // __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if (ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if (!dest || !src || (dest >= end)) { return NULL; } // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while (n--) { if (dest >= end) // Overflow check { break; } len++; *dest++ = *src; if (*src++ == '\0') { break; } } // Now write out the padding bytes, and we have our length. while ((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. 
I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. // __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if (!ptr || !arg) { return NULL; } // strncpy does all our work. We just terminate. if ((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) { *ptr = 0; } return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if (!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) { return NULL; } // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? 
(int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. 
// We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if (((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) { restrictRules.threadid = threadid; } int block_count = gridDim.x * gridDim.y; if (((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) { restrictRules.blockid = blockid; } } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while (p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if (*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. int arglen = *(int *)data; if (arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch (specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if (arglen == 4) // Float vs. Double thing { fprintf(printf_fp, format, *((float *)data)); } else { fprintf(printf_fp, format, *((double *)data)); } break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, "%s", format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. 
// static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while (bufptr != endptr) { // Wrap ourselves at the end-of-buffer if (bufptr == bufend) { bufptr = bufstart; } // Adjust our start pointer to within the circular buffer and copy a block. cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if ((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if (headings) { fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); } if (hdr->fmtoffset == 0) { fprintf(printf_fp, "printf buffer overflow\n"); } else if (!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) { break; } printf_count++; // Clear if asked if (clear) { cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN); } // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" cudaError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen; if ((bufferLen % CUPRINTF_MAX_LEN) > 0) { bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); } printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if (cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess) { return cudaErrorInitializationError; } cudaMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return cudaSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if (!printfbuf_start || !printfbuf_device) { return; } cudaFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. 
// // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if (!printfbuf_start || !printfbuf_device || !printf_fp) { return cudaErrorMissingConfiguration; } // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if (magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while (blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost); // We get our block-size-step from the very first header if (hdr.thread_buf_len != 0) { blocklen = hdr.thread_buf_len; } // No magic number means no printfs from this thread if (hdr.magic != CUPRINTF_SM10_MAGIC) { if (blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! } blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if (hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if (sync_printfs) { doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); } doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if (magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". 
if (sync_printfs) { doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); } doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if (sync_printfs) { cudaMemset(printfbuf_device, 0, printfbuf_len); } return cudaSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
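For reference, a minimal end-to-end usage sketch of the entry points defined above (cudaPrintfInit, cuPrintf, cuPrintfRestrict, cudaPrintfDisplay, cudaPrintfEnd). The kernel, launch configuration and buffer size are illustrative assumptions, and the include name follows the sample in the file's own header comment rather than this repository's hashed filenames.

#include <cstdio>
#include "cuPrintf.cu" // name assumed from the sample in the header comment

__global__ void demoKernel(int val)
{
    // Restrict output to thread 0 of block 0 so only one line is emitted,
    // regardless of how many threads run the kernel.
    cuPrintfRestrict(0, 0);
    cuPrintf("value=%d from block %d, thread %d\n", val, blockIdx.x, threadIdx.x);
}

int main()
{
    // Allocate a 1 MB circular device buffer; cudaPrintfInit rounds the
    // length up to a multiple of the per-record size internally.
    if (cudaPrintfInit(1 << 20) != cudaSuccess)
        return 1;

    demoKernel<<<4, 64>>>(42);
    cudaDeviceSynchronize();

    // Drain the buffer; 'true' prefixes each line with [block, thread].
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
    return 0;
}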
69abbcb740290ee9524f0513e24f5af86c24c975.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <cub/device/device_segmented_sort.cuh> #include <nv/target> #include <test_util.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <thrust/shuffle.h> #include <thrust/sort.h> #include <fstream> #define TEST_HALF_T !_NVHPC_CUDA #define TEST_BF_T !_NVHPC_CUDA #if TEST_HALF_T #include <hip/hip_fp16.h> #endif #if TEST_BF_T #include <cuda_bf16.h> #endif using namespace cub; template <typename T> struct UnwrapHalfAndBfloat16 { using Type = T; }; #if TEST_HALF_T template <> struct UnwrapHalfAndBfloat16<half_t> { using Type = __half; }; #endif #if TEST_BF_T template <> struct UnwrapHalfAndBfloat16<bfloat16_t> { using Type = __nv_bfloat16; }; #endif constexpr static int MAX_ITERATIONS = 2; class SizeGroupDescription { public: SizeGroupDescription(const int segments, const int segment_size) : segments(segments) , segment_size(segment_size) {} int segments {}; int segment_size {}; }; template <typename KeyT> struct SegmentChecker { const KeyT *sorted_keys {}; const int *offsets {}; SegmentChecker(const KeyT *sorted_keys, const int *offsets) : sorted_keys(sorted_keys) , offsets(offsets) {} bool operator()(int segment_id) { const int segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; int counter = 0; for (int i = segment_begin; i < segment_end; i++) { if (sorted_keys[i] != static_cast<KeyT>(counter++)) { return false; } } return true; } }; template <typename KeyT> struct DescendingSegmentChecker { const KeyT *sorted_keys{}; const int *offsets{}; DescendingSegmentChecker(const KeyT *sorted_keys, const int *offsets) : 
sorted_keys(sorted_keys) , offsets(offsets) {} bool operator()(int segment_id) { const int segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; int counter = 0; for (int i = segment_end - 1; i >= segment_begin; i--) { if (sorted_keys[i] != static_cast<KeyT>(counter++)) { return false; } } return true; } }; template <typename KeyT> struct ReversedIota { KeyT *data {}; const int *offsets {}; ReversedIota(KeyT *data, const int *offsets) : data(data) , offsets(offsets) {} void operator()(int segment_id) const { const int segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; const int segment_size = segment_end - segment_begin; int count = 0; for (int i = segment_begin; i < segment_end; i++) { data[i] = static_cast<KeyT>(segment_size - 1 - count++); } } }; template <typename KeyT> struct Iota { KeyT *data{}; const int *offsets{}; Iota(KeyT *data, const int *offsets) : data(data) , offsets(offsets) {} void operator()(int segment_id) const { const int segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; int count = 0; for (int i = segment_begin; i < segment_end; i++) { data[i] = static_cast<KeyT>(count++); } } }; template <typename KeyT, typename ValueT = cub::NullType> class Input { thrust::default_random_engine random_engine; thrust::device_vector<int> d_segment_sizes; thrust::device_vector<int> d_offsets; thrust::host_vector<int> h_offsets; using MaskedValueT = cub::detail::conditional_t< std::is_same<ValueT, cub::NullType>::value, KeyT, ValueT>; bool reverse {}; int num_items {}; thrust::device_vector<KeyT> d_keys; thrust::device_vector<MaskedValueT> d_values; thrust::host_vector<KeyT> h_keys; thrust::host_vector<MaskedValueT> h_values; public: Input(bool reverse, const thrust::host_vector<int> &h_segment_sizes) : d_segment_sizes(h_segment_sizes) , d_offsets(d_segment_sizes.size() + 1) , h_offsets(d_segment_sizes.size() + 1) , reverse(reverse) , num_items(static_cast<int>( thrust::reduce(d_segment_sizes.begin(), d_segment_sizes.end()))) , d_keys(num_items) , d_values(num_items) , h_keys(num_items) , h_values(num_items) { update(); } Input(thrust::host_vector<int> &h_offsets) : d_offsets(h_offsets) , h_offsets(h_offsets) , reverse(false) , num_items(h_offsets.back()) , d_keys(num_items) , d_values(num_items) { } void shuffle() { thrust::shuffle(d_segment_sizes.begin(), d_segment_sizes.end(), random_engine); update(); } int get_num_items() const { return num_items; } int get_num_segments() const { return static_cast<unsigned int>(d_segment_sizes.size()); } const KeyT *get_d_keys() const { return thrust::raw_pointer_cast(d_keys.data()); } thrust::device_vector<KeyT> &get_d_keys_vec() { return d_keys; } thrust::device_vector<MaskedValueT> &get_d_values_vec() { return d_values; } KeyT *get_d_keys() { return thrust::raw_pointer_cast(d_keys.data()); } const thrust::host_vector<int>& get_h_offsets() { return h_offsets; } MaskedValueT *get_d_values() { return thrust::raw_pointer_cast(d_values.data()); } const int *get_d_offsets() const { return thrust::raw_pointer_cast(d_offsets.data()); } template <typename T> bool check_output_implementation(const T *keys_output) { const int *offsets = thrust::raw_pointer_cast(h_offsets.data()); if (reverse) { DescendingSegmentChecker<T> checker{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { if (!checker(i)) { return false; } } } else { SegmentChecker<T> checker{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { if (!checker(i)) 
{ return false; } } } return true; } bool check_output(const KeyT *d_keys_output, const MaskedValueT *d_values_output = nullptr) { KeyT *keys_output = thrust::raw_pointer_cast(h_keys.data()); MaskedValueT *values_output = thrust::raw_pointer_cast(h_values.data()); hipMemcpy(keys_output, d_keys_output, sizeof(KeyT) * num_items, hipMemcpyDeviceToHost); const bool keys_ok = check_output_implementation(keys_output); if (std::is_same<ValueT, cub::NullType>::value || d_values_output == nullptr) { return keys_ok; } hipMemcpy(values_output, d_values_output, sizeof(ValueT) * num_items, hipMemcpyDeviceToHost); const bool values_ok = check_output_implementation(values_output); return keys_ok && values_ok; } private: void update() { fill_offsets(); gen_keys(); } void fill_offsets() { thrust::copy(d_segment_sizes.begin(), d_segment_sizes.end(), d_offsets.begin()); thrust::exclusive_scan(d_offsets.begin(), d_offsets.end(), d_offsets.begin(), 0u); thrust::copy(d_offsets.begin(), d_offsets.end(), h_offsets.begin()); } void gen_keys() { KeyT *keys_output = thrust::raw_pointer_cast(h_keys.data()); const int *offsets = thrust::raw_pointer_cast(h_offsets.data()); if (reverse) { Iota<KeyT> generator{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { generator(i); } } else { ReversedIota<KeyT> generator{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { generator(i); } } d_keys = h_keys; d_values = d_keys; } }; template <typename KeyT, bool IsIntegralType = std::is_integral<KeyT>::value> class InputDescription { thrust::host_vector<int> segment_sizes; public: InputDescription& add(const SizeGroupDescription &group) { if (static_cast<std::size_t>(group.segment_size) < static_cast<std::size_t>((std::numeric_limits<KeyT>::max)())) { for (int i = 0; i < group.segments; i++) { segment_sizes.push_back(group.segment_size); } } return *this; } template <typename ValueT = cub::NullType> Input<KeyT, ValueT> gen(bool reverse) { return Input<KeyT, ValueT>(reverse, segment_sizes); } }; template <typename KeyT> class InputDescription<KeyT, false> { thrust::host_vector<int> segment_sizes; public: InputDescription& add(const SizeGroupDescription &group) { for (int i = 0; i < group.segments; i++) { segment_sizes.push_back(group.segment_size); } return *this; } template <typename ValueT = cub::NullType> Input<KeyT, ValueT> gen(bool reverse) { return Input<KeyT, ValueT>(reverse, segment_sizes); } }; template <typename WrappedKeyT, typename ValueT> void Sort(bool pairs, bool descending, bool double_buffer, bool stable_sort, void *tmp_storage, std::size_t &temp_storage_bytes, WrappedKeyT *wrapped_input_keys, WrappedKeyT *wrapped_output_keys, ValueT *input_values, ValueT *output_values, int num_items, int num_segments, const int *d_offsets, int *keys_selector = nullptr, int *values_selector = nullptr) { using KeyT = typename UnwrapHalfAndBfloat16<WrappedKeyT>::Type; auto input_keys = reinterpret_cast<KeyT*>(wrapped_input_keys); auto output_keys = reinterpret_cast<KeyT*>(wrapped_output_keys); if (stable_sort) { if (pairs) { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit(cub::DeviceSegmentedSort::StableSortPairsDescending( tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; 
*values_selector = values_buffer.selector; } else { CubDebugExit(cub::DeviceSegmentedSort::StableSortPairsDescending( tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit( cub::DeviceSegmentedSort::StableSortPairs(tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; *values_selector = values_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::StableSortPairs(tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } } else { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; CubDebugExit(cub::DeviceSegmentedSort::StableSortKeysDescending( tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit(cub::DeviceSegmentedSort::StableSortKeysDescending( tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; CubDebugExit( cub::DeviceSegmentedSort::StableSortKeys(tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::StableSortKeys(tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } } } else { if (pairs) { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit( cub::DeviceSegmentedSort::SortPairsDescending(tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; *values_selector = values_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::SortPairsDescending(tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit(cub::DeviceSegmentedSort::SortPairs(tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; *values_selector = values_buffer.selector; } else { CubDebugExit(cub::DeviceSegmentedSort::SortPairs(tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } } else { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); 
keys_buffer.selector = *keys_selector; CubDebugExit( cub::DeviceSegmentedSort::SortKeysDescending(tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::SortKeysDescending(tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; CubDebugExit(cub::DeviceSegmentedSort::SortKeys(tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit(cub::DeviceSegmentedSort::SortKeys(tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } } } } template <typename KeyT, typename ValueT> std::size_t Sort(bool pairs, bool descending, bool double_buffer, bool stable_sort, KeyT *input_keys, KeyT *output_keys, ValueT *input_values, ValueT *output_values, int num_items, int num_segments, const int *d_offsets, int *keys_selector = nullptr, int *values_selector = nullptr) { std::size_t temp_storage_bytes = 42ul; Sort<KeyT, ValueT>(pairs, descending, double_buffer, stable_sort, nullptr, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, keys_selector, values_selector); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); std::uint8_t *d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); Sort<KeyT, ValueT>(pairs, descending, double_buffer, stable_sort, d_temp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, keys_selector, values_selector); return temp_storage_bytes; } constexpr bool keys_only = false; constexpr bool pairs = true; constexpr bool ascending = false; constexpr bool descending = true; constexpr bool pointers = false; constexpr bool double_buffer = true; constexpr bool unstable = false; constexpr bool stable = true; void TestZeroSegments() { // Type doesn't affect the escape logic, so it should be fine // to test only one set of types here. using KeyT = std::uint8_t; using ValueT = std::uint64_t; for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { for (bool sort_descending: { ascending, descending }) { for (bool sort_buffer: { pointers, double_buffer }) { cub::DoubleBuffer<KeyT> keys_buffer(nullptr, nullptr); cub::DoubleBuffer<ValueT> values_buffer(nullptr, nullptr); values_buffer.selector = 1; Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffer, stable_sort, nullptr, nullptr, nullptr, nullptr, int{}, int{}, nullptr, &keys_buffer.selector, &values_buffer.selector); AssertEquals(keys_buffer.selector, 0); AssertEquals(values_buffer.selector, 1); } } } } } void TestEmptySegments(int segments) { // Type doesn't affect the escape logic, so it should be fine // to test only one set of types here. 
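  // Descriptive note (added): all offsets below are zero, so every segment is empty.
  // The sort is expected to return early without touching the (null) data and without
  // flipping either DoubleBuffer selector, which the AssertEquals calls at the end verify.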
using KeyT = std::uint8_t; using ValueT = std::uint64_t; thrust::device_vector<int> offsets(segments + 1, int{}); const int *d_offsets = thrust::raw_pointer_cast(offsets.data()); for (bool sort_stable: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { for (bool sort_descending: { ascending, descending }) { for (bool sort_buffer: { pointers, double_buffer }) { cub::DoubleBuffer<KeyT> keys_buffer(nullptr, nullptr); cub::DoubleBuffer<ValueT> values_buffer(nullptr, nullptr); values_buffer.selector = 1; Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffer, sort_stable, nullptr, nullptr, nullptr, nullptr, int{}, segments, d_offsets, &keys_buffer.selector, &values_buffer.selector); AssertEquals(keys_buffer.selector, 0); AssertEquals(values_buffer.selector, 1); } } } } } template <typename KeyT, typename ValueT> void TestSameSizeSegments(int segment_size, int segments, bool skip_values = false) { const int num_items = segment_size * segments; thrust::device_vector<int> offsets(segments + 1); thrust::sequence(offsets.begin(), offsets.end(), int{}, segment_size); const int *d_offsets = thrust::raw_pointer_cast(offsets.data()); const KeyT target_key {1}; const ValueT target_value {42}; thrust::device_vector<KeyT> keys_input(num_items); thrust::device_vector<KeyT> keys_output(num_items); KeyT *d_keys_input = thrust::raw_pointer_cast(keys_input.data()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); thrust::device_vector<ValueT> values_input(num_items); thrust::device_vector<ValueT> values_output(num_items); thrust::host_vector<KeyT> host_keys(num_items); thrust::host_vector<ValueT> host_values(num_items); ValueT *d_values_input = thrust::raw_pointer_cast(values_input.data()); ValueT *d_values_output = thrust::raw_pointer_cast(values_output.data()); for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { if (sort_pairs) { if (skip_values) { continue; } } for (bool sort_descending: { ascending, descending }) { for (bool sort_buffers: { pointers, double_buffer }) { cub::DoubleBuffer<KeyT> keys_buffer(nullptr, nullptr); cub::DoubleBuffer<ValueT> values_buffer(nullptr, nullptr); values_buffer.selector = 1; thrust::fill(keys_input.begin(), keys_input.end(), target_key); thrust::fill(keys_output.begin(), keys_output.end(), KeyT{}); if (sort_pairs) { if (sort_buffers) { thrust::fill(values_input.begin(), values_input.end(), ValueT{}); thrust::fill(values_output.begin(), values_output.end(), target_value); } else { thrust::fill(values_input.begin(), values_input.end(), target_value); thrust::fill(values_output.begin(), values_output.end(), ValueT{}); } } const std::size_t temp_storage_bytes = Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, stable_sort, d_keys_input, d_keys_output, d_values_input, d_values_output, num_items, segments, d_offsets, &keys_buffer.selector, &values_buffer.selector); // If temporary storage size is defined by extra keys storage if (sort_buffers) { if (2 * segments * sizeof(unsigned int) < num_items * sizeof(KeyT)) { std::size_t extra_temp_storage_bytes{}; Sort(sort_pairs, sort_descending, pointers, stable_sort, nullptr, extra_temp_storage_bytes, d_keys_input, d_keys_output, d_values_input, d_values_output, num_items, segments, d_offsets, &keys_buffer.selector, &values_buffer.selector); AssertTrue(extra_temp_storage_bytes > temp_storage_bytes); } } { host_keys = keys_buffer.selector || !sort_buffers ? 
keys_output : keys_input; const std::size_t items_selected = thrust::count(host_keys.begin(), host_keys.end(), target_key); AssertEquals(static_cast<int>(items_selected), num_items); } if (sort_pairs) { host_values = values_buffer.selector || !sort_buffers ? values_output : values_input; const std::size_t items_selected = thrust::count(host_values.begin(), host_values.end(), target_value); AssertEquals(static_cast<int>(items_selected), num_items); } } } } } } template <typename KeyT, typename ValueT> void InputTest(bool sort_descending, Input<KeyT, ValueT> &input) { thrust::device_vector<KeyT> keys_output(input.get_num_items()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); thrust::device_vector<ValueT> values_output(input.get_num_items()); ValueT *d_values_output = thrust::raw_pointer_cast(values_output.data()); for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs : { keys_only, pairs }) { for (bool sort_buffers : {pointers, double_buffer}) { for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { thrust::fill(keys_output.begin(), keys_output.end(), KeyT{}); thrust::fill(values_output.begin(), values_output.end(), ValueT{}); cub::DoubleBuffer<KeyT> keys_buffer(input.get_d_keys(), d_keys_output); cub::DoubleBuffer<ValueT> values_buffer(input.get_d_values(), d_values_output); Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, stable_sort, input.get_d_keys(), d_keys_output, input.get_d_values(), d_values_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets(), &keys_buffer.selector, &values_buffer.selector); if (sort_buffers) { if (sort_pairs) { AssertTrue(input.check_output(keys_buffer.Current(), values_buffer.Current())); } else { AssertTrue(input.check_output(keys_buffer.Current())); } } else { if (sort_pairs) { AssertTrue(input.check_output(d_keys_output, d_values_output)); } else { AssertTrue(input.check_output(d_keys_output)); } } input.shuffle(); } } } } } struct ComparisonPredicate { template <typename T> __host__ __device__ bool operator()(const T &lhs, const T &rhs) const { return lhs == rhs; } __host__ __device__ bool operator()(const half_t &lhs, const half_t &rhs) const { return lhs.raw() == rhs.raw(); } }; template <typename T> bool compare_two_outputs(const thrust::host_vector<int> &offsets, const thrust::host_vector<T> &lhs, const thrust::host_vector<T> &rhs) { const auto num_segments = static_cast<unsigned int>(offsets.size() - 1); for (std::size_t segment_id = 0; segment_id < num_segments; segment_id++) { auto lhs_begin = lhs.cbegin() + offsets[segment_id]; auto lhs_end = lhs.cbegin() + offsets[segment_id + 1]; auto rhs_begin = rhs.cbegin() + offsets[segment_id]; auto err = thrust::mismatch(lhs_begin, lhs_end, rhs_begin, ComparisonPredicate{}); if (err.first != lhs_end) { const auto idx = thrust::distance(lhs_begin, err.first); const auto segment_size = std::distance(lhs_begin, lhs_end); std::cerr << "Mismatch in segment " << segment_id << " at position " << idx << " / " << segment_size << ": " << static_cast<std::uint64_t>(lhs_begin[idx]) << " vs " << static_cast<std::uint64_t>(rhs_begin[idx]) << " (" << typeid(lhs_begin[idx]).name() << ")" << std::endl; return false; } } return true; } template <typename ValueT> void RandomizeInput(thrust::host_vector<bool> &h_keys, thrust::host_vector<ValueT> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<std::uint8_t>::max)()) > 128; h_values[i] = 
RandomValue((std::numeric_limits<ValueT>::max)()); } } template <typename KeyT, typename ValueT> void RandomizeInput(thrust::host_vector<KeyT> &h_keys, thrust::host_vector<ValueT> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<KeyT>::max)()); h_values[i] = RandomValue((std::numeric_limits<ValueT>::max)()); } } #if TEST_HALF_T void RandomizeInput(thrust::host_vector<half_t> &h_keys, thrust::host_vector<std::uint32_t> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<int>::max)()); h_values[i] = RandomValue((std::numeric_limits<std::uint32_t>::max)()); } } #endif #if TEST_BF_T void RandomizeInput(thrust::host_vector<bfloat16_t> &h_keys, thrust::host_vector<std::uint32_t> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<int>::max)()); h_values[i] = RandomValue((std::numeric_limits<std::uint32_t>::max)()); } } #endif template <typename KeyT, typename ValueT> void HostReferenceSort(bool sort_pairs, bool sort_descending, unsigned int num_segments, const thrust::host_vector<int> &h_offsets, thrust::host_vector<KeyT> &h_keys, thrust::host_vector<ValueT> &h_values) { for (unsigned int segment_i = 0; segment_i < num_segments; segment_i++) { const int segment_begin = h_offsets[segment_i]; const int segment_end = h_offsets[segment_i + 1]; if (sort_pairs) { if (sort_descending) { thrust::stable_sort_by_key(h_keys.begin() + segment_begin, h_keys.begin() + segment_end, h_values.begin() + segment_begin, thrust::greater<KeyT>{}); } else { thrust::stable_sort_by_key(h_keys.begin() + segment_begin, h_keys.begin() + segment_end, h_values.begin() + segment_begin); } } else { if (sort_descending) { thrust::stable_sort(h_keys.begin() + segment_begin, h_keys.begin() + segment_end, thrust::greater<KeyT>{}); } else { thrust::stable_sort(h_keys.begin() + segment_begin, h_keys.begin() + segment_end); } } } } #if STORE_ON_FAILURE template <typename KeyT, typename ValueT> void DumpInput(bool sort_pairs, bool sort_descending, bool sort_buffers, Input<KeyT, ValueT> &input, thrust::host_vector<KeyT> &h_keys, thrust::host_vector<ValueT> &h_values) { const thrust::host_vector<int> &h_offsets = input.get_h_offsets(); std::cout << "sort pairs: " << sort_pairs << "\n"; std::cout << "sort descending: " << sort_descending << "\n"; std::cout << "sort buffers: " << sort_buffers << "\n"; std::cout << "num_items: " << input.get_num_items() << "\n"; std::cout << "num_segments: " << input.get_num_segments() << "\n"; std::cout << "key type: " << typeid(h_keys[0]).name() << "\n"; std::cout << "value type: " << typeid(h_values[0]).name() << "\n"; std::cout << "offset type: " << typeid(h_offsets[0]).name() << "\n"; std::ofstream offsets_dump("offsets", std::ios::binary); offsets_dump.write(reinterpret_cast<const char *>( thrust::raw_pointer_cast(h_offsets.data())), sizeof(int) * h_offsets.size()); std::ofstream keys_dump("keys", std::ios::binary); keys_dump.write(reinterpret_cast<const char *>( thrust::raw_pointer_cast(h_keys.data())), sizeof(KeyT) * h_keys.size()); std::ofstream values_dump("values", std::ios::binary); values_dump.write(reinterpret_cast<const char *>( thrust::raw_pointer_cast(h_values.data())), sizeof(ValueT) * h_values.size()); } #endif template <typename KeyT, typename ValueT> void InputTestRandom(Input<KeyT, ValueT> &input) { thrust::host_vector<KeyT> h_keys_output(input.get_num_items()); thrust::device_vector<KeyT> 
keys_output(input.get_num_items()); thrust::host_vector<ValueT> h_values_output(input.get_num_items()); thrust::device_vector<ValueT> values_output(input.get_num_items()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); ValueT *d_values_output = thrust::raw_pointer_cast(values_output.data()); thrust::host_vector<KeyT> h_keys(input.get_num_items()); thrust::host_vector<ValueT> h_values(input.get_num_items()); const thrust::host_vector<int> &h_offsets = input.get_h_offsets(); for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { for (bool sort_descending: { ascending, descending }) { for (bool sort_buffers: { pointers, double_buffer }) { for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { RandomizeInput(h_keys, h_values); #if STORE_ON_FAILURE auto h_keys_backup = h_keys; auto h_values_backup = h_values; #endif input.get_d_keys_vec() = h_keys; input.get_d_values_vec() = h_values; cub::DoubleBuffer<KeyT> keys_buffer(input.get_d_keys(), d_keys_output); cub::DoubleBuffer<ValueT> values_buffer(input.get_d_values(), d_values_output); Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, stable_sort, input.get_d_keys(), d_keys_output, input.get_d_values(), d_values_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets(), &keys_buffer.selector, &values_buffer.selector); HostReferenceSort(sort_pairs, sort_descending, input.get_num_segments(), h_offsets, h_keys, h_values); if (sort_buffers) { if (keys_buffer.selector) { h_keys_output = keys_output; } else { h_keys_output = input.get_d_keys_vec(); } if (values_buffer.selector) { h_values_output = values_output; } else { h_values_output = input.get_d_values_vec(); } } else { h_keys_output = keys_output; h_values_output = values_output; } const bool keys_ok = compare_two_outputs(h_offsets, h_keys, h_keys_output); const bool values_ok = sort_pairs ? 
compare_two_outputs(h_offsets, h_values, h_values_output) : true; #if STORE_ON_FAILURE if (!keys_ok || !values_ok) { DumpInput<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, input, h_keys_backup, h_values_backup); } #endif AssertTrue(keys_ok); AssertTrue(values_ok); input.shuffle(); } } } } } } template <typename KeyT, typename ValueT, bool IsSupportedType = std::is_integral<KeyT>::value> struct EdgeTestDispatch { // Edge cases that needs to be tested const int empty_short_circuit_segment_size = 0; const int copy_short_circuit_segment_size = 1; const int swap_short_circuit_segment_size = 2; const int a_few = 2; const int a_bunch_of = 42; const int a_lot_of = 420; template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Invoke() { NV_IF_TARGET(NV_IS_HOST, (using SmallAndMediumPolicyT = typename ActivePolicyT::SmallAndMediumSegmentedSortPolicyT; using LargeSegmentPolicyT = typename ActivePolicyT::LargeSegmentPolicy; const int small_segment_max_segment_size = SmallAndMediumPolicyT::SmallPolicyT::ITEMS_PER_TILE; const int items_per_small_segment = SmallAndMediumPolicyT::SmallPolicyT::ITEMS_PER_THREAD; const int medium_segment_max_segment_size = SmallAndMediumPolicyT::MediumPolicyT::ITEMS_PER_TILE; const int single_thread_segment_size = items_per_small_segment; const int large_cached_segment_max_segment_size = LargeSegmentPolicyT::BLOCK_THREADS * LargeSegmentPolicyT::ITEMS_PER_THREAD; for (bool sort_descending : {ascending, descending}) { Input<KeyT, ValueT> edge_cases = InputDescription<KeyT>() .add({a_lot_of, empty_short_circuit_segment_size}) .add({a_lot_of, copy_short_circuit_segment_size}) .add({a_lot_of, swap_short_circuit_segment_size}) .add({a_lot_of, swap_short_circuit_segment_size + 1}) .add({a_lot_of, swap_short_circuit_segment_size + 1}) .add({a_lot_of, single_thread_segment_size - 1}) .add({a_lot_of, single_thread_segment_size}) .add({a_lot_of, single_thread_segment_size + 1}) .add({a_lot_of, single_thread_segment_size * 2 - 1}) .add({a_lot_of, single_thread_segment_size * 2}) .add({a_lot_of, single_thread_segment_size * 2 + 1}) .add({a_bunch_of, small_segment_max_segment_size - 1}) .add({a_bunch_of, small_segment_max_segment_size}) .add({a_bunch_of, small_segment_max_segment_size + 1}) .add({a_bunch_of, medium_segment_max_segment_size - 1}) .add({a_bunch_of, medium_segment_max_segment_size}) .add({a_bunch_of, medium_segment_max_segment_size + 1}) .add({a_bunch_of, large_cached_segment_max_segment_size - 1}) .add({a_bunch_of, large_cached_segment_max_segment_size}) .add({a_bunch_of, large_cached_segment_max_segment_size + 1}) .add({a_few, large_cached_segment_max_segment_size * 2}) .add({a_few, large_cached_segment_max_segment_size * 3}) .add({a_few, large_cached_segment_max_segment_size * 5}) .template gen<ValueT>(sort_descending); InputTest<KeyT, ValueT>(sort_descending, edge_cases); })); return hipSuccess; } }; template <typename KeyT, typename ValueT> struct EdgeTestDispatch<KeyT, ValueT, false> { template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Invoke() { // Edge case test is using an optimized testing approach which is // incompatible with duplicates. RandomTest is used for other types. 
return hipSuccess; } }; template <typename KeyT, typename ValueT> void EdgePatternsTest() { int ptx_version = 0; if (CubDebug(PtxVersion(ptx_version))) { return; } using MaxPolicyT = typename cub::DeviceSegmentedSortPolicy<KeyT, ValueT>::MaxPolicy; using EdgeTestDispatchT = EdgeTestDispatch<KeyT, ValueT>; EdgeTestDispatchT dispatch; MaxPolicyT::Invoke(ptx_version, dispatch); } template <typename KeyT, typename ValueT> Input<KeyT, ValueT> GenRandomInput(int max_items, int min_segments, int max_segments, bool descending) { int items_generated {}; const int segments_num = RandomValue(max_segments) + min_segments; thrust::host_vector<int> segment_sizes; segment_sizes.reserve(segments_num); const int max_segment_size = 6000; for (int segment_id = 0; segment_id < segments_num; segment_id++) { const int segment_size_raw = RandomValue(max_segment_size); const int segment_size = segment_size_raw > 0 ? segment_size_raw : 0; if (segment_size + items_generated > max_items) { break; } items_generated += segment_size; segment_sizes.push_back(segment_size); } return Input<KeyT, ValueT>{descending, segment_sizes}; } template <typename KeyT, typename ValueT> void RandomTest(int min_segments, int max_segments) { const int max_items = 10000000; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { Input<KeyT, ValueT> edge_cases = GenRandomInput<KeyT, ValueT>(max_items, min_segments, max_segments, descending); InputTestRandom(edge_cases); } } template <typename KeyT, typename ValueT> void Test() { for (int segment_size: { 1, 1024, 24 * 1024 }) { for (int segments: { 1, 1024 }) { TestSameSizeSegments<KeyT, ValueT>(segment_size, segments); } } RandomTest<KeyT, ValueT>(1 << 2, 1 << 8); RandomTest<KeyT, ValueT>(1 << 9, 1 << 19); EdgePatternsTest<KeyT, ValueT>(); } #if TEST_CDP == 1 template <typename KeyT> __global__ void LauncherKernel( void *tmp_storage, std::size_t temp_storage_bytes, const KeyT *in_keys, KeyT *out_keys, int num_items, int num_segments, const int *offsets) { CubDebug(cub::DeviceSegmentedSort::SortKeys(tmp_storage, temp_storage_bytes, in_keys, out_keys, num_items, num_segments, offsets, offsets + 1)); } template <typename KeyT, typename ValueT> void TestDeviceSideLaunch(Input<KeyT, ValueT> &input) { thrust::host_vector<KeyT> h_keys_output(input.get_num_items()); thrust::device_vector<KeyT> keys_output(input.get_num_items()); thrust::host_vector<ValueT> h_values_output(input.get_num_items()); thrust::device_vector<ValueT> values_output(input.get_num_items()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); thrust::host_vector<KeyT> h_keys(input.get_num_items()); thrust::host_vector<ValueT> h_values(input.get_num_items()); const thrust::host_vector<int> &h_offsets = input.get_h_offsets(); for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { RandomizeInput(h_keys, h_values); input.get_d_keys_vec() = h_keys; input.get_d_values_vec() = h_values; const KeyT *d_input = input.get_d_keys(); std::size_t temp_storage_bytes{}; cub::DeviceSegmentedSort::SortKeys(nullptr, temp_storage_bytes, d_input, d_keys_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets(), input.get_d_offsets() + 1); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); std::uint8_t *d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); hipLaunchKernelGGL(( LauncherKernel<KeyT>), dim3(1), dim3(1), 0, 0, d_temp_storage, temp_storage_bytes, d_input, d_keys_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets()); 
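    // Descriptive note (added): the kernel above performs the segmented sort from device
    // code (a CDP launch). Synchronize and then check the last error so any device-side
    // failure is reported before the host-reference comparison below.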
CubDebugExit(hipDeviceSynchronize()); CubDebugExit(hipPeekAtLastError()); HostReferenceSort(false, false, input.get_num_segments(), h_offsets, h_keys, h_values); h_keys_output = keys_output; const bool keys_ok = compare_two_outputs(h_offsets, h_keys, h_keys_output); AssertTrue(keys_ok); input.shuffle(); } } template <typename KeyT> void TestDeviceSideLaunch(int min_segments, int max_segments) { const int max_items = 10000000; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { Input<KeyT, KeyT> edge_cases = GenRandomInput<KeyT, KeyT>(max_items, min_segments, max_segments, descending); TestDeviceSideLaunch(edge_cases); } } template <typename KeyT> void TestDeviceSideLaunch() { TestDeviceSideLaunch<KeyT>(1 << 2, 1 << 8); TestDeviceSideLaunch<KeyT>(1 << 9, 1 << 19); } #endif // TEST_CDP void TestUnspecifiedRanges() { const std::size_t num_items = 1024 * 1024; const std::size_t max_segments = 42; const std::size_t avg_segment_size = num_items / max_segments; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { thrust::host_vector<int> h_offsets_begin; thrust::host_vector<int> h_offsets_end; h_offsets_begin.reserve(max_segments + 1); h_offsets_end.reserve(max_segments + 1); { int offset = 0; for (std::size_t sid = 0; sid < max_segments; sid++) { const int segment_size = RandomValue(static_cast<int>(avg_segment_size)); const bool segment_is_utilized = RandomValue(100) > 60; if (segment_is_utilized) { h_offsets_begin.push_back(offset); h_offsets_end.push_back(offset + segment_size); } offset += segment_size; } if (h_offsets_begin.empty()) { h_offsets_begin.push_back(avg_segment_size); h_offsets_end.push_back(num_items); } } thrust::device_vector<int> keys(num_items); thrust::device_vector<int> values(num_items); thrust::sequence(keys.rbegin(), keys.rend()); thrust::sequence(values.rbegin(), values.rend()); thrust::device_vector<int> d_offsets_begin = h_offsets_begin; thrust::device_vector<int> d_offsets_end = h_offsets_end; thrust::device_vector<int> expected_keys = keys; thrust::device_vector<int> expected_values = values; const int num_segments = static_cast<int>(h_offsets_begin.size()); for (int sid = 0; sid < num_segments; sid++) { const int segment_begin = h_offsets_begin[sid]; const int segment_end = h_offsets_end[sid]; thrust::sort_by_key(expected_keys.begin() + segment_begin, expected_keys.begin() + segment_end, expected_values.begin() + segment_begin); } thrust::device_vector<int> result_keys = keys; thrust::device_vector<int> result_values = values; { cub::DoubleBuffer<int> keys_buffer( thrust::raw_pointer_cast(keys.data()), thrust::raw_pointer_cast(result_keys.data())); cub::DoubleBuffer<int> values_buffer( thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(result_values.data())); std::size_t temp_storage_bytes{}; std::uint8_t *d_temp_storage{}; CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); for (int sid = 0; sid < num_segments; sid++) { const int segment_begin = h_offsets_begin[sid]; 
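      // Descriptive note (added): when the DoubleBuffer ends up with selector == 0, the
      // sorted segments live in the original keys/values storage, so each utilized segment
      // is copied into result_keys/result_values; items outside the specified ranges keep
      // the values they were initialized with.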
const int segment_end = h_offsets_end[sid]; if (keys_buffer.selector == 0) { thrust::copy( keys.begin() + segment_begin, keys.begin() + segment_end, result_keys.begin() + segment_begin); } if (values_buffer.selector == 0) { thrust::copy( values.begin() + segment_begin, values.begin() + segment_end, result_values.begin() + segment_begin); } } } AssertEquals(result_values, expected_values); AssertEquals(result_keys, expected_keys); thrust::sequence(keys.rbegin(), keys.rend()); thrust::sequence(values.rbegin(), values.rend()); result_keys = keys; result_values = values; { std::size_t temp_storage_bytes{}; std::uint8_t *d_temp_storage{}; CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, thrust::raw_pointer_cast(keys.data()), thrust::raw_pointer_cast(result_keys.data()), thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(result_values.data()), num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, thrust::raw_pointer_cast(keys.data()), thrust::raw_pointer_cast(result_keys.data()), thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(result_values.data()), num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); } AssertEquals(result_values, expected_values); AssertEquals(result_keys, expected_keys); } } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); // Initialize device CubDebugExit(args.DeviceInit()); // %PARAM% TEST_CDP cdp 0:1 #if TEST_CDP == 0 TestZeroSegments(); TestEmptySegments(1 << 2); TestEmptySegments(1 << 22); #if TEST_HALF_T Test<half_t, std::uint32_t>(); #endif #if TEST_BF_T Test<bfloat16_t, std::uint32_t>(); #endif Test<bool, std::uint64_t>(); Test<std::uint8_t, std::uint64_t>(); Test<std::int64_t, std::uint32_t>(); #elif TEST_CDP == 1 TestDeviceSideLaunch<int>(); #endif // TEST_CDP TestUnspecifiedRanges(); return 0; }
69abbcb740290ee9524f0513e24f5af86c24c975.cu
/****************************************************************************** * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <cub/device/device_segmented_sort.cuh> #include <nv/target> #include <test_util.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/random.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <thrust/shuffle.h> #include <thrust/sort.h> #include <fstream> #define TEST_HALF_T !_NVHPC_CUDA #define TEST_BF_T !_NVHPC_CUDA #if TEST_HALF_T #include <cuda_fp16.h> #endif #if TEST_BF_T #include <cuda_bf16.h> #endif using namespace cub; template <typename T> struct UnwrapHalfAndBfloat16 { using Type = T; }; #if TEST_HALF_T template <> struct UnwrapHalfAndBfloat16<half_t> { using Type = __half; }; #endif #if TEST_BF_T template <> struct UnwrapHalfAndBfloat16<bfloat16_t> { using Type = __nv_bfloat16; }; #endif constexpr static int MAX_ITERATIONS = 2; class SizeGroupDescription { public: SizeGroupDescription(const int segments, const int segment_size) : segments(segments) , segment_size(segment_size) {} int segments {}; int segment_size {}; }; template <typename KeyT> struct SegmentChecker { const KeyT *sorted_keys {}; const int *offsets {}; SegmentChecker(const KeyT *sorted_keys, const int *offsets) : sorted_keys(sorted_keys) , offsets(offsets) {} bool operator()(int segment_id) { const int segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; int counter = 0; for (int i = segment_begin; i < segment_end; i++) { if (sorted_keys[i] != static_cast<KeyT>(counter++)) { return false; } } return true; } }; template <typename KeyT> struct DescendingSegmentChecker { const KeyT *sorted_keys{}; const int *offsets{}; DescendingSegmentChecker(const KeyT *sorted_keys, const int *offsets) : sorted_keys(sorted_keys) , offsets(offsets) {} bool operator()(int segment_id) { const int 
segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; int counter = 0; for (int i = segment_end - 1; i >= segment_begin; i--) { if (sorted_keys[i] != static_cast<KeyT>(counter++)) { return false; } } return true; } }; template <typename KeyT> struct ReversedIota { KeyT *data {}; const int *offsets {}; ReversedIota(KeyT *data, const int *offsets) : data(data) , offsets(offsets) {} void operator()(int segment_id) const { const int segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; const int segment_size = segment_end - segment_begin; int count = 0; for (int i = segment_begin; i < segment_end; i++) { data[i] = static_cast<KeyT>(segment_size - 1 - count++); } } }; template <typename KeyT> struct Iota { KeyT *data{}; const int *offsets{}; Iota(KeyT *data, const int *offsets) : data(data) , offsets(offsets) {} void operator()(int segment_id) const { const int segment_begin = offsets[segment_id]; const int segment_end = offsets[segment_id + 1]; int count = 0; for (int i = segment_begin; i < segment_end; i++) { data[i] = static_cast<KeyT>(count++); } } }; template <typename KeyT, typename ValueT = cub::NullType> class Input { thrust::default_random_engine random_engine; thrust::device_vector<int> d_segment_sizes; thrust::device_vector<int> d_offsets; thrust::host_vector<int> h_offsets; using MaskedValueT = cub::detail::conditional_t< std::is_same<ValueT, cub::NullType>::value, KeyT, ValueT>; bool reverse {}; int num_items {}; thrust::device_vector<KeyT> d_keys; thrust::device_vector<MaskedValueT> d_values; thrust::host_vector<KeyT> h_keys; thrust::host_vector<MaskedValueT> h_values; public: Input(bool reverse, const thrust::host_vector<int> &h_segment_sizes) : d_segment_sizes(h_segment_sizes) , d_offsets(d_segment_sizes.size() + 1) , h_offsets(d_segment_sizes.size() + 1) , reverse(reverse) , num_items(static_cast<int>( thrust::reduce(d_segment_sizes.begin(), d_segment_sizes.end()))) , d_keys(num_items) , d_values(num_items) , h_keys(num_items) , h_values(num_items) { update(); } Input(thrust::host_vector<int> &h_offsets) : d_offsets(h_offsets) , h_offsets(h_offsets) , reverse(false) , num_items(h_offsets.back()) , d_keys(num_items) , d_values(num_items) { } void shuffle() { thrust::shuffle(d_segment_sizes.begin(), d_segment_sizes.end(), random_engine); update(); } int get_num_items() const { return num_items; } int get_num_segments() const { return static_cast<unsigned int>(d_segment_sizes.size()); } const KeyT *get_d_keys() const { return thrust::raw_pointer_cast(d_keys.data()); } thrust::device_vector<KeyT> &get_d_keys_vec() { return d_keys; } thrust::device_vector<MaskedValueT> &get_d_values_vec() { return d_values; } KeyT *get_d_keys() { return thrust::raw_pointer_cast(d_keys.data()); } const thrust::host_vector<int>& get_h_offsets() { return h_offsets; } MaskedValueT *get_d_values() { return thrust::raw_pointer_cast(d_values.data()); } const int *get_d_offsets() const { return thrust::raw_pointer_cast(d_offsets.data()); } template <typename T> bool check_output_implementation(const T *keys_output) { const int *offsets = thrust::raw_pointer_cast(h_offsets.data()); if (reverse) { DescendingSegmentChecker<T> checker{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { if (!checker(i)) { return false; } } } else { SegmentChecker<T> checker{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { if (!checker(i)) { return false; } } } return true; } bool check_output(const KeyT *d_keys_output, const 
MaskedValueT *d_values_output = nullptr) { KeyT *keys_output = thrust::raw_pointer_cast(h_keys.data()); MaskedValueT *values_output = thrust::raw_pointer_cast(h_values.data()); cudaMemcpy(keys_output, d_keys_output, sizeof(KeyT) * num_items, cudaMemcpyDeviceToHost); const bool keys_ok = check_output_implementation(keys_output); if (std::is_same<ValueT, cub::NullType>::value || d_values_output == nullptr) { return keys_ok; } cudaMemcpy(values_output, d_values_output, sizeof(ValueT) * num_items, cudaMemcpyDeviceToHost); const bool values_ok = check_output_implementation(values_output); return keys_ok && values_ok; } private: void update() { fill_offsets(); gen_keys(); } void fill_offsets() { thrust::copy(d_segment_sizes.begin(), d_segment_sizes.end(), d_offsets.begin()); thrust::exclusive_scan(d_offsets.begin(), d_offsets.end(), d_offsets.begin(), 0u); thrust::copy(d_offsets.begin(), d_offsets.end(), h_offsets.begin()); } void gen_keys() { KeyT *keys_output = thrust::raw_pointer_cast(h_keys.data()); const int *offsets = thrust::raw_pointer_cast(h_offsets.data()); if (reverse) { Iota<KeyT> generator{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { generator(i); } } else { ReversedIota<KeyT> generator{keys_output, offsets}; for (int i = 0; i < get_num_segments(); i++) { generator(i); } } d_keys = h_keys; d_values = d_keys; } }; template <typename KeyT, bool IsIntegralType = std::is_integral<KeyT>::value> class InputDescription { thrust::host_vector<int> segment_sizes; public: InputDescription& add(const SizeGroupDescription &group) { if (static_cast<std::size_t>(group.segment_size) < static_cast<std::size_t>((std::numeric_limits<KeyT>::max)())) { for (int i = 0; i < group.segments; i++) { segment_sizes.push_back(group.segment_size); } } return *this; } template <typename ValueT = cub::NullType> Input<KeyT, ValueT> gen(bool reverse) { return Input<KeyT, ValueT>(reverse, segment_sizes); } }; template <typename KeyT> class InputDescription<KeyT, false> { thrust::host_vector<int> segment_sizes; public: InputDescription& add(const SizeGroupDescription &group) { for (int i = 0; i < group.segments; i++) { segment_sizes.push_back(group.segment_size); } return *this; } template <typename ValueT = cub::NullType> Input<KeyT, ValueT> gen(bool reverse) { return Input<KeyT, ValueT>(reverse, segment_sizes); } }; template <typename WrappedKeyT, typename ValueT> void Sort(bool pairs, bool descending, bool double_buffer, bool stable_sort, void *tmp_storage, std::size_t &temp_storage_bytes, WrappedKeyT *wrapped_input_keys, WrappedKeyT *wrapped_output_keys, ValueT *input_values, ValueT *output_values, int num_items, int num_segments, const int *d_offsets, int *keys_selector = nullptr, int *values_selector = nullptr) { using KeyT = typename UnwrapHalfAndBfloat16<WrappedKeyT>::Type; auto input_keys = reinterpret_cast<KeyT*>(wrapped_input_keys); auto output_keys = reinterpret_cast<KeyT*>(wrapped_output_keys); if (stable_sort) { if (pairs) { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit(cub::DeviceSegmentedSort::StableSortPairsDescending( tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; *values_selector = values_buffer.selector; } else { 
CubDebugExit(cub::DeviceSegmentedSort::StableSortPairsDescending( tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit( cub::DeviceSegmentedSort::StableSortPairs(tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; *values_selector = values_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::StableSortPairs(tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } } else { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; CubDebugExit(cub::DeviceSegmentedSort::StableSortKeysDescending( tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit(cub::DeviceSegmentedSort::StableSortKeysDescending( tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; CubDebugExit( cub::DeviceSegmentedSort::StableSortKeys(tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::StableSortKeys(tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } } } else { if (pairs) { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit( cub::DeviceSegmentedSort::SortPairsDescending(tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; *values_selector = values_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::SortPairsDescending(tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; cub::DoubleBuffer<ValueT> values_buffer(input_values, output_values); values_buffer.selector = *values_selector; CubDebugExit(cub::DeviceSegmentedSort::SortPairs(tmp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; *values_selector = values_buffer.selector; } else { CubDebugExit(cub::DeviceSegmentedSort::SortPairs(tmp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, d_offsets + 1)); } } } else { if (descending) { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; CubDebugExit( 
cub::DeviceSegmentedSort::SortKeysDescending(tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit( cub::DeviceSegmentedSort::SortKeysDescending(tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } else { if (double_buffer) { cub::DoubleBuffer<KeyT> keys_buffer(input_keys, output_keys); keys_buffer.selector = *keys_selector; CubDebugExit(cub::DeviceSegmentedSort::SortKeys(tmp_storage, temp_storage_bytes, keys_buffer, num_items, num_segments, d_offsets, d_offsets + 1)); *keys_selector = keys_buffer.selector; } else { CubDebugExit(cub::DeviceSegmentedSort::SortKeys(tmp_storage, temp_storage_bytes, input_keys, output_keys, num_items, num_segments, d_offsets, d_offsets + 1)); } } } } } template <typename KeyT, typename ValueT> std::size_t Sort(bool pairs, bool descending, bool double_buffer, bool stable_sort, KeyT *input_keys, KeyT *output_keys, ValueT *input_values, ValueT *output_values, int num_items, int num_segments, const int *d_offsets, int *keys_selector = nullptr, int *values_selector = nullptr) { std::size_t temp_storage_bytes = 42ul; Sort<KeyT, ValueT>(pairs, descending, double_buffer, stable_sort, nullptr, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, keys_selector, values_selector); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); std::uint8_t *d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); Sort<KeyT, ValueT>(pairs, descending, double_buffer, stable_sort, d_temp_storage, temp_storage_bytes, input_keys, output_keys, input_values, output_values, num_items, num_segments, d_offsets, keys_selector, values_selector); return temp_storage_bytes; } constexpr bool keys_only = false; constexpr bool pairs = true; constexpr bool ascending = false; constexpr bool descending = true; constexpr bool pointers = false; constexpr bool double_buffer = true; constexpr bool unstable = false; constexpr bool stable = true; void TestZeroSegments() { // Type doesn't affect the escape logic, so it should be fine // to test only one set of types here. using KeyT = std::uint8_t; using ValueT = std::uint64_t; for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { for (bool sort_descending: { ascending, descending }) { for (bool sort_buffer: { pointers, double_buffer }) { cub::DoubleBuffer<KeyT> keys_buffer(nullptr, nullptr); cub::DoubleBuffer<ValueT> values_buffer(nullptr, nullptr); values_buffer.selector = 1; Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffer, stable_sort, nullptr, nullptr, nullptr, nullptr, int{}, int{}, nullptr, &keys_buffer.selector, &values_buffer.selector); AssertEquals(keys_buffer.selector, 0); AssertEquals(values_buffer.selector, 1); } } } } } void TestEmptySegments(int segments) { // Type doesn't affect the escape logic, so it should be fine // to test only one set of types here. 
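  // Descriptive note (added): all offsets below are zero, so every segment is empty.
  // The sort is expected to return early without touching the (null) data and without
  // flipping either DoubleBuffer selector, which the AssertEquals calls at the end verify.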
using KeyT = std::uint8_t; using ValueT = std::uint64_t; thrust::device_vector<int> offsets(segments + 1, int{}); const int *d_offsets = thrust::raw_pointer_cast(offsets.data()); for (bool sort_stable: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { for (bool sort_descending: { ascending, descending }) { for (bool sort_buffer: { pointers, double_buffer }) { cub::DoubleBuffer<KeyT> keys_buffer(nullptr, nullptr); cub::DoubleBuffer<ValueT> values_buffer(nullptr, nullptr); values_buffer.selector = 1; Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffer, sort_stable, nullptr, nullptr, nullptr, nullptr, int{}, segments, d_offsets, &keys_buffer.selector, &values_buffer.selector); AssertEquals(keys_buffer.selector, 0); AssertEquals(values_buffer.selector, 1); } } } } } template <typename KeyT, typename ValueT> void TestSameSizeSegments(int segment_size, int segments, bool skip_values = false) { const int num_items = segment_size * segments; thrust::device_vector<int> offsets(segments + 1); thrust::sequence(offsets.begin(), offsets.end(), int{}, segment_size); const int *d_offsets = thrust::raw_pointer_cast(offsets.data()); const KeyT target_key {1}; const ValueT target_value {42}; thrust::device_vector<KeyT> keys_input(num_items); thrust::device_vector<KeyT> keys_output(num_items); KeyT *d_keys_input = thrust::raw_pointer_cast(keys_input.data()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); thrust::device_vector<ValueT> values_input(num_items); thrust::device_vector<ValueT> values_output(num_items); thrust::host_vector<KeyT> host_keys(num_items); thrust::host_vector<ValueT> host_values(num_items); ValueT *d_values_input = thrust::raw_pointer_cast(values_input.data()); ValueT *d_values_output = thrust::raw_pointer_cast(values_output.data()); for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { if (sort_pairs) { if (skip_values) { continue; } } for (bool sort_descending: { ascending, descending }) { for (bool sort_buffers: { pointers, double_buffer }) { cub::DoubleBuffer<KeyT> keys_buffer(nullptr, nullptr); cub::DoubleBuffer<ValueT> values_buffer(nullptr, nullptr); values_buffer.selector = 1; thrust::fill(keys_input.begin(), keys_input.end(), target_key); thrust::fill(keys_output.begin(), keys_output.end(), KeyT{}); if (sort_pairs) { if (sort_buffers) { thrust::fill(values_input.begin(), values_input.end(), ValueT{}); thrust::fill(values_output.begin(), values_output.end(), target_value); } else { thrust::fill(values_input.begin(), values_input.end(), target_value); thrust::fill(values_output.begin(), values_output.end(), ValueT{}); } } const std::size_t temp_storage_bytes = Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, stable_sort, d_keys_input, d_keys_output, d_values_input, d_values_output, num_items, segments, d_offsets, &keys_buffer.selector, &values_buffer.selector); // If temporary storage size is defined by extra keys storage if (sort_buffers) { if (2 * segments * sizeof(unsigned int) < num_items * sizeof(KeyT)) { std::size_t extra_temp_storage_bytes{}; Sort(sort_pairs, sort_descending, pointers, stable_sort, nullptr, extra_temp_storage_bytes, d_keys_input, d_keys_output, d_values_input, d_values_output, num_items, segments, d_offsets, &keys_buffer.selector, &values_buffer.selector); AssertTrue(extra_temp_storage_bytes > temp_storage_bytes); } } { host_keys = keys_buffer.selector || !sort_buffers ? 
keys_output : keys_input; const std::size_t items_selected = thrust::count(host_keys.begin(), host_keys.end(), target_key); AssertEquals(static_cast<int>(items_selected), num_items); } if (sort_pairs) { host_values = values_buffer.selector || !sort_buffers ? values_output : values_input; const std::size_t items_selected = thrust::count(host_values.begin(), host_values.end(), target_value); AssertEquals(static_cast<int>(items_selected), num_items); } } } } } } template <typename KeyT, typename ValueT> void InputTest(bool sort_descending, Input<KeyT, ValueT> &input) { thrust::device_vector<KeyT> keys_output(input.get_num_items()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); thrust::device_vector<ValueT> values_output(input.get_num_items()); ValueT *d_values_output = thrust::raw_pointer_cast(values_output.data()); for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs : { keys_only, pairs }) { for (bool sort_buffers : {pointers, double_buffer}) { for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { thrust::fill(keys_output.begin(), keys_output.end(), KeyT{}); thrust::fill(values_output.begin(), values_output.end(), ValueT{}); cub::DoubleBuffer<KeyT> keys_buffer(input.get_d_keys(), d_keys_output); cub::DoubleBuffer<ValueT> values_buffer(input.get_d_values(), d_values_output); Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, stable_sort, input.get_d_keys(), d_keys_output, input.get_d_values(), d_values_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets(), &keys_buffer.selector, &values_buffer.selector); if (sort_buffers) { if (sort_pairs) { AssertTrue(input.check_output(keys_buffer.Current(), values_buffer.Current())); } else { AssertTrue(input.check_output(keys_buffer.Current())); } } else { if (sort_pairs) { AssertTrue(input.check_output(d_keys_output, d_values_output)); } else { AssertTrue(input.check_output(d_keys_output)); } } input.shuffle(); } } } } } struct ComparisonPredicate { template <typename T> __host__ __device__ bool operator()(const T &lhs, const T &rhs) const { return lhs == rhs; } __host__ __device__ bool operator()(const half_t &lhs, const half_t &rhs) const { return lhs.raw() == rhs.raw(); } }; template <typename T> bool compare_two_outputs(const thrust::host_vector<int> &offsets, const thrust::host_vector<T> &lhs, const thrust::host_vector<T> &rhs) { const auto num_segments = static_cast<unsigned int>(offsets.size() - 1); for (std::size_t segment_id = 0; segment_id < num_segments; segment_id++) { auto lhs_begin = lhs.cbegin() + offsets[segment_id]; auto lhs_end = lhs.cbegin() + offsets[segment_id + 1]; auto rhs_begin = rhs.cbegin() + offsets[segment_id]; auto err = thrust::mismatch(lhs_begin, lhs_end, rhs_begin, ComparisonPredicate{}); if (err.first != lhs_end) { const auto idx = thrust::distance(lhs_begin, err.first); const auto segment_size = std::distance(lhs_begin, lhs_end); std::cerr << "Mismatch in segment " << segment_id << " at position " << idx << " / " << segment_size << ": " << static_cast<std::uint64_t>(lhs_begin[idx]) << " vs " << static_cast<std::uint64_t>(rhs_begin[idx]) << " (" << typeid(lhs_begin[idx]).name() << ")" << std::endl; return false; } } return true; } template <typename ValueT> void RandomizeInput(thrust::host_vector<bool> &h_keys, thrust::host_vector<ValueT> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<std::uint8_t>::max)()) > 128; h_values[i] = 
RandomValue((std::numeric_limits<ValueT>::max)()); } } template <typename KeyT, typename ValueT> void RandomizeInput(thrust::host_vector<KeyT> &h_keys, thrust::host_vector<ValueT> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<KeyT>::max)()); h_values[i] = RandomValue((std::numeric_limits<ValueT>::max)()); } } #if TEST_HALF_T void RandomizeInput(thrust::host_vector<half_t> &h_keys, thrust::host_vector<std::uint32_t> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<int>::max)()); h_values[i] = RandomValue((std::numeric_limits<std::uint32_t>::max)()); } } #endif #if TEST_BF_T void RandomizeInput(thrust::host_vector<bfloat16_t> &h_keys, thrust::host_vector<std::uint32_t> &h_values) { for (std::size_t i = 0; i < h_keys.size(); i++) { h_keys[i] = RandomValue((std::numeric_limits<int>::max)()); h_values[i] = RandomValue((std::numeric_limits<std::uint32_t>::max)()); } } #endif template <typename KeyT, typename ValueT> void HostReferenceSort(bool sort_pairs, bool sort_descending, unsigned int num_segments, const thrust::host_vector<int> &h_offsets, thrust::host_vector<KeyT> &h_keys, thrust::host_vector<ValueT> &h_values) { for (unsigned int segment_i = 0; segment_i < num_segments; segment_i++) { const int segment_begin = h_offsets[segment_i]; const int segment_end = h_offsets[segment_i + 1]; if (sort_pairs) { if (sort_descending) { thrust::stable_sort_by_key(h_keys.begin() + segment_begin, h_keys.begin() + segment_end, h_values.begin() + segment_begin, thrust::greater<KeyT>{}); } else { thrust::stable_sort_by_key(h_keys.begin() + segment_begin, h_keys.begin() + segment_end, h_values.begin() + segment_begin); } } else { if (sort_descending) { thrust::stable_sort(h_keys.begin() + segment_begin, h_keys.begin() + segment_end, thrust::greater<KeyT>{}); } else { thrust::stable_sort(h_keys.begin() + segment_begin, h_keys.begin() + segment_end); } } } } #if STORE_ON_FAILURE template <typename KeyT, typename ValueT> void DumpInput(bool sort_pairs, bool sort_descending, bool sort_buffers, Input<KeyT, ValueT> &input, thrust::host_vector<KeyT> &h_keys, thrust::host_vector<ValueT> &h_values) { const thrust::host_vector<int> &h_offsets = input.get_h_offsets(); std::cout << "sort pairs: " << sort_pairs << "\n"; std::cout << "sort descending: " << sort_descending << "\n"; std::cout << "sort buffers: " << sort_buffers << "\n"; std::cout << "num_items: " << input.get_num_items() << "\n"; std::cout << "num_segments: " << input.get_num_segments() << "\n"; std::cout << "key type: " << typeid(h_keys[0]).name() << "\n"; std::cout << "value type: " << typeid(h_values[0]).name() << "\n"; std::cout << "offset type: " << typeid(h_offsets[0]).name() << "\n"; std::ofstream offsets_dump("offsets", std::ios::binary); offsets_dump.write(reinterpret_cast<const char *>( thrust::raw_pointer_cast(h_offsets.data())), sizeof(int) * h_offsets.size()); std::ofstream keys_dump("keys", std::ios::binary); keys_dump.write(reinterpret_cast<const char *>( thrust::raw_pointer_cast(h_keys.data())), sizeof(KeyT) * h_keys.size()); std::ofstream values_dump("values", std::ios::binary); values_dump.write(reinterpret_cast<const char *>( thrust::raw_pointer_cast(h_values.data())), sizeof(ValueT) * h_values.size()); } #endif template <typename KeyT, typename ValueT> void InputTestRandom(Input<KeyT, ValueT> &input) { thrust::host_vector<KeyT> h_keys_output(input.get_num_items()); thrust::device_vector<KeyT> 
keys_output(input.get_num_items()); thrust::host_vector<ValueT> h_values_output(input.get_num_items()); thrust::device_vector<ValueT> values_output(input.get_num_items()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); ValueT *d_values_output = thrust::raw_pointer_cast(values_output.data()); thrust::host_vector<KeyT> h_keys(input.get_num_items()); thrust::host_vector<ValueT> h_values(input.get_num_items()); const thrust::host_vector<int> &h_offsets = input.get_h_offsets(); for (bool stable_sort: { unstable, stable }) { for (bool sort_pairs: { keys_only, pairs }) { for (bool sort_descending: { ascending, descending }) { for (bool sort_buffers: { pointers, double_buffer }) { for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { RandomizeInput(h_keys, h_values); #if STORE_ON_FAILURE auto h_keys_backup = h_keys; auto h_values_backup = h_values; #endif input.get_d_keys_vec() = h_keys; input.get_d_values_vec() = h_values; cub::DoubleBuffer<KeyT> keys_buffer(input.get_d_keys(), d_keys_output); cub::DoubleBuffer<ValueT> values_buffer(input.get_d_values(), d_values_output); Sort<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, stable_sort, input.get_d_keys(), d_keys_output, input.get_d_values(), d_values_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets(), &keys_buffer.selector, &values_buffer.selector); HostReferenceSort(sort_pairs, sort_descending, input.get_num_segments(), h_offsets, h_keys, h_values); if (sort_buffers) { if (keys_buffer.selector) { h_keys_output = keys_output; } else { h_keys_output = input.get_d_keys_vec(); } if (values_buffer.selector) { h_values_output = values_output; } else { h_values_output = input.get_d_values_vec(); } } else { h_keys_output = keys_output; h_values_output = values_output; } const bool keys_ok = compare_two_outputs(h_offsets, h_keys, h_keys_output); const bool values_ok = sort_pairs ? 
compare_two_outputs(h_offsets, h_values, h_values_output) : true; #if STORE_ON_FAILURE if (!keys_ok || !values_ok) { DumpInput<KeyT, ValueT>(sort_pairs, sort_descending, sort_buffers, input, h_keys_backup, h_values_backup); } #endif AssertTrue(keys_ok); AssertTrue(values_ok); input.shuffle(); } } } } } } template <typename KeyT, typename ValueT, bool IsSupportedType = std::is_integral<KeyT>::value> struct EdgeTestDispatch { // Edge cases that needs to be tested const int empty_short_circuit_segment_size = 0; const int copy_short_circuit_segment_size = 1; const int swap_short_circuit_segment_size = 2; const int a_few = 2; const int a_bunch_of = 42; const int a_lot_of = 420; template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Invoke() { NV_IF_TARGET(NV_IS_HOST, (using SmallAndMediumPolicyT = typename ActivePolicyT::SmallAndMediumSegmentedSortPolicyT; using LargeSegmentPolicyT = typename ActivePolicyT::LargeSegmentPolicy; const int small_segment_max_segment_size = SmallAndMediumPolicyT::SmallPolicyT::ITEMS_PER_TILE; const int items_per_small_segment = SmallAndMediumPolicyT::SmallPolicyT::ITEMS_PER_THREAD; const int medium_segment_max_segment_size = SmallAndMediumPolicyT::MediumPolicyT::ITEMS_PER_TILE; const int single_thread_segment_size = items_per_small_segment; const int large_cached_segment_max_segment_size = LargeSegmentPolicyT::BLOCK_THREADS * LargeSegmentPolicyT::ITEMS_PER_THREAD; for (bool sort_descending : {ascending, descending}) { Input<KeyT, ValueT> edge_cases = InputDescription<KeyT>() .add({a_lot_of, empty_short_circuit_segment_size}) .add({a_lot_of, copy_short_circuit_segment_size}) .add({a_lot_of, swap_short_circuit_segment_size}) .add({a_lot_of, swap_short_circuit_segment_size + 1}) .add({a_lot_of, swap_short_circuit_segment_size + 1}) .add({a_lot_of, single_thread_segment_size - 1}) .add({a_lot_of, single_thread_segment_size}) .add({a_lot_of, single_thread_segment_size + 1}) .add({a_lot_of, single_thread_segment_size * 2 - 1}) .add({a_lot_of, single_thread_segment_size * 2}) .add({a_lot_of, single_thread_segment_size * 2 + 1}) .add({a_bunch_of, small_segment_max_segment_size - 1}) .add({a_bunch_of, small_segment_max_segment_size}) .add({a_bunch_of, small_segment_max_segment_size + 1}) .add({a_bunch_of, medium_segment_max_segment_size - 1}) .add({a_bunch_of, medium_segment_max_segment_size}) .add({a_bunch_of, medium_segment_max_segment_size + 1}) .add({a_bunch_of, large_cached_segment_max_segment_size - 1}) .add({a_bunch_of, large_cached_segment_max_segment_size}) .add({a_bunch_of, large_cached_segment_max_segment_size + 1}) .add({a_few, large_cached_segment_max_segment_size * 2}) .add({a_few, large_cached_segment_max_segment_size * 3}) .add({a_few, large_cached_segment_max_segment_size * 5}) .template gen<ValueT>(sort_descending); InputTest<KeyT, ValueT>(sort_descending, edge_cases); })); return cudaSuccess; } }; template <typename KeyT, typename ValueT> struct EdgeTestDispatch<KeyT, ValueT, false> { template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Invoke() { // Edge case test is using an optimized testing approach which is // incompatible with duplicates. RandomTest is used for other types. 
return cudaSuccess; } }; template <typename KeyT, typename ValueT> void EdgePatternsTest() { int ptx_version = 0; if (CubDebug(PtxVersion(ptx_version))) { return; } using MaxPolicyT = typename cub::DeviceSegmentedSortPolicy<KeyT, ValueT>::MaxPolicy; using EdgeTestDispatchT = EdgeTestDispatch<KeyT, ValueT>; EdgeTestDispatchT dispatch; MaxPolicyT::Invoke(ptx_version, dispatch); } template <typename KeyT, typename ValueT> Input<KeyT, ValueT> GenRandomInput(int max_items, int min_segments, int max_segments, bool descending) { int items_generated {}; const int segments_num = RandomValue(max_segments) + min_segments; thrust::host_vector<int> segment_sizes; segment_sizes.reserve(segments_num); const int max_segment_size = 6000; for (int segment_id = 0; segment_id < segments_num; segment_id++) { const int segment_size_raw = RandomValue(max_segment_size); const int segment_size = segment_size_raw > 0 ? segment_size_raw : 0; if (segment_size + items_generated > max_items) { break; } items_generated += segment_size; segment_sizes.push_back(segment_size); } return Input<KeyT, ValueT>{descending, segment_sizes}; } template <typename KeyT, typename ValueT> void RandomTest(int min_segments, int max_segments) { const int max_items = 10000000; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { Input<KeyT, ValueT> edge_cases = GenRandomInput<KeyT, ValueT>(max_items, min_segments, max_segments, descending); InputTestRandom(edge_cases); } } template <typename KeyT, typename ValueT> void Test() { for (int segment_size: { 1, 1024, 24 * 1024 }) { for (int segments: { 1, 1024 }) { TestSameSizeSegments<KeyT, ValueT>(segment_size, segments); } } RandomTest<KeyT, ValueT>(1 << 2, 1 << 8); RandomTest<KeyT, ValueT>(1 << 9, 1 << 19); EdgePatternsTest<KeyT, ValueT>(); } #if TEST_CDP == 1 template <typename KeyT> __global__ void LauncherKernel( void *tmp_storage, std::size_t temp_storage_bytes, const KeyT *in_keys, KeyT *out_keys, int num_items, int num_segments, const int *offsets) { CubDebug(cub::DeviceSegmentedSort::SortKeys(tmp_storage, temp_storage_bytes, in_keys, out_keys, num_items, num_segments, offsets, offsets + 1)); } template <typename KeyT, typename ValueT> void TestDeviceSideLaunch(Input<KeyT, ValueT> &input) { thrust::host_vector<KeyT> h_keys_output(input.get_num_items()); thrust::device_vector<KeyT> keys_output(input.get_num_items()); thrust::host_vector<ValueT> h_values_output(input.get_num_items()); thrust::device_vector<ValueT> values_output(input.get_num_items()); KeyT *d_keys_output = thrust::raw_pointer_cast(keys_output.data()); thrust::host_vector<KeyT> h_keys(input.get_num_items()); thrust::host_vector<ValueT> h_values(input.get_num_items()); const thrust::host_vector<int> &h_offsets = input.get_h_offsets(); for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { RandomizeInput(h_keys, h_values); input.get_d_keys_vec() = h_keys; input.get_d_values_vec() = h_values; const KeyT *d_input = input.get_d_keys(); std::size_t temp_storage_bytes{}; cub::DeviceSegmentedSort::SortKeys(nullptr, temp_storage_bytes, d_input, d_keys_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets(), input.get_d_offsets() + 1); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); std::uint8_t *d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); LauncherKernel<KeyT><<<1, 1>>>( d_temp_storage, temp_storage_bytes, d_input, d_keys_output, input.get_num_items(), input.get_num_segments(), input.get_d_offsets()); CubDebugExit(cudaDeviceSynchronize()); 
CubDebugExit(cudaPeekAtLastError()); HostReferenceSort(false, false, input.get_num_segments(), h_offsets, h_keys, h_values); h_keys_output = keys_output; const bool keys_ok = compare_two_outputs(h_offsets, h_keys, h_keys_output); AssertTrue(keys_ok); input.shuffle(); } } template <typename KeyT> void TestDeviceSideLaunch(int min_segments, int max_segments) { const int max_items = 10000000; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { Input<KeyT, KeyT> edge_cases = GenRandomInput<KeyT, KeyT>(max_items, min_segments, max_segments, descending); TestDeviceSideLaunch(edge_cases); } } template <typename KeyT> void TestDeviceSideLaunch() { TestDeviceSideLaunch<KeyT>(1 << 2, 1 << 8); TestDeviceSideLaunch<KeyT>(1 << 9, 1 << 19); } #endif // TEST_CDP void TestUnspecifiedRanges() { const std::size_t num_items = 1024 * 1024; const std::size_t max_segments = 42; const std::size_t avg_segment_size = num_items / max_segments; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { thrust::host_vector<int> h_offsets_begin; thrust::host_vector<int> h_offsets_end; h_offsets_begin.reserve(max_segments + 1); h_offsets_end.reserve(max_segments + 1); { int offset = 0; for (std::size_t sid = 0; sid < max_segments; sid++) { const int segment_size = RandomValue(static_cast<int>(avg_segment_size)); const bool segment_is_utilized = RandomValue(100) > 60; if (segment_is_utilized) { h_offsets_begin.push_back(offset); h_offsets_end.push_back(offset + segment_size); } offset += segment_size; } if (h_offsets_begin.empty()) { h_offsets_begin.push_back(avg_segment_size); h_offsets_end.push_back(num_items); } } thrust::device_vector<int> keys(num_items); thrust::device_vector<int> values(num_items); thrust::sequence(keys.rbegin(), keys.rend()); thrust::sequence(values.rbegin(), values.rend()); thrust::device_vector<int> d_offsets_begin = h_offsets_begin; thrust::device_vector<int> d_offsets_end = h_offsets_end; thrust::device_vector<int> expected_keys = keys; thrust::device_vector<int> expected_values = values; const int num_segments = static_cast<int>(h_offsets_begin.size()); for (int sid = 0; sid < num_segments; sid++) { const int segment_begin = h_offsets_begin[sid]; const int segment_end = h_offsets_end[sid]; thrust::sort_by_key(expected_keys.begin() + segment_begin, expected_keys.begin() + segment_end, expected_values.begin() + segment_begin); } thrust::device_vector<int> result_keys = keys; thrust::device_vector<int> result_values = values; { cub::DoubleBuffer<int> keys_buffer( thrust::raw_pointer_cast(keys.data()), thrust::raw_pointer_cast(result_keys.data())); cub::DoubleBuffer<int> values_buffer( thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(result_values.data())); std::size_t temp_storage_bytes{}; std::uint8_t *d_temp_storage{}; CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, keys_buffer, values_buffer, num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); for (int sid = 0; sid < num_segments; sid++) { const int segment_begin = h_offsets_begin[sid]; const int segment_end = 
h_offsets_end[sid]; if (keys_buffer.selector == 0) { thrust::copy( keys.begin() + segment_begin, keys.begin() + segment_end, result_keys.begin() + segment_begin); } if (values_buffer.selector == 0) { thrust::copy( values.begin() + segment_begin, values.begin() + segment_end, result_values.begin() + segment_begin); } } } AssertEquals(result_values, expected_values); AssertEquals(result_keys, expected_keys); thrust::sequence(keys.rbegin(), keys.rend()); thrust::sequence(values.rbegin(), values.rend()); result_keys = keys; result_values = values; { std::size_t temp_storage_bytes{}; std::uint8_t *d_temp_storage{}; CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, thrust::raw_pointer_cast(keys.data()), thrust::raw_pointer_cast(result_keys.data()), thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(result_values.data()), num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); CubDebugExit(cub::DeviceSegmentedSort::SortPairs( d_temp_storage, temp_storage_bytes, thrust::raw_pointer_cast(keys.data()), thrust::raw_pointer_cast(result_keys.data()), thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(result_values.data()), num_items, num_segments, thrust::raw_pointer_cast(d_offsets_begin.data()), thrust::raw_pointer_cast(d_offsets_end.data()))); } AssertEquals(result_values, expected_values); AssertEquals(result_keys, expected_keys); } } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); // Initialize device CubDebugExit(args.DeviceInit()); // %PARAM% TEST_CDP cdp 0:1 #if TEST_CDP == 0 TestZeroSegments(); TestEmptySegments(1 << 2); TestEmptySegments(1 << 22); #if TEST_HALF_T Test<half_t, std::uint32_t>(); #endif #if TEST_BF_T Test<bfloat16_t, std::uint32_t>(); #endif Test<bool, std::uint64_t>(); Test<std::uint8_t, std::uint64_t>(); Test<std::int64_t, std::uint32_t>(); #elif TEST_CDP == 1 TestDeviceSideLaunch<int>(); #endif // TEST_CDP TestUnspecifiedRanges(); return 0; }
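// A minimal sketch of the two-phase temp-storage pattern the tests above exercise, assuming the
// cub::DeviceSegmentedSort::SortPairs overload that takes raw input/output pointers and segment
// offsets passed as [d_offsets, d_offsets + 1). Names below (SortPairsSketch, d_keys_in, ...) are
// placeholders for illustration, not identifiers from the test file.
#include <cstddef>
#include <cstdint>
#include <cub/device/device_segmented_sort.cuh>
#include <thrust/device_vector.h>

template <typename KeyT, typename ValueT>
void SortPairsSketch(const KeyT *d_keys_in, KeyT *d_keys_out,
                     const ValueT *d_values_in, ValueT *d_values_out,
                     int num_items, int num_segments, const int *d_offsets)
{
  std::size_t temp_storage_bytes{};

  // First call: a null d_temp_storage only queries the required temporary storage size.
  cub::DeviceSegmentedSort::SortPairs(nullptr, temp_storage_bytes,
                                      d_keys_in, d_keys_out,
                                      d_values_in, d_values_out,
                                      num_items, num_segments,
                                      d_offsets, d_offsets + 1);

  thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes);

  // Second call: same arguments, now with real temporary storage, performs the segmented sort.
  cub::DeviceSegmentedSort::SortPairs(thrust::raw_pointer_cast(temp_storage.data()),
                                      temp_storage_bytes,
                                      d_keys_in, d_keys_out,
                                      d_values_in, d_values_out,
                                      num_items, num_segments,
                                      d_offsets, d_offsets + 1);
}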
71efe974de59931592ed409c3b95fbb505709cfe.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <hip/hip_runtime.h> //#include <hiprand/hiprand.h> // includes random num stuff //#include <hiprand/hiprand_kernel.h> // more rand stuff //#include <hip/hip_texture_types.h> #include <stdio.h> #include <stdlib.h> #include "gpu_main.h" #include "params.h" // define texture memory texture<float, 2> texGray; texture<float, 2> texRed; texture<float, 2> texGreen; texture<float, 2> texBlue; /*************************************************************************/ int spreadColor(GPU_Palette* P){ hipLaunchKernelGGL(( dev_spreadColor), dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P-> red, P->green, P->blue); return 0; } int reduceToEdges(GPU_Palette* P){ hipLaunchKernelGGL(( dev_reduceToEdges), dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P->gray); return 0; } int colorAroundEdges(GPU_Palette* P){ hipLaunchKernelGGL(( dev_colorAroundEdges), dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P-> red, P->green, P->blue, P->gray); return 0; } /*************************************************************************/ __global__ void dev_colorAroundEdges(float* red, float* green, float* blue, float* gray) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if(tex2D(texGray, x, y) == 1){ red[vecIdx] = 1; green[vecIdx] = 1; blue[vecIdx] = 1; } else if (tex2D(texGray, x + 1, y) == 1 || tex2D(texGray, x - 1, y) == 1 || tex2D(texGray, x, y + 1) == 1 ||tex2D(texGray, x , y - 1) == 1) { /* if adjacent to an edge, leave original color */ } else { red[vecIdx] = 0; green[vecIdx] = 0; blue[vecIdx] = 0; //printf("edge found! at {%d %d} with {%f %f %f}/{%f %f %f} \n", x, y, // red[vecIdx], green[vecIdx], blue[vecIdx], // tex2D(texRed, x, y), tex2D(texGreen, x, y), tex2D(texBlue, x, y)); } } /*************************************************************************/ __global__ void dev_spreadColor(float* red, float* green, float* blue) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); float r, g, b; r = tex2D(texRed, x, y); g = tex2D(texGreen, x, y); b = tex2D(texBlue, x, y); float spreadCutoff = 0.4; if (r < spreadCutoff & g < spreadCutoff & b < spreadCutoff) { int count = 0; float accR = 0; float accG = 0; float accB = 0; for (int i = -1; i <= 1; i++) for (int j = -1; j <= 1; j++) { float r2, g2, b2; if ((r2 = tex2D(texRed, x + i, y + j)) != 0 | (g2 = tex2D(texGreen, x + i, y + j)) != 0 | (b2 = tex2D(texBlue, x + i, y + j)) != 0) { if (r2 != 1 || b2 != 1 || g2 != 1) { accR += r2; accG += g2; accB += b2; count++; } } } /* note that ++count prevents div 0 errors and allows for slight fading coming away from edges */ if (count != 0) { red[vecIdx] = accR/count; green[vecIdx] = accG/count; blue[vecIdx] = accB/count; } } } /*************************************************************************/ /* * updates gray to be all only edges, edges are white, default is black */ __global__ void dev_reduceToEdges(float* gray) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); /* partial derivitives in terms of x, y, and {r, g, b} */ float dry, dgy, dby, drx, dgx, dbx; dry = partial_diff('y', 'r', x, y); dgy = partial_diff('y', 'g', x, y); dby = partial_diff('y', 'b', x, y); drx = partial_diff('x', 'r', x, y); dgx = partial_diff('x', 'g', x, y); dbx = 
partial_diff('x', 'b', x, y); /* use dot products to determine how much change is happening in a given direction at a pixel */ float gxx, gyy, gxy; gxx = drx * drx + dgx * dgx + dbx * dbx; gyy = dry * dry + dgy * dgy + dby * dby; gxy = abs(drx * dry) + abs(dgx * dgy) + abs(dbx * dby); float isEdge; float cutTop = 0.08; /* compare each directional metrc to cutoff value obtained through trial and error */ if (gxx > cutTop || gyy > cutTop || gxy > cutTop) isEdge = 1; // edge is white else { isEdge = 0; // default background is black } gray[vecIdx] = isEdge; } /*************************************************************************/ /* * Takes the partial derivitive along axis {'x', 'y'} for * color {'r', 'g', 'b'} at point (x, y). Partial taken using * five-point stencil method. */ __device__ float partial_diff(char axis, char color, int x, int y) { float val[5]; int i = 0; switch (axis) { case 'x': switch (color) { case 'r': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texRed, x + i, y); break; case 'g': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texGreen, x + i, y); break; case 'b': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texBlue, x + i, y); break; default: printf("err"); } break; case 'y': switch (color) { case 'r': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texRed, x, y + i); break; case 'g': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texGreen, x, y + i); break; case 'b': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texBlue, x, y + i); break; default: printf("err"); } break; default: printf("err"); } float pxy = val[2]; float dF = ( -(val[4]) + 8 * (val[3]) - 8 * (val[1]) + (val[0]))/12; return dF; } /*************************************************************************/ GPU_Palette initGPUPalette(AParams* PARAMS){ // load GPU_Palette P; P.gTPB = THREADS_PER_BLOCK; // threads per block //P.gDIM = 800; // assumes the image is 800x800 P.gWidth = PARAMS->width; P.gHeight = PARAMS->height; // 800x800 palette = 25x25 grid of 32x32 threadblocks P.gSize = P.gWidth * P.gHeight * sizeof(float); P.gThreads.x = P.gTPB; P.gThreads.y = P.gTPB; P.gThreads.z = 1; // 3D of threads allowed P.gBlocks.x = (P.gWidth + P.gTPB - 1)/P.gTPB; P.gBlocks.y = (P.gHeight + P.gTPB - 1)/P.gTPB; P.gBlocks.z = 1; // only 2D of blocks allowed // allocate memory for the palette hipMalloc((void**) &P.gray, P.gSize); // black and white (avg of rgb) hipMalloc((void**) &P.red, P.gSize); // r hipMalloc((void**) &P.green, P.gSize); // g hipMalloc((void**) &P.blue, P.gSize); // b // create texture memory and bind to black and white data hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); // hipBindTexture2D(NULL, texGray, P.gray, desc, P.gDIM, // P.gDIM, sizeof(float) * P.gDIM); hipBindTexture2D(NULL, texBlue, P.blue, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); hipBindTexture2D(NULL, texGreen, P.green, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); hipBindTexture2D(NULL, texRed, P.red, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); hipBindTexture2D(NULL, texGray, P.gray, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); return P; } /*************************************************************************/ int freeGPUPalette(GPU_Palette* P) { // free texture memory hipUnbindTexture(texGray); hipUnbindTexture(texRed); hipUnbindTexture(texGreen); hipUnbindTexture(texBlue); // free gpu memory hipFree(P->gray); hipFree(P->red); hipFree(P->green); hipFree(P->blue); return 0; } /*************************************************************************/
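// Illustrative restatement of what partial_diff() and dev_reduceToEdges() above compute: a
// five-point central difference, f'(x) ~= (-f(x+2) + 8 f(x+1) - 8 f(x-1) + f(x-2)) / 12 with a
// step of one pixel, followed by the gxx/gyy/gxy directional-change test against the empirical
// cutoff 0.08. This is a standalone host-side sketch over a 1-D array; the names are
// placeholders, not identifiers from gpu_main.
#include <cmath>

// Five-point central difference at index i of a 1-D signal (unit spacing).
static float fivePointDerivative(const float *f, int i)
{
  return (-f[i + 2] + 8.0f * f[i + 1] - 8.0f * f[i - 1] + f[i - 2]) / 12.0f;
}

// Edge test used by dev_reduceToEdges, restated for one pixel whose per-channel
// derivatives along x and y are already known.
static bool isEdgePixel(float drx, float dgx, float dbx,
                        float dry, float dgy, float dby,
                        float cutoff = 0.08f)
{
  const float gxx = drx * drx + dgx * dgx + dbx * dbx;  // color change along x
  const float gyy = dry * dry + dgy * dgy + dby * dby;  // color change along y
  const float gxy = std::fabs(drx * dry) + std::fabs(dgx * dgy) + std::fabs(dbx * dby);
  return gxx > cutoff || gyy > cutoff || gxy > cutoff;
}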
71efe974de59931592ed409c3b95fbb505709cfe.cu
#include <math.h> #include <cuda.h> //#include <curand.h> // includes random num stuff //#include <curand_kernel.h> // more rand stuff //#include <cuda_texture_types.h> #include <stdio.h> #include <stdlib.h> #include "gpu_main.h" #include "params.h" // define texture memory texture<float, 2> texGray; texture<float, 2> texRed; texture<float, 2> texGreen; texture<float, 2> texBlue; /*************************************************************************/ int spreadColor(GPU_Palette* P){ dev_spreadColor<<< P->gBlocks, P->gThreads >>>(P-> red, P->green, P->blue); return 0; } int reduceToEdges(GPU_Palette* P){ dev_reduceToEdges<<< P->gBlocks, P->gThreads >>>(P->gray); return 0; } int colorAroundEdges(GPU_Palette* P){ dev_colorAroundEdges<<< P->gBlocks, P->gThreads >>>(P-> red, P->green, P->blue, P->gray); return 0; } /*************************************************************************/ __global__ void dev_colorAroundEdges(float* red, float* green, float* blue, float* gray) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if(tex2D(texGray, x, y) == 1){ red[vecIdx] = 1; green[vecIdx] = 1; blue[vecIdx] = 1; } else if (tex2D(texGray, x + 1, y) == 1 || tex2D(texGray, x - 1, y) == 1 || tex2D(texGray, x, y + 1) == 1 ||tex2D(texGray, x , y - 1) == 1) { /* if adjacent to an edge, leave original color */ } else { red[vecIdx] = 0; green[vecIdx] = 0; blue[vecIdx] = 0; //printf("edge found! at {%d %d} with {%f %f %f}/{%f %f %f} \n", x, y, // red[vecIdx], green[vecIdx], blue[vecIdx], // tex2D(texRed, x, y), tex2D(texGreen, x, y), tex2D(texBlue, x, y)); } } /*************************************************************************/ __global__ void dev_spreadColor(float* red, float* green, float* blue) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); float r, g, b; r = tex2D(texRed, x, y); g = tex2D(texGreen, x, y); b = tex2D(texBlue, x, y); float spreadCutoff = 0.4; if (r < spreadCutoff & g < spreadCutoff & b < spreadCutoff) { int count = 0; float accR = 0; float accG = 0; float accB = 0; for (int i = -1; i <= 1; i++) for (int j = -1; j <= 1; j++) { float r2, g2, b2; if ((r2 = tex2D(texRed, x + i, y + j)) != 0 | (g2 = tex2D(texGreen, x + i, y + j)) != 0 | (b2 = tex2D(texBlue, x + i, y + j)) != 0) { if (r2 != 1 || b2 != 1 || g2 != 1) { accR += r2; accG += g2; accB += b2; count++; } } } /* note that ++count prevents div 0 errors and allows for slight fading coming away from edges */ if (count != 0) { red[vecIdx] = accR/count; green[vecIdx] = accG/count; blue[vecIdx] = accB/count; } } } /*************************************************************************/ /* * updates gray to be all only edges, edges are white, default is black */ __global__ void dev_reduceToEdges(float* gray) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); /* partial derivitives in terms of x, y, and {r, g, b} */ float dry, dgy, dby, drx, dgx, dbx; dry = partial_diff('y', 'r', x, y); dgy = partial_diff('y', 'g', x, y); dby = partial_diff('y', 'b', x, y); drx = partial_diff('x', 'r', x, y); dgx = partial_diff('x', 'g', x, y); dbx = partial_diff('x', 'b', x, y); /* use dot products to determine how much change is happening in a given direction at a pixel */ float gxx, gyy, gxy; gxx = drx * drx + dgx * dgx + dbx * dbx; gyy = dry * dry + dgy * 
dgy + dby * dby; gxy = abs(drx * dry) + abs(dgx * dgy) + abs(dbx * dby); float isEdge; float cutTop = 0.08; /* compare each directional metrc to cutoff value obtained through trial and error */ if (gxx > cutTop || gyy > cutTop || gxy > cutTop) isEdge = 1; // edge is white else { isEdge = 0; // default background is black } gray[vecIdx] = isEdge; } /*************************************************************************/ /* * Takes the partial derivitive along axis {'x', 'y'} for * color {'r', 'g', 'b'} at point (x, y). Partial taken using * five-point stencil method. */ __device__ float partial_diff(char axis, char color, int x, int y) { float val[5]; int i = 0; switch (axis) { case 'x': switch (color) { case 'r': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texRed, x + i, y); break; case 'g': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texGreen, x + i, y); break; case 'b': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texBlue, x + i, y); break; default: printf("err"); } break; case 'y': switch (color) { case 'r': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texRed, x, y + i); break; case 'g': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texGreen, x, y + i); break; case 'b': for (i = -2; i <= 2; i++) val[i + 2] = tex2D(texBlue, x, y + i); break; default: printf("err"); } break; default: printf("err"); } float pxy = val[2]; float dF = ( -(val[4]) + 8 * (val[3]) - 8 * (val[1]) + (val[0]))/12; return dF; } /*************************************************************************/ GPU_Palette initGPUPalette(AParams* PARAMS){ // load GPU_Palette P; P.gTPB = THREADS_PER_BLOCK; // threads per block //P.gDIM = 800; // assumes the image is 800x800 P.gWidth = PARAMS->width; P.gHeight = PARAMS->height; // 800x800 palette = 25x25 grid of 32x32 threadblocks P.gSize = P.gWidth * P.gHeight * sizeof(float); P.gThreads.x = P.gTPB; P.gThreads.y = P.gTPB; P.gThreads.z = 1; // 3D of threads allowed P.gBlocks.x = (P.gWidth + P.gTPB - 1)/P.gTPB; P.gBlocks.y = (P.gHeight + P.gTPB - 1)/P.gTPB; P.gBlocks.z = 1; // only 2D of blocks allowed // allocate memory for the palette cudaMalloc((void**) &P.gray, P.gSize); // black and white (avg of rgb) cudaMalloc((void**) &P.red, P.gSize); // r cudaMalloc((void**) &P.green, P.gSize); // g cudaMalloc((void**) &P.blue, P.gSize); // b // create texture memory and bind to black and white data cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); // cudaBindTexture2D(NULL, texGray, P.gray, desc, P.gDIM, // P.gDIM, sizeof(float) * P.gDIM); cudaBindTexture2D(NULL, texBlue, P.blue, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); cudaBindTexture2D(NULL, texGreen, P.green, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); cudaBindTexture2D(NULL, texRed, P.red, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); cudaBindTexture2D(NULL, texGray, P.gray, desc, P.gWidth, P.gHeight, sizeof(float) * P.gWidth); return P; } /*************************************************************************/ int freeGPUPalette(GPU_Palette* P) { // free texture memory cudaUnbindTexture(texGray); cudaUnbindTexture(texRed); cudaUnbindTexture(texGreen); cudaUnbindTexture(texBlue); // free gpu memory cudaFree(P->gray); cudaFree(P->red); cudaFree(P->green); cudaFree(P->blue); return 0; } /*************************************************************************/
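// The .hip and .cu versions of gpu_main above differ only in runtime API names and the kernel
// launch form; a sketch of that hipify mapping as it appears in this pair (not an exhaustive list):
//
//   HIP version                                        CUDA version
//   hipLaunchKernelGGL(k, dim3(g), dim3(b), 0, 0, ...) k<<<g, b>>>(...)
//   hipMalloc / hipFree                                cudaMalloc / cudaFree
//   hipBindTexture2D / hipUnbindTexture                cudaBindTexture2D / cudaUnbindTexture
//   hipChannelFormatDesc, hipCreateChannelDesc<T>()    cudaChannelFormatDesc, cudaCreateChannelDesc<T>()
//
// Minimal CUDA-side illustration of the launch form; the kernel and sizes below are
// placeholders, not taken from the files above.
__global__ void scaleKernel(float *data, float factor, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

static void launchScale(float *d_data, float factor, int n)
{
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA triple-chevron launch; the HIP equivalent would be
  // hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d_data, factor, n);
  scaleKernel<<<grid, block>>>(d_data, factor, n);
}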
a32adae8385df3474f691e758fa26d4145b40047.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/NumericLimits.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor64<scalar_t, 4> output, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int64_t slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature // For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822 if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; inputData += slice * itime * iheight * iwidth; scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { int index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || at::_isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInputData, gradOutput.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_out_cuda_template()"); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } if (input.numel() == 0) { return; } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ scalar_t *input_data = work_input.data_ptr<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? 
kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D input tensor, but got ", input.sizes()); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes()); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_backward_out_cuda_template()"); if (gradOutput.numel() == 0) { return; } Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, oheight, owidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor& output, Tensor& indices) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
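// The *_out_frame helpers above tile the combined (batch * channels * time) extent over
// gridDim.z in chunks of 65535 blocks, because the y and z grid dimensions are limited to
// 65535. A stripped-down sketch of that chunking pattern; the kernel and names are
// placeholders, not the pooling kernels above.
__global__ void perSliceKernel(float *out, int width, int height, int offsetZ)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z + offsetZ;  // absolute slice index across all launches
  if (x < width && y < height)
    out[(static_cast<long long>(z) * height + y) * width + x] = 0.0f;
}

static void launchOverSlices(float *d_out, int width, int height, long long totalZ)
{
  dim3 block(32, 8);
  long long offsetZ = 0;
  while (totalZ > 0) {
    dim3 grid((width  + block.x - 1) / block.x,
              (height + block.y - 1) / block.y,
              totalZ > 65535 ? 65535u : static_cast<unsigned int>(totalZ));
    perSliceKernel<<<grid, block>>>(d_out, width, height, static_cast<int>(offsetZ));
    totalZ  -= 65535;
    offsetZ += 65535;
  }
}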
a32adae8385df3474f691e758fa26d4145b40047.cu
#include <ATen/AccumulateType.h> #include <ATen/ceil_div.h> #include <ATen/NamedTensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Pool.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } template <typename scalar_t> __global__ static void max_pool3d_with_indices_single_out_frame( scalar_t* inputData, PackedTensorAccessor64<scalar_t, 4> output, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time int64_t slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature // For int64_t data type, see https://github.com/pytorch/pytorch/issues/52822 if (oRow < output.size(2) && oColumn < output.size(3)) { int tStart = oFrame * dT - pT; int hStart = oRow * dH - pH; int wStart = oColumn * dW - pW; int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime); int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight); int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth); while(tStart < 0) tStart += dilationT; while(hStart < 0) hStart += dilationH; while(wStart < 0) wStart += dilationW; int maxIndex = tStart * iheight * iwidth + hStart * iwidth + wStart; inputData += slice * itime * iheight * iwidth; scalar_t max = at::numeric_limits<scalar_t>::lower_bound(); // -Infinity for (int t = tStart; t < tEnd; t += dilationT) { for (int h = hStart; h < hEnd; h += dilationH) { for (int w = wStart; w < wEnd; w += dilationW) { int index = t * iheight * iwidth + h * iwidth + w; scalar_t val = inputData[index]; if ((max < val) || at::_isnan(val)) { max = val; maxIndex = index; } } } } output[slice][oFrame][oRow][oColumn] = max; indices[slice][oFrame][oRow][oColumn] = maxIndex; } } template <typename scalar_t> void max_pool3d_with_indices_out_frame( scalar_t* input_data, const Tensor& output, const Tensor& indices, int totalZ, int itime, int iheight, int iwidth, int otime, int oheight, int owidth, int kT, int kH, int kW, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int dilationW) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 
65535 : totalZ); max_pool3d_with_indices_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } #undef UPDATE_OUTPUT_KERNEL_WIDTH template <typename scalar_t> __global__ static void max_pool3d_with_indices_backward_single_out_frame( scalar_t *gradInputData, PackedTensorAccessor64<scalar_t, 4> gradOutput, PackedTensorAccessor64<int64_t, 4> indices, int itime, int iheight, int iwidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH, int offsetZ) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3)) { int maxIndex = indices[slice][oFrame][oRow][oColumn]; if (maxIndex != -1) { gpuAtomicAddNoReturn(&gradInputData[slice * itime * iheight * iwidth + maxIndex], gradOutput[slice][oFrame][oRow][oColumn]); } } } template <typename scalar_t> void max_pool3d_with_indices_backward_out_frame( scalar_t *gradInputData, const Tensor& gradOutput, const Tensor& indices, int64_t totalZ, int itime, int iheight, int iwidth, int oheight, int owidth, int dT, int dH, int dW, int pT, int pH, int pW, int dilationT, int dilationH) { int offsetZ = 0; dim3 block(32, 8); while (totalZ > 0) { dim3 grid(ceil_div(owidth, static_cast<int>(block.x)), ceil_div(oheight, static_cast<int>(block.y)), totalZ > 65535 ? 65535 : totalZ); max_pool3d_with_indices_backward_single_out_frame <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( gradInputData, gradOutput.packed_accessor64<scalar_t, 4>(), indices.packed_accessor64<int64_t, 4>(), itime, iheight, iwidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } void max_pool3d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t itime = input.size(-3); const int64_t iheight = input.size(-2); const int64_t iwidth = input.size(-1); const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode); const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode); const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode); pool3d_shape_check( input, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_out_cuda_template()"); if (input.ndimension() == 4) { output.resize_({ nslices, otime, oheight, owidth}); indices.resize_({nslices, otime, oheight, owidth}); } else { output.resize_({nbatch, nslices, otime, oheight, owidth}); indices.resize_({nbatch, nslices, otime, oheight, owidth}); } if (input.numel() == 0) { return; } Tensor work_input = input.contiguous(); Tensor work_output = output; Tensor work_indices = indices; if (input.ndimension() == 5) { // Collapse batch and feature dimensions. work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_out_frame", [&]{ scalar_t *input_data = work_input.data_ptr<scalar_t>(); int64_t totalZ = otime * nslices * nbatch; max_pool3d_with_indices_out_frame( input_data, work_output, work_indices, totalZ, itime, iheight, iwidth, otime, oheight, owidth, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW); } ); } void max_pool3d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU(__func__, {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 3, "max_pool3d: kernel_size must either be a single int, or a tuple of three ints") const int kT = safe_downcast<int, int64_t>(kernel_size[0]); const int kH = kernel_size.size() == 1 ? 
kT : safe_downcast<int, int64_t>(kernel_size[1]); const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]); TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 3, "max_pool3d: stride must either be omitted, a single int, or a tuple of three ints") const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]); const int dH = stride.empty() ? kH : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[1]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dT : safe_downcast<int, int64_t>(stride[2]); TORCH_CHECK(padding.size() == 1 || padding.size() == 3, "max_pool3d: padding must be either be a single int, or a tuple of three ints"); const int pT = safe_downcast<int, int64_t>(padding[0]); const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]); const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 3, "max_pool3d: dilation must be either a single int, or a tuple of three ints"); const int dilationT = safe_downcast<int, int64_t>(dilation[0]); const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]); const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]); TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D input tensor, but got ", input.sizes()); TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5), "max_pool2d_with_indices_backward_out_cuda_template(): ", "Expected 4D or 5D gradOutput tensor, but got ", gradOutput.sizes()); // Resize and initialize result tensor. gradInput.resize_as_(input); gradInput.zero_(); const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1; const int64_t nslices = input.size(-4); const int64_t otime = gradOutput.size(-3); const int64_t oheight = gradOutput.size(-2); const int64_t owidth = gradOutput.size(-1); const int64_t itime = gradInput.size(-3); const int64_t iheight = gradInput.size(-2); const int64_t iwidth = gradInput.size(-1); max_pool3d_backward_shape_check( input, gradOutput, indices, nslices, kT, kH, kW, dT, dH, dW, pT, pH, pW, dilationT, dilationH, dilationW, itime, iheight, iwidth, otime, oheight, owidth, "max_pool3d_with_indices_backward_out_cuda_template()"); if (gradOutput.numel() == 0) { return; } Tensor work_grad_input = gradInput; Tensor work_grad_output = gradOutput.contiguous(); Tensor work_indices = indices.contiguous(); if (input.ndimension() == 5) { // Collapse batch and feature dimensions. 
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth}); work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth}); work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth}); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool3d_with_indices_backward_out_frame", [&] { const int64_t totalZ = otime * nslices * nbatch; scalar_t *grad_input_data = work_grad_input.data_ptr<scalar_t>(); max_pool3d_with_indices_backward_out_frame( grad_input_data, work_grad_output, work_indices, totalZ, itime, iheight, iwidth, oheight, owidth, dT, dH, dW, pT, pH, pW, dilationT, dilationH); } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, Tensor& output, Tensor& indices) { max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool3d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool3d_with_indices_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_out_cuda"); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool3d_with_indices_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("max_pool3d_with_indices_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool3d_with_indices_backward_out_cuda_template( gradInput, gradOutput, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
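// Both versions above size their outputs with pooling_output_shape<int64_t>(...), whose
// definition lives in Pool.h rather than in this file. The helper below is a sketch of how
// that output extent is commonly computed in ATen (an assumption about the helper, not a copy
// of it): one spatial dimension given kernel size, padding, stride, dilation, and ceil_mode.
#include <cstdint>

static std::int64_t pooling_output_shape_sketch(std::int64_t input, std::int64_t kernel,
                                                std::int64_t pad, std::int64_t stride,
                                                std::int64_t dilation, bool ceil_mode)
{
  std::int64_t numerator = input + 2 * pad - dilation * (kernel - 1) - 1
                         + (ceil_mode ? stride - 1 : 0);
  std::int64_t output = numerator / stride + 1;
  if (ceil_mode && (output - 1) * stride >= input + pad) {
    // The last window would start entirely inside the trailing padding; drop it.
    --output;
  }
  return output;
}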
a2afb5ea369b4903b36ba112af81d12da4979dd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include "../include/kernel.cuh" __global__ void cuda_element_add (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { C[i] = A[i] + B[i]; } } __global__ void cuda_element_add_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { C[i] = A[i] + B[i]; } } __global__ void cuda_element_sub (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { C[i] = A[i] - B[i]; } } __global__ void cuda_element_sub_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { C[i] = A[i] - B[i]; } } __global__ void cuda_element_mul (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { C[i] = A[i] * B[i]; } } __global__ void cuda_element_mul_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { C[i] = A[i] * B[i]; } } __global__ void cuda_element_div (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { if (B[i] != 0) C[i] = A[i] / B[i]; else C[i] = 0.0; } } __global__ void cuda_element_div_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { if (B[i] != 0) C[i] = A[i] / B[i]; else C[i] = 0.0; } } /****************************************************** ***************************************************** * CUDA kernels for matrix multiplication ***************************************************** *******************************************************/ __global__ void cuda_matrix_mul_basic (const float *A, const float *B, float *C, const size_t M, const size_t N, const size_t K) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i<M&&j<N) { float sum=0; for (int l=0; l<K; l++) { sum += A[i*K+l]*B[l*K+j]; } C[i*K+j]=sum; } } __global__ void cuda_matrix_mul_patch (const float *A, const float *B, float *C, const int M, const int N, const int K, const int A_w, const int B_w) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i<M && j<N) { float sum=0; for (int l=0; l<K; l++) { sum += A[i*A_w+l]*B[l*B_w+j]; } C[i*B_w+j]+=sum; } } __global__ void cuda_matrix_mul_patch_tiled (const float *A, const float *B, float *C, const int M, const int N, const int K, const int A_w, const int B_w) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int len_tile = blockDim.x, si=threadIdx.y, sj=threadIdx.x; int sidx = si*len_tile+sj; extern __shared__ float smem[]; float *sA = &smem[0]; float *sB = &smem[len_tile*len_tile]; float sum = 0.f; for (int tile=0; tile<K; tile+=len_tile) { if (tile+sj<K && i<M) sA[sidx] = A[i*A_w+(tile+sj)]; else sA[sidx] = 0.f; if (tile+si<K && j<N) sB[sidx] = B[(tile+si)*B_w+j]; else sB[sidx] = 0.f; __syncthreads(); for (int k=0; k<len_tile; k++) sum += sA[si*len_tile+k]*sB[k*len_tile+sj]; __syncthreads(); } if 
(i<M && j<N) C[i*B_w+j] += sum;
}

/******************************************************
 *****************************************************
 * CUDA kernels for matrix transposition
 *****************************************************
 *******************************************************/
__global__
void cuda_matrix_transpose_basic (const float *in, float *out, const size_t M, const size_t N)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds check so threads of a rounded-up launch grid do not read or write
    // outside the M x N matrix (same guard style as the kernels above).
    if (i < M && j < N)
        out[j*M+i] = in[i*N+j];
}
a2afb5ea369b4903b36ba112af81d12da4979dd9.cu
#include <cstdio> #include "../include/kernel.cuh" __global__ void cuda_element_add (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { C[i] = A[i] + B[i]; } } __global__ void cuda_element_add_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { C[i] = A[i] + B[i]; } } __global__ void cuda_element_sub (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { C[i] = A[i] - B[i]; } } __global__ void cuda_element_sub_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { C[i] = A[i] - B[i]; } } __global__ void cuda_element_mul (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { C[i] = A[i] * B[i]; } } __global__ void cuda_element_mul_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { C[i] = A[i] * B[i]; } } __global__ void cuda_element_div (const float *A, const float *B, float *C, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < length) { if (B[i] != 0) C[i] = A[i] / B[i]; else C[i] = 0.0; } } __global__ void cuda_element_div_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (done+i < num_elements) { if (B[i] != 0) C[i] = A[i] / B[i]; else C[i] = 0.0; } } /****************************************************** ***************************************************** * CUDA kernels for matrix multiplication ***************************************************** *******************************************************/ __global__ void cuda_matrix_mul_basic (const float *A, const float *B, float *C, const size_t M, const size_t N, const size_t K) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i<M&&j<N) { float sum=0; for (int l=0; l<K; l++) { sum += A[i*K+l]*B[l*K+j]; } C[i*K+j]=sum; } } __global__ void cuda_matrix_mul_patch (const float *A, const float *B, float *C, const int M, const int N, const int K, const int A_w, const int B_w) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i<M && j<N) { float sum=0; for (int l=0; l<K; l++) { sum += A[i*A_w+l]*B[l*B_w+j]; } C[i*B_w+j]+=sum; } } __global__ void cuda_matrix_mul_patch_tiled (const float *A, const float *B, float *C, const int M, const int N, const int K, const int A_w, const int B_w) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int len_tile = blockDim.x, si=threadIdx.y, sj=threadIdx.x; int sidx = si*len_tile+sj; extern __shared__ float smem[]; float *sA = &smem[0]; float *sB = &smem[len_tile*len_tile]; float sum = 0.f; for (int tile=0; tile<K; tile+=len_tile) { if (tile+sj<K && i<M) sA[sidx] = A[i*A_w+(tile+sj)]; else sA[sidx] = 0.f; if (tile+si<K && j<N) sB[sidx] = B[(tile+si)*B_w+j]; else sB[sidx] = 0.f; __syncthreads(); for (int k=0; k<len_tile; k++) sum += sA[si*len_tile+k]*sB[k*len_tile+sj]; __syncthreads(); } if (i<M && j<N) C[i*B_w+j] += sum; } /****************************************************** 
 *****************************************************
 * CUDA kernels for matrix transposition
 *****************************************************
 *******************************************************/
__global__
void cuda_matrix_transpose_basic (const float *in, float *out, const size_t M, const size_t N)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds check so threads of a rounded-up launch grid do not read or write
    // outside the M x N matrix (same guard style as the kernels above).
    if (i < M && j < N)
        out[j*M+i] = in[i*N+j];
}
8793650b3fe82dce0681672a032b8d9717da897d.hip
// !!! This is a file automatically generated by hipify!!! /*#include <GL/glut.h> #include <stdio.h> #include <hip/hip_runtime.h> #define DIM 500 __global__ void kernel( unsigned char *ptr, int ticks ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; // now calculate the value at that position float fx = x - DIM/2; float fy = y - DIM/2; float d = sqrtf( fx * fx + fy * fy ); float fact=16.0f; if(d<fact) fact=d; unsigned char color = (unsigned char)((255.0f * cos(d/2.0f - ticks/8.0f))/(d/fact)); ptr[offset*4 + 0] = 0; ptr[offset*4 + 1] = color*0.88f; ptr[offset*4 + 2] = color*0.92f; ptr[offset*4 + 3] = 255; } void display_cb() { glClear(GL_COLOR_BUFFER_BIT); glColor3f(1,1,0); unsigned char *gpu_bitmap; unsigned char *cpu_bitmap=(unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM*4); hipMalloc( (void**)&gpu_bitmap, DIM*DIM*4 ); dim3 blocks(DIM/16,DIM/16); dim3 threads(16,16); int ticks = 0; while(ticks<50) { kernel<<<blocks,threads>>>( gpu_bitmap, ticks ); ticks++; hipMemcpy( cpu_bitmap, gpu_bitmap,DIM*DIM*4 , hipMemcpyDeviceToHost ); //visualize cpu_bitmap glBegin(GL_POINTS); for(int x=0; x<DIM; x++) { for(int y=0; y<DIM; y++) { int offset = x + y * DIM; glColor3f((cpu_bitmap [offset*4 + 0]/255.0f), (cpu_bitmap [offset*4 + 1]/255.0f), (cpu_bitmap [offset*4 + 1]/255.0f)); glVertex2f(x,y); } } glEnd(); glutSwapBuffers(); } hipFree( gpu_bitmap ); glutPostRedisplay(); } void reshape_cb (int w, int h) { if (w==0||h==0) return; glViewport(0,0,w,h); glMatrixMode (GL_PROJECTION); glLoadIdentity (); gluOrtho2D(0,w,0,h); glMatrixMode (GL_MODELVIEW); glLoadIdentity (); } void initialize() { glutInitDisplayMode (GLUT_RGBA|GLUT_DOUBLE); glutInitWindowSize (DIM,DIM); glutInitWindowPosition (100,100); glutCreateWindow ("Ventana OpenGL"); glutDisplayFunc (display_cb); glutReshapeFunc (reshape_cb); glClearColor(0.f,0.f,0.f,1.f); } int main (int argc, char **argv) { glutInit (&argc, argv); initialize(); glutMainLoop(); return 0; } */
8793650b3fe82dce0681672a032b8d9717da897d.cu
/*#include <GL/glut.h> #include <stdio.h> #include <cuda.h> #define DIM 500 __global__ void kernel( unsigned char *ptr, int ticks ) { // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; // now calculate the value at that position float fx = x - DIM/2; float fy = y - DIM/2; float d = sqrtf( fx * fx + fy * fy ); float fact=16.0f; if(d<fact) fact=d; unsigned char color = (unsigned char)((255.0f * cos(d/2.0f - ticks/8.0f))/(d/fact)); ptr[offset*4 + 0] = 0; ptr[offset*4 + 1] = color*0.88f; ptr[offset*4 + 2] = color*0.92f; ptr[offset*4 + 3] = 255; } void display_cb() { glClear(GL_COLOR_BUFFER_BIT); glColor3f(1,1,0); unsigned char *gpu_bitmap; unsigned char *cpu_bitmap=(unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM*4); cudaMalloc( (void**)&gpu_bitmap, DIM*DIM*4 ); dim3 blocks(DIM/16,DIM/16); dim3 threads(16,16); int ticks = 0; while(ticks<50) { kernel<<<blocks,threads>>>( gpu_bitmap, ticks ); ticks++; cudaMemcpy( cpu_bitmap, gpu_bitmap,DIM*DIM*4 , cudaMemcpyDeviceToHost ); //visualize cpu_bitmap glBegin(GL_POINTS); for(int x=0; x<DIM; x++) { for(int y=0; y<DIM; y++) { int offset = x + y * DIM; glColor3f((cpu_bitmap [offset*4 + 0]/255.0f), (cpu_bitmap [offset*4 + 1]/255.0f), (cpu_bitmap [offset*4 + 1]/255.0f)); glVertex2f(x,y); } } glEnd(); glutSwapBuffers(); } cudaFree( gpu_bitmap ); glutPostRedisplay(); } void reshape_cb (int w, int h) { if (w==0||h==0) return; glViewport(0,0,w,h); glMatrixMode (GL_PROJECTION); glLoadIdentity (); gluOrtho2D(0,w,0,h); glMatrixMode (GL_MODELVIEW); glLoadIdentity (); } void initialize() { glutInitDisplayMode (GLUT_RGBA|GLUT_DOUBLE); glutInitWindowSize (DIM,DIM); glutInitWindowPosition (100,100); glutCreateWindow ("Ventana OpenGL"); glutDisplayFunc (display_cb); glutReshapeFunc (reshape_cb); glClearColor(0.f,0.f,0.f,1.f); } int main (int argc, char **argv) { glutInit (&argc, argv); initialize(); glutMainLoop(); return 0; } */
4b0d0bb3af6381d6264a093a9c6ea40c116c5922.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: P = M * N. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P ) { int width = M.width; float curr_sum = 0.0; int row = threadIdx.x; int col = threadIdx.y; if( col < width && row < width ){ for (int i = 0; i< width; i++){ float M_val = M.elements[ threadIdx.y * width + i ]; float N_val = N.elements[ i * width + threadIdx.x ]; curr_sum += M_val * N_val; } } P.elements[threadIdx.y* width + threadIdx.x ] = curr_sum; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
4b0d0bb3af6381d6264a093a9c6ea40c116c5922.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: P = M * N. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P ) { int width = M.width; float curr_sum = 0.0; int row = threadIdx.x; int col = threadIdx.y; if( col < width && row < width ){ for (int i = 0; i< width; i++){ float M_val = M.elements[ threadIdx.y * width + i ]; float N_val = N.elements[ i * width + threadIdx.x ]; curr_sum += M_val * N_val; } } P.elements[threadIdx.y* width + threadIdx.x ] = curr_sum; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
75d529eb00cef76408bbe6b7e89c0a262159c07a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "HOGConvolution.h" #include "HOGUtils.h" #include "cutil.h" dim3 blockGridRows; dim3 blockGridColumns; dim3 threadBlockRows; dim3 threadBlockColumns; #define convKernelRadius 1 #define convKernelWidth (2 * convKernelRadius + 1) __device__ __constant__ float d_Kernel[convKernelWidth]; float *h_Kernel; #define convRowTileWidth 128 #define convKernelRadiusAligned 16 #define convColumnTileWidth 16 #define convColumnTileHeight 48 float4 *convBuffer4; float1 *convBuffer1; int convWidth; int convHeight; const int convKernelSize = convKernelWidth * sizeof(float); bool convUseGrayscale; template<int i> __device__ float1 convolutionRow(float1 *data) { float1 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.x += convolutionRow<i-1>(data).x; return val; } template<> __device__ float1 convolutionRow<-1>(float1 *data){float1 zero; zero.x = 0; return zero;} template<int i> __device__ float1 convolutionColumn(float1 *data) { float1 val = data[(convKernelRadius-i)*convColumnTileWidth]; val.x *= d_Kernel[i]; val.x += convolutionColumn<i-1>(data).x; return val; } template<> __device__ float1 convolutionColumn<-1>(float1 *data){float1 zero; zero.x = 0; return zero;} template<int i> __device__ float4 convolutionRow(float4 *data) { float4 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionRow<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionRow<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } template<int i> __device__ float4 convolutionColumn(float4 *data) { float4 val = data[(convKernelRadius-i)*convColumnTileWidth]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionColumn<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionColumn<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } __global__ void convolutionRowGPU1(float1 *d_Result, float1 *d_Data, int dataW, int dataH) { float1 zero; zero.x = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float1 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if(loadPos >= apronStart) { const int smemPos = loadPos - apronStart; data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? 
d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if(writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float1 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionRowGPU4(float4 *d_Result, float4 *d_Data, int dataW, int dataH) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float4 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if(loadPos >= apronStart) { const int smemPos = loadPos - apronStart; data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if(writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float4 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionColumnGPU1to2 ( float2 *d_Result, float1 *d_Data, float1 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { float1 rowValue; float1 zero; zero.x = 0; float2 result; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float1 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? 
d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float1 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; result.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); result.y = atan2f(sum.x, rowValue.x) * RADTODEG; d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } __global__ void convolutionColumnGPU4to2 ( float2 *d_Result, float4 *d_Data, float4 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { //float3 max12, mag4; float3 mag1, mag2, mag3; float3 max34, magMax; float2 result; float4 rowValue; float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float4 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float4 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; mag1.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); mag1.y = sum.x; mag1.z = rowValue.x; mag2.x = sqrtf(sum.y * sum.y + rowValue.y * rowValue.y); mag2.y = sum.y; mag2.z = rowValue.y; mag3.x = sqrtf(sum.z * sum.z + rowValue.z * rowValue.z); mag3.y = sum.z; mag3.z = rowValue.z; max34 = (mag2.x > mag3.x) ? mag2 : mag3; magMax = (mag1.x > max34.x) ? 
mag1 : max34; result.x = magMax.x; result.y = atan2f(magMax.y, magMax.z); result.y = result.y * 180 / PI + 180; result.y = int(result.y) % 180; //TODO-> if semicerc d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } __host__ void InitConvolution(int width, int height, bool useGrayscale) { convUseGrayscale = useGrayscale; h_Kernel = (float *)malloc(convKernelSize); h_Kernel[0] = 1.0f; h_Kernel[1] = 0; h_Kernel[2] = -1.0f; cutilSafeCall( hipMemcpyToSymbol(d_Kernel, h_Kernel, convKernelSize) ); if (useGrayscale) cutilSafeCall(hipMalloc((void**) &convBuffer1, sizeof(float1) * width * height)); else cutilSafeCall(hipMalloc((void**) &convBuffer4, sizeof(float4) * width * height)); } __host__ void SetConvolutionSize(int width, int height) { convWidth = width; convHeight = height; blockGridRows = dim3(iDivUp(convWidth, convRowTileWidth), convHeight); blockGridColumns = dim3(iDivUp(convWidth, convColumnTileWidth), iDivUp(convHeight, convColumnTileHeight)); threadBlockRows = dim3(convKernelRadiusAligned + convRowTileWidth + convKernelRadius); threadBlockColumns = dim3(convColumnTileWidth, 8); } __host__ void CloseConvolution() { if (convUseGrayscale) cutilSafeCall(hipFree(convBuffer1)); else cutilSafeCall(hipFree(convBuffer4)); free(h_Kernel); } __host__ void ComputeColorGradients1to2(float1* inputImage, float2* outputImage) { hipLaunchKernelGGL(( convolutionRowGPU1), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, convBuffer1, inputImage, convWidth, convHeight); hipLaunchKernelGGL(( convolutionColumnGPU1to2), dim3(blockGridColumns), dim3(threadBlockColumns), 0, 0, outputImage, inputImage, convBuffer1, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); } __host__ void ComputeColorGradients4to2(float4* inputImage, float2* outputImage) { hipLaunchKernelGGL(( convolutionRowGPU4), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, convBuffer4, inputImage, convWidth, convHeight); hipLaunchKernelGGL(( convolutionColumnGPU4to2), dim3(blockGridColumns), dim3(threadBlockColumns), 0, 0, outputImage, inputImage, convBuffer4, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); }
75d529eb00cef76408bbe6b7e89c0a262159c07a.cu
#include "HOGConvolution.h" #include "HOGUtils.h" #include "cutil.h" dim3 blockGridRows; dim3 blockGridColumns; dim3 threadBlockRows; dim3 threadBlockColumns; #define convKernelRadius 1 #define convKernelWidth (2 * convKernelRadius + 1) __device__ __constant__ float d_Kernel[convKernelWidth]; float *h_Kernel; #define convRowTileWidth 128 #define convKernelRadiusAligned 16 #define convColumnTileWidth 16 #define convColumnTileHeight 48 float4 *convBuffer4; float1 *convBuffer1; int convWidth; int convHeight; const int convKernelSize = convKernelWidth * sizeof(float); bool convUseGrayscale; template<int i> __device__ float1 convolutionRow(float1 *data) { float1 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.x += convolutionRow<i-1>(data).x; return val; } template<> __device__ float1 convolutionRow<-1>(float1 *data){float1 zero; zero.x = 0; return zero;} template<int i> __device__ float1 convolutionColumn(float1 *data) { float1 val = data[(convKernelRadius-i)*convColumnTileWidth]; val.x *= d_Kernel[i]; val.x += convolutionColumn<i-1>(data).x; return val; } template<> __device__ float1 convolutionColumn<-1>(float1 *data){float1 zero; zero.x = 0; return zero;} template<int i> __device__ float4 convolutionRow(float4 *data) { float4 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionRow<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionRow<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } template<int i> __device__ float4 convolutionColumn(float4 *data) { float4 val = data[(convKernelRadius-i)*convColumnTileWidth]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionColumn<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionColumn<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } __global__ void convolutionRowGPU1(float1 *d_Result, float1 *d_Data, int dataW, int dataH) { float1 zero; zero.x = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float1 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if(loadPos >= apronStart) { const int smemPos = loadPos - apronStart; data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? 
d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if(writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float1 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionRowGPU4(float4 *d_Result, float4 *d_Data, int dataW, int dataH) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float4 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if(loadPos >= apronStart) { const int smemPos = loadPos - apronStart; data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if(writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float4 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionColumnGPU1to2 ( float2 *d_Result, float1 *d_Data, float1 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { float1 rowValue; float1 zero; zero.x = 0; float2 result; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float1 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? 
d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float1 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; result.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); result.y = atan2f(sum.x, rowValue.x) * RADTODEG; d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } __global__ void convolutionColumnGPU4to2 ( float2 *d_Result, float4 *d_Data, float4 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { //float3 max12, mag4; float3 mag1, mag2, mag3; float3 max34, magMax; float2 result; float4 rowValue; float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float4 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; for(int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float4 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; mag1.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); mag1.y = sum.x; mag1.z = rowValue.x; mag2.x = sqrtf(sum.y * sum.y + rowValue.y * rowValue.y); mag2.y = sum.y; mag2.z = rowValue.y; mag3.x = sqrtf(sum.z * sum.z + rowValue.z * rowValue.z); mag3.y = sum.z; mag3.z = rowValue.z; max34 = (mag2.x > mag3.x) ? mag2 : mag3; magMax = (mag1.x > max34.x) ? 
mag1 : max34; result.x = magMax.x; result.y = atan2f(magMax.y, magMax.z); result.y = result.y * 180 / PI + 180; result.y = int(result.y) % 180; //TODO-> if semicerc d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } __host__ void InitConvolution(int width, int height, bool useGrayscale) { convUseGrayscale = useGrayscale; h_Kernel = (float *)malloc(convKernelSize); h_Kernel[0] = 1.0f; h_Kernel[1] = 0; h_Kernel[2] = -1.0f; cutilSafeCall( cudaMemcpyToSymbol(d_Kernel, h_Kernel, convKernelSize) ); if (useGrayscale) cutilSafeCall(cudaMalloc((void**) &convBuffer1, sizeof(float1) * width * height)); else cutilSafeCall(cudaMalloc((void**) &convBuffer4, sizeof(float4) * width * height)); } __host__ void SetConvolutionSize(int width, int height) { convWidth = width; convHeight = height; blockGridRows = dim3(iDivUp(convWidth, convRowTileWidth), convHeight); blockGridColumns = dim3(iDivUp(convWidth, convColumnTileWidth), iDivUp(convHeight, convColumnTileHeight)); threadBlockRows = dim3(convKernelRadiusAligned + convRowTileWidth + convKernelRadius); threadBlockColumns = dim3(convColumnTileWidth, 8); } __host__ void CloseConvolution() { if (convUseGrayscale) cutilSafeCall(cudaFree(convBuffer1)); else cutilSafeCall(cudaFree(convBuffer4)); free(h_Kernel); } __host__ void ComputeColorGradients1to2(float1* inputImage, float2* outputImage) { convolutionRowGPU1<<<blockGridRows, threadBlockRows>>>(convBuffer1, inputImage, convWidth, convHeight); convolutionColumnGPU1to2<<<blockGridColumns, threadBlockColumns>>>(outputImage, inputImage, convBuffer1, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); } __host__ void ComputeColorGradients4to2(float4* inputImage, float2* outputImage) { convolutionRowGPU4<<<blockGridRows, threadBlockRows>>>(convBuffer4, inputImage, convWidth, convHeight); convolutionColumnGPU4to2<<<blockGridColumns, threadBlockColumns>>>(outputImage, inputImage, convBuffer4, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); }
0053b9d3f161d4452d5717697ddafa16f5cc5020.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> __global__ void global_reduce_kernel(float * d_out, float * d_in) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // do reduction in global mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { d_in[myId] += d_in[myId + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[myId]; } } __global__ void shmem_reduce_kernel(float * d_out, const float * d_in) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void reduce(float * d_out, float * d_intermediate, float * d_in, int size, bool usesSharedMemory) { // assumes that size is not greater than maxThreadsPerBlock^2 // and that size is a multiple of maxThreadsPerBlock const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = size / maxThreadsPerBlock; if (usesSharedMemory) { hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_intermediate, d_in); } else { hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0, d_intermediate, d_in); } // now we're down to one block left, so reduce it threads = blocks; // launch one thread for each block in prev step blocks = 1; if (usesSharedMemory) { hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_out, d_intermediate); } else { hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0, d_out, d_intermediate); } } __device__ void warpReduce(volatile float* sdata, int tid) { sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } __global__ void optimized_shmem_reduce_kernel(float * d_out, const float * d_in) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int tid = threadIdx.x; int myId = 2 * (tid + blockDim.x * blockIdx.x); // Perform first step of the reduction right when you read the items from global to shared memory sdata[tid] = d_in[myId] + d_in[myId + 1]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) { /** When s <= 32, there is only one warp left. Since instructions are SIMD synchronous within a warp, when s <= 32: - dont need to __syncthreads() - dont need if (tid < s) because it doesnt save any work That is why we can unroll the last warp of this loop (when s <= 32), this is done in function warpReduce. */ if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! 
} if (tid < 32) { warpReduce(sdata, tid); } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void optimized_reduce(float * d_out, float * d_intermediate, float * d_in, int size) { /** Optimizations mentioned by the course instructor: - Processing multiple items per thread, instead of just one - Perform first step of the reduction right when you read the items from global to shared memory - Take advantage of the fact that warps are synchronous when doing the last steps of the reduction */ // assumes that size is not greater than maxThreadsPerBlock^2 // and that size is a multiple of maxThreadsPerBlock const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = size / maxThreadsPerBlock / 2; hipLaunchKernelGGL(( optimized_shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_intermediate, d_in); // now we're down to one block left, so reduce it threads = blocks; // launch one thread for each block in prev step blocks = 1; hipLaunchKernelGGL(( optimized_shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_out, d_intermediate); } int main(int argc, char **argv) { int deviceCount; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; hipSetDevice(dev); hipDeviceProp_t devProps; if (hipGetDeviceProperties(&devProps, dev) == 0) { printf("Using device %d:\n", dev); printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n", devProps.name, (int)devProps.totalGlobalMem, (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate); } const int ARRAY_SIZE = 1 << 20; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float sum = 0.0f; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [-1.0f, 1.0f] // When using random() here the result is always -5.174542. // This might be because of how bad this random generator is. // I think that when generating 2^20 random numbers between [-1, 1] their sum should be around 0. // h_in[i] = -1.0f + (float)random()/((float)RAND_MAX/2.0f); h_in[i] = 0.01f; // 0.01 * 2^20 = 10485.76 sum += h_in[i]; } // declare GPU memory pointers float * d_in, * d_intermediate, * d_out; // allocate GPU memory hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated hipMalloc((void **) &d_out, sizeof(float)); // transfer the input array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); int whichKernel = 0; if (argc == 2) { whichKernel = atoi(argv[1]); } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // launch the kernel switch(whichKernel) { case 0: printf("Running global reduce\n"); hipEventRecord(start, 0); for (int i = 0; i < 100; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false); // global reduce will return wrong sum in this case // is is because it will write the sum to the same memory adress 100 times // to test that out uncomment below and have a look at the partial sums after each iteration // only the first one is correct. 
// But I don't know why each sum is not just double the previous one, // float h_out; // hipMemcpy(&h_out, d_out, sizeof(float), hipMemcpyDeviceToHost); // printf("\tpartial sum: %f\n", h_out); } hipEventRecord(stop, 0); break; case 1: printf("Running reduce with shared mem\n"); hipEventRecord(start, 0); for (int i = 0; i < 100; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true); } hipEventRecord(stop, 0); break; case 2: printf("Running optimized reduce with shared mem\n"); hipEventRecord(start, 0); for (int i = 0; i < 100; i++) { optimized_reduce(d_out, d_intermediate, d_in, ARRAY_SIZE); } hipEventRecord(stop, 0); break; default: fprintf(stderr, "error: ran no kernel\n"); exit(EXIT_FAILURE); } hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); elapsedTime /= 100.0f; // 100 trials // copy back the sum from GPU float h_out; hipMemcpy(&h_out, d_out, sizeof(float), hipMemcpyDeviceToHost); printf("average time elapsed: %f\n", elapsedTime); printf("\nResults:\n"); printf("\tserial sum: %f\n", sum); printf("\treduce sum: %f\n", h_out); // free GPU memory allocation hipFree(d_in); hipFree(d_intermediate); hipFree(d_out); return 0; }
0053b9d3f161d4452d5717697ddafa16f5cc5020.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> __global__ void global_reduce_kernel(float * d_out, float * d_in) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // do reduction in global mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { d_in[myId] += d_in[myId + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[myId]; } } __global__ void shmem_reduce_kernel(float * d_out, const float * d_in) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void reduce(float * d_out, float * d_intermediate, float * d_in, int size, bool usesSharedMemory) { // assumes that size is not greater than maxThreadsPerBlock^2 // and that size is a multiple of maxThreadsPerBlock const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = size / maxThreadsPerBlock; if (usesSharedMemory) { shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>> (d_intermediate, d_in); } else { global_reduce_kernel<<<blocks, threads>>> (d_intermediate, d_in); } // now we're down to one block left, so reduce it threads = blocks; // launch one thread for each block in prev step blocks = 1; if (usesSharedMemory) { shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>> (d_out, d_intermediate); } else { global_reduce_kernel<<<blocks, threads>>> (d_out, d_intermediate); } } __device__ void warpReduce(volatile float* sdata, int tid) { sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } __global__ void optimized_shmem_reduce_kernel(float * d_out, const float * d_in) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int tid = threadIdx.x; int myId = 2 * (tid + blockDim.x * blockIdx.x); // Perform first step of the reduction right when you read the items from global to shared memory sdata[tid] = d_in[myId] + d_in[myId + 1]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) { /** When s <= 32, there is only one warp left. Since instructions are SIMD synchronous within a warp, when s <= 32: - don’t need to __syncthreads() - don’t need “if (tid < s)” because it doesn’t save any work That is why we can unroll the last warp of this loop (when s <= 32), this is done in function warpReduce. */ if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! 
} if (tid < 32) { warpReduce(sdata, tid); } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void optimized_reduce(float * d_out, float * d_intermediate, float * d_in, int size) { /** Optimizations mentioned by the course instructor: - Processing multiple items per thread, instead of just one - Perform first step of the reduction right when you read the items from global to shared memory - Take advantage of the fact that warps are synchronous when doing the last steps of the reduction */ // assumes that size is not greater than maxThreadsPerBlock^2 // and that size is a multiple of maxThreadsPerBlock const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = size / maxThreadsPerBlock / 2; optimized_shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>> (d_intermediate, d_in); // now we're down to one block left, so reduce it threads = blocks; // launch one thread for each block in prev step blocks = 1; optimized_shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>> (d_out, d_intermediate); } int main(int argc, char **argv) { int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; cudaSetDevice(dev); cudaDeviceProp devProps; if (cudaGetDeviceProperties(&devProps, dev) == 0) { printf("Using device %d:\n", dev); printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n", devProps.name, (int)devProps.totalGlobalMem, (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate); } const int ARRAY_SIZE = 1 << 20; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float sum = 0.0f; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [-1.0f, 1.0f] // When using random() here the result is always -5.174542. // This might be because of how bad this random generator is. // I think that when generating 2^20 random numbers between [-1, 1] their sum should be around 0. // h_in[i] = -1.0f + (float)random()/((float)RAND_MAX/2.0f); h_in[i] = 0.01f; // 0.01 * 2^20 = 10485.76 sum += h_in[i]; } // declare GPU memory pointers float * d_in, * d_intermediate, * d_out; // allocate GPU memory cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated cudaMalloc((void **) &d_out, sizeof(float)); // transfer the input array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); int whichKernel = 0; if (argc == 2) { whichKernel = atoi(argv[1]); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // launch the kernel switch(whichKernel) { case 0: printf("Running global reduce\n"); cudaEventRecord(start, 0); for (int i = 0; i < 100; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false); // global reduce will return wrong sum in this case // is is because it will write the sum to the same memory adress 100 times // to test that out uncomment below and have a look at the partial sums after each iteration // only the first one is correct. 
// But I don't know why each sum is not just double the previous one, // float h_out; // cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost); // printf("\tpartial sum: %f\n", h_out); } cudaEventRecord(stop, 0); break; case 1: printf("Running reduce with shared mem\n"); cudaEventRecord(start, 0); for (int i = 0; i < 100; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true); } cudaEventRecord(stop, 0); break; case 2: printf("Running optimized reduce with shared mem\n"); cudaEventRecord(start, 0); for (int i = 0; i < 100; i++) { optimized_reduce(d_out, d_intermediate, d_in, ARRAY_SIZE); } cudaEventRecord(stop, 0); break; default: fprintf(stderr, "error: ran no kernel\n"); exit(EXIT_FAILURE); } cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); elapsedTime /= 100.0f; // 100 trials // copy back the sum from GPU float h_out; cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost); printf("average time elapsed: %f\n", elapsedTime); printf("\nResults:\n"); printf("\tserial sum: %f\n", sum); printf("\treduce sum: %f\n", h_out); // free GPU memory allocation cudaFree(d_in); cudaFree(d_intermediate); cudaFree(d_out); return 0; }
mhTranspose.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <torch/types.h> #include <hipsparse.h> #include "computeUtil.h" __global__ void mhtranspose(const int nnz, const int h, const int * permute, float * attention, float * out) { int hid = blockIdx.y; int nid = blockIdx.x * 32 + threadIdx.x; if(nid < nnz) { int idx = permute[nid]; out[nid * h + hid] = attention[idx * h + hid]; } } __global__ void mhtranspose4(const int nnz, const int h, int * permute, float * attention, float * out) { int hid = threadIdx.y << 2; int nid = blockIdx.x * 32 + threadIdx.x; if(nid < nnz) { int idx = permute[nid]; float att[4]; Load<float4, float>(att, attention, idx * h + hid); Store<float4, float>(out, att, nid * h + hid); } } torch::Tensor mhtranspose_cuda( torch::Tensor permute, torch::Tensor attention // E * H ) { const auto nnz = permute.size(0); const auto h = attention.size(1); auto devid = permute.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto out = torch::empty({nnz, h}, options); if((h & 3) == 0) { hipLaunchKernelGGL(( mhtranspose4), dim3(dim3(CEIL(nnz, 32), 1, 1)), dim3(dim3(32, CEIL(h, 4), 1)), 0, 0, nnz, h, permute.data_ptr<int>(), attention.data_ptr<float>(), out.data_ptr<float>()); } else { hipLaunchKernelGGL(( mhtranspose), dim3(dim3(CEIL(nnz, 32), h, 1)), dim3(dim3(32, 1, 1)), 0, 0, nnz, h, permute.data_ptr<int>(), attention.data_ptr<float>(), out.data_ptr<float>()); } return out; } void csr2cscKernel(int m, int n, int nnz, int *csrRowPtr, int *csrColInd, int *csrVal, int *cscColPtr, int *cscRowInd, int *cscVal ) { hipsparseHandle_t handle; size_t bufferSize = 0; void* buffer = NULL; checkCuSparseError(hipsparseCsr2cscEx2_bufferSize(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, HIP_R_32I, HIPSPARSE_ACTION_SYMBOLIC, HIPSPARSE_INDEX_BASE_ZERO, HIPSPARSE_CSR2CSC_ALG1, &bufferSize )); checkCudaError(hipMalloc((void**)&buffer, bufferSize * sizeof(float))); checkCuSparseError(hipsparseCsr2cscEx2(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, HIP_R_32I, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO, HIPSPARSE_CSR2CSC_ALG1, buffer )); checkCudaError(hipFree(buffer)); } std::vector<torch::Tensor> csr2csc_cuda( torch::Tensor csrRowPtr, torch::Tensor csrColInd, torch::Tensor csrVal) { const auto n = csrRowPtr.size(0) - 1; const auto nnz = csrColInd.size(0); auto devid = csrRowPtr.device().index(); auto optionsI = torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, devid); auto cscColPtr = torch::empty({n + 1}, optionsI); auto cscRowInd = torch::empty({nnz}, optionsI); auto cscVal = torch::empty({nnz}, optionsI); csr2cscKernel(n, n, nnz, csrRowPtr.data_ptr<int>(), csrColInd.data_ptr<int>(), csrVal.data_ptr<int>(), cscColPtr.data_ptr<int>(), cscRowInd.data_ptr<int>(), cscVal.data_ptr<int>()); return {cscColPtr, cscRowInd, cscVal}; }
mhTranspose.cu
#include <cuda.h> #include <torch/types.h> #include <cusparse.h> #include "computeUtil.h" __global__ void mhtranspose(const int nnz, const int h, const int * permute, float * attention, float * out) { int hid = blockIdx.y; int nid = blockIdx.x * 32 + threadIdx.x; if(nid < nnz) { int idx = permute[nid]; out[nid * h + hid] = attention[idx * h + hid]; } } __global__ void mhtranspose4(const int nnz, const int h, int * permute, float * attention, float * out) { int hid = threadIdx.y << 2; int nid = blockIdx.x * 32 + threadIdx.x; if(nid < nnz) { int idx = permute[nid]; float att[4]; Load<float4, float>(att, attention, idx * h + hid); Store<float4, float>(out, att, nid * h + hid); } } torch::Tensor mhtranspose_cuda( torch::Tensor permute, torch::Tensor attention // E * H ) { const auto nnz = permute.size(0); const auto h = attention.size(1); auto devid = permute.device().index(); auto options = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA, devid); auto out = torch::empty({nnz, h}, options); if((h & 3) == 0) { mhtranspose4<<<dim3(CEIL(nnz, 32), 1, 1), dim3(32, CEIL(h, 4), 1)>>>(nnz, h, permute.data_ptr<int>(), attention.data_ptr<float>(), out.data_ptr<float>()); } else { mhtranspose<<<dim3(CEIL(nnz, 32), h, 1), dim3(32, 1, 1)>>>(nnz, h, permute.data_ptr<int>(), attention.data_ptr<float>(), out.data_ptr<float>()); } return out; } void csr2cscKernel(int m, int n, int nnz, int *csrRowPtr, int *csrColInd, int *csrVal, int *cscColPtr, int *cscRowInd, int *cscVal ) { cusparseHandle_t handle; size_t bufferSize = 0; void* buffer = NULL; checkCuSparseError(cusparseCsr2cscEx2_bufferSize(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, CUDA_R_32I, CUSPARSE_ACTION_SYMBOLIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, &bufferSize )); checkCudaError(cudaMalloc((void**)&buffer, bufferSize * sizeof(float))); checkCuSparseError(cusparseCsr2cscEx2(handle, m, n, nnz, csrVal, csrRowPtr, csrColInd, cscVal, cscColPtr, cscRowInd, CUDA_R_32I, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO, CUSPARSE_CSR2CSC_ALG1, buffer )); checkCudaError(cudaFree(buffer)); } std::vector<torch::Tensor> csr2csc_cuda( torch::Tensor csrRowPtr, torch::Tensor csrColInd, torch::Tensor csrVal) { const auto n = csrRowPtr.size(0) - 1; const auto nnz = csrColInd.size(0); auto devid = csrRowPtr.device().index(); auto optionsI = torch::TensorOptions().dtype(torch::kInt32).device(torch::kCUDA, devid); auto cscColPtr = torch::empty({n + 1}, optionsI); auto cscRowInd = torch::empty({nnz}, optionsI); auto cscVal = torch::empty({nnz}, optionsI); csr2cscKernel(n, n, nnz, csrRowPtr.data_ptr<int>(), csrColInd.data_ptr<int>(), csrVal.data_ptr<int>(), cscColPtr.data_ptr<int>(), cscRowInd.data_ptr<int>(), cscVal.data_ptr<int>()); return {cscColPtr, cscRowInd, cscVal}; }
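// csr2cscKernel above declares `cusparseHandle_t handle` but, at least within this
// file, never initializes it before handing it to cusparseCsr2cscEx2_bufferSize and
// cusparseCsr2cscEx2 (the hipSPARSE version in the paired .hip file has the same
// shape). cuSPARSE expects the handle to be created first; a minimal sketch of the
// usual bracketing, shown only as an illustration:
//
//   cusparseHandle_t handle;
//   checkCuSparseError(cusparseCreate(&handle));
//   ...buffer-size query, cudaMalloc, cusparseCsr2cscEx2, cudaFree as above...
//   checkCuSparseError(cusparseDestroy(handle));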
122d57b808471fccb0502b337ce29e14e52e9a7c.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (C) 2013-2018 Altera Corporation, San Jose, California, USA. All rights reserved. // Permission is hereby granted, free of charge, to any person obtaining a copy of this // software and associated documentation files (the "Software"), to deal in the Software // without ion, including without limitation the rights to use, copy, modify, merge, // publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all copies or // substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES // OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, // WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR // OTHER DEALINGS IN THE SOFTWARE. // // This agreement shall be governed in all respects by the laws of the State of California and // by the laws of the United States of America. #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include "options.h" #include "scoped_ptrs.h" using namespace aocl_utils; #define MANUAL_VECTOR 8 #define NUM_THREADS_PER_WG 64 #define BLOOM_1 5 #define BLOOM_2 0x7FFFF #define BLOOM_SIZE 14 #define docEndingTag 0xFFFFFFFF // Params uint block_size = 64; uint repeat = 100; uint total_num_docs = 256*1024; uint total_doc_size = 0; uint total_doc_size_no_padding = 0; // Host Buffers scoped_aligned_ptr<uint> h_docWordFrequencies_dimm1; scoped_aligned_ptr<uint> h_docWordFrequencies_dimm2; scoped_aligned_ptr<ulong> h_profileWeights; scoped_aligned_ptr<ulong> h_docInfo; scoped_aligned_ptr<uint> h_isWordInProfileHash; scoped_aligned_ptr<uint> h_startingDocID; scoped_aligned_ptr<uint> h_numItemsPerThread; scoped_aligned_ptr<ulong> h_profileScore; scoped_aligned_ptr<uint> h_docSizes; static uint m_z = 1; static uint m_w = 1; static uint rand_desh() { m_z = 36969 * (m_z & 65535) + (m_z >> 16); m_w = 18000 * (m_w & 65535) + (m_w >> 16); return (m_z << 16) + m_w; } double sampleNormal() { double u = ((double) rand() / (RAND_MAX)) * 2 - 1; double v = ((double) rand() / (RAND_MAX)) * 2 - 1; double r = u * u + v * v; if (r == 0 || r > 1) return sampleNormal(); double c = sqrt(-2 * log(r) / r); return u * c; } #define DOC_LEN_SIGMA 100 #define AVG_DOC_LEN 350 uint get_doc_length() { int len = sampleNormal() * DOC_LEN_SIGMA + AVG_DOC_LEN; if (len < 10) { len = 10; } // Arbitray lower bound; return (uint) len; } // High-resolution timer. double getCurrentTimestamp() { #ifdef _WIN32 // Windows // Use the high-resolution performance counter. static LARGE_INTEGER ticks_per_second = {}; if(ticks_per_second.QuadPart == 0) { // First call - get the frequency. 
QueryPerformanceFrequency(&ticks_per_second); } LARGE_INTEGER counter; QueryPerformanceCounter(&counter); double seconds = double(counter.QuadPart) / double(ticks_per_second.QuadPart); return seconds; #else // Linux timespec a; clock_gettime(CLOCK_MONOTONIC, &a); return (double(a.tv_nsec) * 1.0e-9) + double(a.tv_sec); #endif } void setupData() { h_startingDocID.reset( total_num_docs ); h_numItemsPerThread.reset( total_num_docs ); h_profileScore.reset( total_num_docs ); h_docInfo.reset( total_num_docs ); h_docSizes.reset( total_num_docs ); total_doc_size = 0; total_doc_size_no_padding = 0; for (uint i=0; i<total_num_docs; i++) { uint unpadded_size = get_doc_length(); uint size = unpadded_size & (~(2*block_size-1)); if (unpadded_size & ((2*block_size-1))) size += 2*block_size; // Multiple of block_size h_startingDocID[i] = total_doc_size/2; h_numItemsPerThread[i] = size / (2*block_size); ulong start_line = total_doc_size / (2*block_size); ulong end_line = start_line + size / (2*block_size) - 1; total_doc_size += size; total_doc_size_no_padding += unpadded_size; h_docSizes[i] = unpadded_size; h_profileScore[i] = -1; h_docInfo[i] = (start_line << 32) | end_line; } h_isWordInProfileHash.reset( (1L << BLOOM_SIZE) ); h_docWordFrequencies_dimm1.reset( total_doc_size/2 ); h_docWordFrequencies_dimm2.reset( total_doc_size/2 ); printf("Creating Documents total_terms=%d (no_pad=%d)\n", total_doc_size, total_doc_size_no_padding); for (uint i=0; i<total_doc_size/2; i++) { h_docWordFrequencies_dimm1[i] = docEndingTag; h_docWordFrequencies_dimm2[i] = docEndingTag; } for (uint doci=0; doci < total_num_docs; doci++) { uint start = h_startingDocID[doci]; uint size = h_docSizes[doci]; for (uint i = 0; i < size/2; i++) { uint term = (rand_desh()%((1L << 24)-1)); uint freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm1[start + i] = (term << 8) | freq; term = (rand_desh()%((1L << 24)-1)); freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm2[start + i] = (term << 8) | freq; } if (size%2) { uint term = (rand_desh()%((1L << 24)-1)); uint freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm1[start + size/2] = (term << 8) | freq; } } h_profileWeights.reset( (1L << 24) ); for (uint i=0; i<(1L << BLOOM_SIZE); i++) { h_isWordInProfileHash[i] = 0x0; } printf("Creating Profile\n"); for (uint i=0; i<(1L << 24); i++) { h_profileWeights[i] = 0; } for (uint i=0; i<16384; i++) { uint entry = (rand_desh()%(1<<24)); h_profileWeights[entry] = 10; uint hash1 = entry >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id h_isWordInProfileHash[ hash1 >> 5 ] |= 1 << (hash1 & 0x1f); uint hash2 = entry & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id h_isWordInProfileHash[ hash2 >> 5 ] |= 1 << (hash2 & 0x1f); } } void runOnCPU() { // go through each document in turn, and compute the score scoped_aligned_ptr<ulong> cpu_profileScore; cpu_profileScore.reset( total_num_docs ); uint total = 0; uint falsies = 0; for (uint doci=0; doci < total_num_docs; doci++) { cpu_profileScore[doci] = 0.0; uint start = h_startingDocID[doci]; uint size = h_docSizes[doci]; for (uint i = 0; i < size/2 + (size%2); i++) { uint curr_entry = h_docWordFrequencies_dimm1[start + i]; uint frequency = curr_entry & 0x00ff; uint word_id = curr_entry >> 8; uint hash1 = word_id >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id bool inh1 = h_isWordInProfileHash[ hash1 >> 5 ] & ( 1 << (hash1 & 0x1f)); uint hash2 = word_id & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id bool inh2 = h_isWordInProfileHash[ 
hash2 >> 5 ] & ( 1 << (hash2 & 0x1f)); if (inh1 && inh2) { total++; if (h_profileWeights[word_id] == 0) falsies++; cpu_profileScore[doci] += h_profileWeights[word_id] * (ulong)frequency; } } for (uint i = 0; i < size/2; i++) { uint curr_entry = h_docWordFrequencies_dimm2[start + i]; uint frequency = curr_entry & 0x00ff; uint word_id = curr_entry >> 8; uint hash1 = word_id >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id bool inh1 = h_isWordInProfileHash[ hash1 >> 5 ] & ( 1 << (hash1 & 0x1f)); uint hash2 = word_id & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id bool inh2 = h_isWordInProfileHash[ hash2 >> 5 ] & ( 1 << (hash2 & 0x1f)); if (inh1 && inh2) { total++; if (h_profileWeights[word_id] == 0) falsies++; cpu_profileScore[doci] += h_profileWeights[word_id] * (ulong)frequency; } } } printf( "total_access = %d , falsies = %d, percentage = %f hit= %g\n", \ total, falsies, total * 1.0f / total_doc_size, (total-falsies)*1.0f/total_doc_size ); // compare the final scores for (uint doci = 0; doci < total_num_docs; doci++) { if (cpu_profileScore[doci] != h_profileScore[doci]) { printf("FAILED\n : doc[%d] score: CPU = %lu, Device = %lu\n", \ doci, cpu_profileScore[doci], h_profileScore[doci]); return; } } printf( "Verification: PASS\n" ); } __device__ ulong mulfp( ulong weight, uint freq ) { uint part1 = weight & 0xFFFFF; // lower 24-bits of weight uint part2 = (weight >> 24) & 0xFFFF; // next 16-bits uint res1 = part1 * freq; uint res2 = part2 * freq; return (ulong)res1 + (((ulong)res2) << 24); } __global__ void compute ( const uint* __restrict__ docWordFrequencies_dimm1, const uint* __restrict__ docWordFrequencies_dimm2, const ulong*__restrict__ profileWeights_dimm1, const ulong*__restrict__ profileWeights_dimm2, const uint* __restrict__ isWordInProfileHash, uint* __restrict__ profileScorePerGroup_highbits_dimm1, uint* __restrict__ profileScorePerGroup_lowbits_dimm2 ) { uint curr_entry[MANUAL_VECTOR]; uint word_id[MANUAL_VECTOR]; uint freq[MANUAL_VECTOR]; uint hash1[MANUAL_VECTOR]; uint hash2[MANUAL_VECTOR]; bool is_end[MANUAL_VECTOR]; bool make_access[MANUAL_VECTOR]; __shared__ ulong partial[NUM_THREADS_PER_WG/MANUAL_VECTOR]; int gid = blockIdx.x * blockDim.x + threadIdx.x; ulong sum = 0; //#pragma unroll for (uint i=0; i<MANUAL_VECTOR; i++) { curr_entry[i] = docWordFrequencies_dimm1[gid*MANUAL_VECTOR + i]; freq[i] = curr_entry[i] & 0xff; word_id[i] = curr_entry[i] >> 8; is_end[i] = curr_entry[i] == docEndingTag; hash1[i] = word_id[i] >> BLOOM_1; hash2[i] = word_id[i] & BLOOM_2; make_access[i] = !is_end[i] && ((isWordInProfileHash[ hash1[i] >> 5 ] >> (hash1[i] & 0x1f)) & 0x1) && ((isWordInProfileHash[ hash2[i] >> 5 ] >> (hash2[i] & 0x1f)) & 0x1); if (make_access[i]) { sum += mulfp(profileWeights_dimm1[word_id[i]],freq[i]); } } //#pragma unroll for (uint i=0; i<MANUAL_VECTOR; i++) { curr_entry[i] = docWordFrequencies_dimm2[gid*MANUAL_VECTOR + i]; freq[i] = curr_entry[i] & 0xff; word_id[i] = curr_entry[i] >> 8; is_end[i] = curr_entry[i] == docEndingTag; hash1[i] = word_id[i] >> BLOOM_1; hash2[i] = word_id[i] & BLOOM_2; make_access[i] = !is_end[i] && ((isWordInProfileHash[ hash1[i] >> 5 ] >> (hash1[i] & 0x1f)) & 0x1) && ((isWordInProfileHash[ hash2[i] >> 5 ] >> (hash2[i] & 0x1f)) & 0x1); if (make_access[i]) { sum += mulfp(profileWeights_dimm2[word_id[i]],freq[i]); } } partial[threadIdx.x] = sum; __syncthreads(); if (threadIdx.x == 0) { ulong4 res= *(ulong4*)&partial[0]; ulong4 res2= *(ulong4*)&partial[4]; ulong final_result = res.x + res.y + res.z + res.w + 
res2.x + res2.y + res2.z + res2.w; profileScorePerGroup_highbits_dimm1[blockIdx.x] = (uint) (final_result >> 32); profileScorePerGroup_lowbits_dimm2[blockIdx.x] = (uint) (final_result & 0xFFFFFFFF); } } __global__ void reduction( const ulong* __restrict__ docInfo, const uint* __restrict__ partial_highbits_dimm1, const uint* __restrict__ partial_lowbits_dimm2, ulong* __restrict__ result) { int gid = blockIdx.x * blockDim.x + threadIdx.x; ulong info = docInfo[gid]; unsigned start = info >> 32; unsigned end = info & 0xFFFFFFFF; ulong total = 0; #pragma unroll 2 for (unsigned i=start; i<=end; i++) { ulong upper = partial_highbits_dimm1[i]; ulong lower = partial_lowbits_dimm2[i]; ulong sum = (upper << 32) | lower; total += sum; } result[gid] = total; } int main(int argc, char** argv) { Options options(argc, argv); // Optional argument to specify the problem size. if(options.has("n")) { total_num_docs = options.get<uint>("n"); } printf("Total number of documents: %u\n", total_num_docs); if(options.has("p")) { repeat = options.get<uint>("p"); } printf("Kernel execution count: %u\n", repeat); srand(2); printf("RAND_MAX: %d\n", RAND_MAX); printf("Allocating and setting up data\n"); setupData(); size_t local_size = (block_size / MANUAL_VECTOR); size_t global_size = total_doc_size / 2 / MANUAL_VECTOR / local_size; size_t local_size_reduction = block_size; size_t global_size_reduction = total_num_docs / block_size; uint* d_docWordFrequencies_dimm1; uint* d_docWordFrequencies_dimm2; uint* d_partialSums_dimm1; uint* d_partialSums_dimm2; ulong* d_profileWeights_dimm1; ulong* d_profileWeights_dimm2; uint* d_isWordInProfileHash; ulong* d_docInfo; ulong* d_profileScore; hipMalloc((void**)&d_docWordFrequencies_dimm1, sizeof(uint) * total_doc_size/2); hipMalloc((void**)&d_docWordFrequencies_dimm2, sizeof(uint) * total_doc_size/2); hipMalloc((void**)&d_partialSums_dimm1, sizeof(uint) * total_doc_size/(2*block_size)); hipMalloc((void**)&d_partialSums_dimm2, sizeof(uint) * total_doc_size/(2*block_size)); hipMalloc((void**)&d_profileWeights_dimm1, sizeof(ulong) * (1L << 24)); hipMalloc((void**)&d_profileWeights_dimm2, sizeof(ulong) * (1L << 24)); hipMalloc((void**)&d_isWordInProfileHash, sizeof(uint) * (1L << BLOOM_SIZE)); hipMalloc((void**)&d_docInfo, sizeof(ulong) * total_num_docs); hipMalloc((void**)&d_profileScore, sizeof(ulong) * total_num_docs); hipMemcpy(d_docWordFrequencies_dimm1, h_docWordFrequencies_dimm1, sizeof(uint) * total_doc_size/2, hipMemcpyHostToDevice); hipMemcpy(d_docWordFrequencies_dimm2, h_docWordFrequencies_dimm2, sizeof(uint) * total_doc_size/2, hipMemcpyHostToDevice); hipMemcpy(d_profileWeights_dimm1, h_profileWeights, sizeof(ulong) * (1L << 24), hipMemcpyHostToDevice); hipMemcpy(d_profileWeights_dimm2, h_profileWeights, sizeof(ulong) * (1L << 24), hipMemcpyHostToDevice); hipMemcpy(d_isWordInProfileHash, h_isWordInProfileHash, sizeof(uint) * (1L << BLOOM_SIZE), hipMemcpyHostToDevice); hipMemcpy(d_docInfo, h_docInfo, sizeof(ulong) * total_num_docs, hipMemcpyHostToDevice); const double start_time = getCurrentTimestamp(); for (uint i=0; i<repeat; i++) { hipLaunchKernelGGL(( compute), dim3(global_size), dim3(local_size), 0, 0, d_docWordFrequencies_dimm1, d_docWordFrequencies_dimm2, d_profileWeights_dimm1, d_profileWeights_dimm2, d_isWordInProfileHash, d_partialSums_dimm1, d_partialSums_dimm2); hipLaunchKernelGGL(( reduction), dim3(global_size_reduction), dim3(local_size_reduction), 0, 0, d_docInfo, d_partialSums_dimm1, d_partialSums_dimm2, d_profileScore); } hipDeviceSynchronize(); const 
double end_time = getCurrentTimestamp(); double kernelExecutionTime = (end_time - start_time)/repeat; printf("======================================================\n"); printf("Kernel Time = %f ms (averaged over %d times)\n", kernelExecutionTime * 1000.0f, repeat ); printf("Throughput = %f\n", total_doc_size_no_padding / kernelExecutionTime / 1.0e+6f ); hipMemcpy(h_profileScore, d_profileScore, sizeof(ulong) * total_num_docs, hipMemcpyDeviceToHost); hipFree(d_docWordFrequencies_dimm1); hipFree(d_docWordFrequencies_dimm2); hipFree(d_partialSums_dimm1); hipFree(d_partialSums_dimm2); hipFree(d_profileWeights_dimm1); hipFree(d_profileWeights_dimm2); hipFree(d_isWordInProfileHash); hipFree(d_docInfo); hipFree(d_profileScore); printf("Done\n"); runOnCPU(); }
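// The isWordInProfileHash buffer above is a Bloom-filter-style bit set:
// (1 << BLOOM_SIZE) = 16384 uints, i.e. 2^19 addressable bits. Every 24-bit word id
// yields two hashes, hash1 = word_id >> BLOOM_1 (its upper 19 bits) and
// hash2 = word_id & BLOOM_2 (its lower 19 bits); the profile weight is only fetched
// when both corresponding bits are set, which filters most non-profile words without
// touching the large weight table. The helper below is a sketch of the test that
// compute() and runOnCPU() both inline (hypothetical, for illustration only):
__host__ __device__ static inline bool maybe_in_profile(const uint* hash_bits, uint word_id)
{
    uint hash1 = word_id >> BLOOM_1;   // hash from the high bits of the word id
    uint hash2 = word_id & BLOOM_2;    // hash from the low 19 bits of the word id
    bool bit1 = (hash_bits[hash1 >> 5] >> (hash1 & 0x1f)) & 0x1;
    bool bit2 = (hash_bits[hash2 >> 5] >> (hash2 & 0x1f)) & 0x1;
    return bit1 && bit2;               // false positives possible, false negatives not
}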
122d57b808471fccb0502b337ce29e14e52e9a7c.cu
// Copyright (C) 2013-2018 Altera Corporation, San Jose, California, USA. All rights reserved. // Permission is hereby granted, free of charge, to any person obtaining a copy of this // software and associated documentation files (the "Software"), to deal in the Software // without ion, including without limitation the rights to use, copy, modify, merge, // publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to // whom the Software is furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in all copies or // substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES // OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, // WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR // OTHER DEALINGS IN THE SOFTWARE. // // This agreement shall be governed in all respects by the laws of the State of California and // by the laws of the United States of America. #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include <math.h> #include <cuda.h> #include "options.h" #include "scoped_ptrs.h" using namespace aocl_utils; #define MANUAL_VECTOR 8 #define NUM_THREADS_PER_WG 64 #define BLOOM_1 5 #define BLOOM_2 0x7FFFF #define BLOOM_SIZE 14 #define docEndingTag 0xFFFFFFFF // Params uint block_size = 64; uint repeat = 100; uint total_num_docs = 256*1024; uint total_doc_size = 0; uint total_doc_size_no_padding = 0; // Host Buffers scoped_aligned_ptr<uint> h_docWordFrequencies_dimm1; scoped_aligned_ptr<uint> h_docWordFrequencies_dimm2; scoped_aligned_ptr<ulong> h_profileWeights; scoped_aligned_ptr<ulong> h_docInfo; scoped_aligned_ptr<uint> h_isWordInProfileHash; scoped_aligned_ptr<uint> h_startingDocID; scoped_aligned_ptr<uint> h_numItemsPerThread; scoped_aligned_ptr<ulong> h_profileScore; scoped_aligned_ptr<uint> h_docSizes; static uint m_z = 1; static uint m_w = 1; static uint rand_desh() { m_z = 36969 * (m_z & 65535) + (m_z >> 16); m_w = 18000 * (m_w & 65535) + (m_w >> 16); return (m_z << 16) + m_w; } double sampleNormal() { double u = ((double) rand() / (RAND_MAX)) * 2 - 1; double v = ((double) rand() / (RAND_MAX)) * 2 - 1; double r = u * u + v * v; if (r == 0 || r > 1) return sampleNormal(); double c = sqrt(-2 * log(r) / r); return u * c; } #define DOC_LEN_SIGMA 100 #define AVG_DOC_LEN 350 uint get_doc_length() { int len = sampleNormal() * DOC_LEN_SIGMA + AVG_DOC_LEN; if (len < 10) { len = 10; } // Arbitray lower bound; return (uint) len; } // High-resolution timer. double getCurrentTimestamp() { #ifdef _WIN32 // Windows // Use the high-resolution performance counter. static LARGE_INTEGER ticks_per_second = {}; if(ticks_per_second.QuadPart == 0) { // First call - get the frequency. 
QueryPerformanceFrequency(&ticks_per_second); } LARGE_INTEGER counter; QueryPerformanceCounter(&counter); double seconds = double(counter.QuadPart) / double(ticks_per_second.QuadPart); return seconds; #else // Linux timespec a; clock_gettime(CLOCK_MONOTONIC, &a); return (double(a.tv_nsec) * 1.0e-9) + double(a.tv_sec); #endif } void setupData() { h_startingDocID.reset( total_num_docs ); h_numItemsPerThread.reset( total_num_docs ); h_profileScore.reset( total_num_docs ); h_docInfo.reset( total_num_docs ); h_docSizes.reset( total_num_docs ); total_doc_size = 0; total_doc_size_no_padding = 0; for (uint i=0; i<total_num_docs; i++) { uint unpadded_size = get_doc_length(); uint size = unpadded_size & (~(2*block_size-1)); if (unpadded_size & ((2*block_size-1))) size += 2*block_size; // Multiple of block_size h_startingDocID[i] = total_doc_size/2; h_numItemsPerThread[i] = size / (2*block_size); ulong start_line = total_doc_size / (2*block_size); ulong end_line = start_line + size / (2*block_size) - 1; total_doc_size += size; total_doc_size_no_padding += unpadded_size; h_docSizes[i] = unpadded_size; h_profileScore[i] = -1; h_docInfo[i] = (start_line << 32) | end_line; } h_isWordInProfileHash.reset( (1L << BLOOM_SIZE) ); h_docWordFrequencies_dimm1.reset( total_doc_size/2 ); h_docWordFrequencies_dimm2.reset( total_doc_size/2 ); printf("Creating Documents total_terms=%d (no_pad=%d)\n", total_doc_size, total_doc_size_no_padding); for (uint i=0; i<total_doc_size/2; i++) { h_docWordFrequencies_dimm1[i] = docEndingTag; h_docWordFrequencies_dimm2[i] = docEndingTag; } for (uint doci=0; doci < total_num_docs; doci++) { uint start = h_startingDocID[doci]; uint size = h_docSizes[doci]; for (uint i = 0; i < size/2; i++) { uint term = (rand_desh()%((1L << 24)-1)); uint freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm1[start + i] = (term << 8) | freq; term = (rand_desh()%((1L << 24)-1)); freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm2[start + i] = (term << 8) | freq; } if (size%2) { uint term = (rand_desh()%((1L << 24)-1)); uint freq = (rand_desh()%254)+1; h_docWordFrequencies_dimm1[start + size/2] = (term << 8) | freq; } } h_profileWeights.reset( (1L << 24) ); for (uint i=0; i<(1L << BLOOM_SIZE); i++) { h_isWordInProfileHash[i] = 0x0; } printf("Creating Profile\n"); for (uint i=0; i<(1L << 24); i++) { h_profileWeights[i] = 0; } for (uint i=0; i<16384; i++) { uint entry = (rand_desh()%(1<<24)); h_profileWeights[entry] = 10; uint hash1 = entry >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id h_isWordInProfileHash[ hash1 >> 5 ] |= 1 << (hash1 & 0x1f); uint hash2 = entry & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id h_isWordInProfileHash[ hash2 >> 5 ] |= 1 << (hash2 & 0x1f); } } void runOnCPU() { // go through each document in turn, and compute the score scoped_aligned_ptr<ulong> cpu_profileScore; cpu_profileScore.reset( total_num_docs ); uint total = 0; uint falsies = 0; for (uint doci=0; doci < total_num_docs; doci++) { cpu_profileScore[doci] = 0.0; uint start = h_startingDocID[doci]; uint size = h_docSizes[doci]; for (uint i = 0; i < size/2 + (size%2); i++) { uint curr_entry = h_docWordFrequencies_dimm1[start + i]; uint frequency = curr_entry & 0x00ff; uint word_id = curr_entry >> 8; uint hash1 = word_id >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id bool inh1 = h_isWordInProfileHash[ hash1 >> 5 ] & ( 1 << (hash1 & 0x1f)); uint hash2 = word_id & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id bool inh2 = h_isWordInProfileHash[ 
hash2 >> 5 ] & ( 1 << (hash2 & 0x1f)); if (inh1 && inh2) { total++; if (h_profileWeights[word_id] == 0) falsies++; cpu_profileScore[doci] += h_profileWeights[word_id] * (ulong)frequency; } } for (uint i = 0; i < size/2; i++) { uint curr_entry = h_docWordFrequencies_dimm2[start + i]; uint frequency = curr_entry & 0x00ff; uint word_id = curr_entry >> 8; uint hash1 = word_id >> BLOOM_1; //this gives me the top 16 bits of the 24bit word id bool inh1 = h_isWordInProfileHash[ hash1 >> 5 ] & ( 1 << (hash1 & 0x1f)); uint hash2 = word_id & BLOOM_2; //this gives me the bottom 16 bits of the 24bit word id bool inh2 = h_isWordInProfileHash[ hash2 >> 5 ] & ( 1 << (hash2 & 0x1f)); if (inh1 && inh2) { total++; if (h_profileWeights[word_id] == 0) falsies++; cpu_profileScore[doci] += h_profileWeights[word_id] * (ulong)frequency; } } } printf( "total_access = %d , falsies = %d, percentage = %f hit= %g\n", \ total, falsies, total * 1.0f / total_doc_size, (total-falsies)*1.0f/total_doc_size ); // compare the final scores for (uint doci = 0; doci < total_num_docs; doci++) { if (cpu_profileScore[doci] != h_profileScore[doci]) { printf("FAILED\n : doc[%d] score: CPU = %lu, Device = %lu\n", \ doci, cpu_profileScore[doci], h_profileScore[doci]); return; } } printf( "Verification: PASS\n" ); } __device__ ulong mulfp( ulong weight, uint freq ) { uint part1 = weight & 0xFFFFF; // lower 24-bits of weight uint part2 = (weight >> 24) & 0xFFFF; // next 16-bits uint res1 = part1 * freq; uint res2 = part2 * freq; return (ulong)res1 + (((ulong)res2) << 24); } __global__ void compute ( const uint* __restrict__ docWordFrequencies_dimm1, const uint* __restrict__ docWordFrequencies_dimm2, const ulong*__restrict__ profileWeights_dimm1, const ulong*__restrict__ profileWeights_dimm2, const uint* __restrict__ isWordInProfileHash, uint* __restrict__ profileScorePerGroup_highbits_dimm1, uint* __restrict__ profileScorePerGroup_lowbits_dimm2 ) { uint curr_entry[MANUAL_VECTOR]; uint word_id[MANUAL_VECTOR]; uint freq[MANUAL_VECTOR]; uint hash1[MANUAL_VECTOR]; uint hash2[MANUAL_VECTOR]; bool is_end[MANUAL_VECTOR]; bool make_access[MANUAL_VECTOR]; __shared__ ulong partial[NUM_THREADS_PER_WG/MANUAL_VECTOR]; int gid = blockIdx.x * blockDim.x + threadIdx.x; ulong sum = 0; //#pragma unroll for (uint i=0; i<MANUAL_VECTOR; i++) { curr_entry[i] = docWordFrequencies_dimm1[gid*MANUAL_VECTOR + i]; freq[i] = curr_entry[i] & 0xff; word_id[i] = curr_entry[i] >> 8; is_end[i] = curr_entry[i] == docEndingTag; hash1[i] = word_id[i] >> BLOOM_1; hash2[i] = word_id[i] & BLOOM_2; make_access[i] = !is_end[i] && ((isWordInProfileHash[ hash1[i] >> 5 ] >> (hash1[i] & 0x1f)) & 0x1) && ((isWordInProfileHash[ hash2[i] >> 5 ] >> (hash2[i] & 0x1f)) & 0x1); if (make_access[i]) { sum += mulfp(profileWeights_dimm1[word_id[i]],freq[i]); } } //#pragma unroll for (uint i=0; i<MANUAL_VECTOR; i++) { curr_entry[i] = docWordFrequencies_dimm2[gid*MANUAL_VECTOR + i]; freq[i] = curr_entry[i] & 0xff; word_id[i] = curr_entry[i] >> 8; is_end[i] = curr_entry[i] == docEndingTag; hash1[i] = word_id[i] >> BLOOM_1; hash2[i] = word_id[i] & BLOOM_2; make_access[i] = !is_end[i] && ((isWordInProfileHash[ hash1[i] >> 5 ] >> (hash1[i] & 0x1f)) & 0x1) && ((isWordInProfileHash[ hash2[i] >> 5 ] >> (hash2[i] & 0x1f)) & 0x1); if (make_access[i]) { sum += mulfp(profileWeights_dimm2[word_id[i]],freq[i]); } } partial[threadIdx.x] = sum; __syncthreads(); if (threadIdx.x == 0) { ulong4 res= *(ulong4*)&partial[0]; ulong4 res2= *(ulong4*)&partial[4]; ulong final_result = res.x + res.y + res.z + res.w + 
res2.x + res2.y + res2.z + res2.w; profileScorePerGroup_highbits_dimm1[blockIdx.x] = (uint) (final_result >> 32); profileScorePerGroup_lowbits_dimm2[blockIdx.x] = (uint) (final_result & 0xFFFFFFFF); } } __global__ void reduction( const ulong* __restrict__ docInfo, const uint* __restrict__ partial_highbits_dimm1, const uint* __restrict__ partial_lowbits_dimm2, ulong* __restrict__ result) { int gid = blockIdx.x * blockDim.x + threadIdx.x; ulong info = docInfo[gid]; unsigned start = info >> 32; unsigned end = info & 0xFFFFFFFF; ulong total = 0; #pragma unroll 2 for (unsigned i=start; i<=end; i++) { ulong upper = partial_highbits_dimm1[i]; ulong lower = partial_lowbits_dimm2[i]; ulong sum = (upper << 32) | lower; total += sum; } result[gid] = total; } int main(int argc, char** argv) { Options options(argc, argv); // Optional argument to specify the problem size. if(options.has("n")) { total_num_docs = options.get<uint>("n"); } printf("Total number of documents: %u\n", total_num_docs); if(options.has("p")) { repeat = options.get<uint>("p"); } printf("Kernel execution count: %u\n", repeat); srand(2); printf("RAND_MAX: %d\n", RAND_MAX); printf("Allocating and setting up data\n"); setupData(); size_t local_size = (block_size / MANUAL_VECTOR); size_t global_size = total_doc_size / 2 / MANUAL_VECTOR / local_size; size_t local_size_reduction = block_size; size_t global_size_reduction = total_num_docs / block_size; uint* d_docWordFrequencies_dimm1; uint* d_docWordFrequencies_dimm2; uint* d_partialSums_dimm1; uint* d_partialSums_dimm2; ulong* d_profileWeights_dimm1; ulong* d_profileWeights_dimm2; uint* d_isWordInProfileHash; ulong* d_docInfo; ulong* d_profileScore; cudaMalloc((void**)&d_docWordFrequencies_dimm1, sizeof(uint) * total_doc_size/2); cudaMalloc((void**)&d_docWordFrequencies_dimm2, sizeof(uint) * total_doc_size/2); cudaMalloc((void**)&d_partialSums_dimm1, sizeof(uint) * total_doc_size/(2*block_size)); cudaMalloc((void**)&d_partialSums_dimm2, sizeof(uint) * total_doc_size/(2*block_size)); cudaMalloc((void**)&d_profileWeights_dimm1, sizeof(ulong) * (1L << 24)); cudaMalloc((void**)&d_profileWeights_dimm2, sizeof(ulong) * (1L << 24)); cudaMalloc((void**)&d_isWordInProfileHash, sizeof(uint) * (1L << BLOOM_SIZE)); cudaMalloc((void**)&d_docInfo, sizeof(ulong) * total_num_docs); cudaMalloc((void**)&d_profileScore, sizeof(ulong) * total_num_docs); cudaMemcpy(d_docWordFrequencies_dimm1, h_docWordFrequencies_dimm1, sizeof(uint) * total_doc_size/2, cudaMemcpyHostToDevice); cudaMemcpy(d_docWordFrequencies_dimm2, h_docWordFrequencies_dimm2, sizeof(uint) * total_doc_size/2, cudaMemcpyHostToDevice); cudaMemcpy(d_profileWeights_dimm1, h_profileWeights, sizeof(ulong) * (1L << 24), cudaMemcpyHostToDevice); cudaMemcpy(d_profileWeights_dimm2, h_profileWeights, sizeof(ulong) * (1L << 24), cudaMemcpyHostToDevice); cudaMemcpy(d_isWordInProfileHash, h_isWordInProfileHash, sizeof(uint) * (1L << BLOOM_SIZE), cudaMemcpyHostToDevice); cudaMemcpy(d_docInfo, h_docInfo, sizeof(ulong) * total_num_docs, cudaMemcpyHostToDevice); const double start_time = getCurrentTimestamp(); for (uint i=0; i<repeat; i++) { compute<<<global_size, local_size>>>( d_docWordFrequencies_dimm1, d_docWordFrequencies_dimm2, d_profileWeights_dimm1, d_profileWeights_dimm2, d_isWordInProfileHash, d_partialSums_dimm1, d_partialSums_dimm2); reduction<<<global_size_reduction, local_size_reduction>>>( d_docInfo, d_partialSums_dimm1, d_partialSums_dimm2, d_profileScore); } cudaDeviceSynchronize(); const double end_time = getCurrentTimestamp(); double 
kernelExecutionTime = (end_time - start_time)/repeat; printf("======================================================\n"); printf("Kernel Time = %f ms (averaged over %d times)\n", kernelExecutionTime * 1000.0f, repeat ); printf("Throughput = %f\n", total_doc_size_no_padding / kernelExecutionTime / 1.0e+6f ); cudaMemcpy(h_profileScore, d_profileScore, sizeof(ulong) * total_num_docs, cudaMemcpyDeviceToHost); cudaFree(d_docWordFrequencies_dimm1); cudaFree(d_docWordFrequencies_dimm2); cudaFree(d_partialSums_dimm1); cudaFree(d_partialSums_dimm2); cudaFree(d_profileWeights_dimm1); cudaFree(d_profileWeights_dimm2); cudaFree(d_isWordInProfileHash); cudaFree(d_docInfo); cudaFree(d_profileScore); printf("Done\n"); runOnCPU(); }
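// mulfp() above splits the 64-bit weight into a low 20-bit field (weight & 0xFFFFF)
// and a 16-bit field taken from bit 24 upward ((weight >> 24) & 0xFFFF), multiplies
// each by the 8-bit frequency in 32-bit arithmetic, and recombines the partial
// products as res1 + (res2 << 24). For the weights this benchmark actually generates
// (0 or 10, which fit entirely in the low 20 bits), that equals the plain 64-bit
// product runOnCPU() computes, which is consistent with the verification passing.
// A hypothetical host-side spot check that recomputes the same split on the CPU
// (mulfp itself is __device__, so the expression is re-stated here):
static void check_mulfp_small_weights()
{
    for (ulong w = 0; w <= 10; ++w) {
        for (uint f = 1; f <= 254; ++f) {
            ulong split = (ulong)((uint)(w & 0xFFFFF) * f)
                        + (((ulong)((uint)((w >> 24) & 0xFFFF) * f)) << 24);
            if (split != w * (ulong)f) {
                printf("mulfp-style split disagrees at w=%lu f=%u\n", (unsigned long)w, f);
                return;
            }
        }
    }
    printf("mulfp-style split matches the 64-bit product for small weights\n");
}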
a854f96d427f0f08e465dd930dfa74d96e47dc95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_crop.h" #include "hip/hip_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_crop_fwd(Dtype * out_data, \ const Dtype* in_data, const int in_n_stride, const int in_c_stride, const int in_h_stride, const int in_w_stride, const int out_n_stride, const int out_c_stride, const int out_h_stride, const int out_w_stride, const int out_n, const int out_c, const int out_h, const int out_w, const int num_threads) { CUDA_KERNEL_LOOP(tid, num_threads){ int n = (tid / out_n_stride) % out_n; int c = (tid / out_c_stride) % out_c; int h = (tid / out_h_stride) % out_h; int w = (tid / out_w_stride) % out_w; int in_offset = n * in_n_stride + c * in_c_stride + h * in_h_stride + w * in_w_stride; out_data[tid] = in_data[in_offset]; } } template <> SaberStatus SaberCrop<NV,AK_FLOAT>::dispatch(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, CropParam<NV>& param) { const OpDataType* in_data = (const OpDataType*)inputs[0]->data(); OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data(); hipStream_t cuda_stream = this->_ctx->get_compute_stream(); int count = outputs[0]->valid_size(); int out_n = outputs[0]->num(); int out_c = outputs[0]->channel(); int out_h = outputs[0]->height(); int out_w = outputs[0]->width(); if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { hipLaunchKernelGGL(( ker_crop_fwd<OpDataType>)\ , dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \ out_data, in_data + _img_offset, \ _in_n_stride, _in_c_stride, _in_h_stride, _in_w_stride,\ _out_n_stride, _out_c_stride, _out_h_stride, _out_w_stride,\ out_n, out_c, out_h, out_w, count); } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberCrop, CropParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberCrop, CropParam, NV, AK_INT8); } }
a854f96d427f0f08e465dd930dfa74d96e47dc95.cu
#include "saber/funcs/impl/cuda/saber_crop.h" #include "cuda_fp16.h" namespace anakin { namespace saber { template <typename Dtype> __global__ void ker_crop_fwd(Dtype * out_data, \ const Dtype* in_data, const int in_n_stride, const int in_c_stride, const int in_h_stride, const int in_w_stride, const int out_n_stride, const int out_c_stride, const int out_h_stride, const int out_w_stride, const int out_n, const int out_c, const int out_h, const int out_w, const int num_threads) { CUDA_KERNEL_LOOP(tid, num_threads){ int n = (tid / out_n_stride) % out_n; int c = (tid / out_c_stride) % out_c; int h = (tid / out_h_stride) % out_h; int w = (tid / out_w_stride) % out_w; int in_offset = n * in_n_stride + c * in_c_stride + h * in_h_stride + w * in_w_stride; out_data[tid] = in_data[in_offset]; } } template <> SaberStatus SaberCrop<NV,AK_FLOAT>::dispatch(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, CropParam<NV>& param) { const OpDataType* in_data = (const OpDataType*)inputs[0]->data(); OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data(); cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); int count = outputs[0]->valid_size(); int out_n = outputs[0]->num(); int out_c = outputs[0]->channel(); int out_h = outputs[0]->height(); int out_w = outputs[0]->width(); if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()) { ker_crop_fwd<OpDataType>\ <<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>(\ out_data, in_data + _img_offset, \ _in_n_stride, _in_c_stride, _in_h_stride, _in_w_stride,\ _out_n_stride, _out_c_stride, _out_h_stride, _out_w_stride,\ out_n, out_c, out_h, out_w, count); } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberCrop, CropParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberCrop, CropParam, NV, AK_INT8); } }
e374dd6688e62c54d4ecfe7a9cb0ab3161a03d7b.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <limits> #ifdef __CUDA_ARCH__ typedef int value_type; #else typedef double value_type; #endif constexpr value_type val = 12; __host__ __device__ value_type get_val() { return value_type(val); } __global__ void get_device_data_kernel(double* pBuf) { *pBuf = double(get_val()); } double get_device_data() { double h, *pD; auto err = hipMalloc((void**) &pD, sizeof(double)); if (err) return std::numeric_limits<double>::infinity(); hipLaunchKernelGGL(( get_device_data_kernel), dim3(1), dim3(1), 0, 0, pD); hipDeviceSynchronize(); err = hipMemcpy(&h, pD, sizeof(double), hipMemcpyDeviceToHost); if (err) return std::numeric_limits<double>::infinity(); hipDeviceSynchronize(); err = hipFree(pD); if (err) return std::numeric_limits<double>::infinity(); return h; } double get_host_data() { return get_val(); } #include <iostream> int main(int, char**) { std::cout << "Host copy " << get_host_data() << "\n"; std::cout << "Device copy " << get_device_data() << "\n"; return 0; }
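// Portability note: hipify leaves the #ifdef __CUDA_ARCH__ above unchanged, but that
// macro is tied to NVIDIA device compilation and is not a reliable device-side guard
// when building for AMD GPUs. HIP's documented guard for "am I compiling device
// code?" is __HIP_DEVICE_COMPILE__, so a more portable form of the type switch would
// look roughly like this (a sketch, not part of the original file):
//
//   #if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
//   typedef int value_type;
//   #else
//   typedef double value_type;
//   #endif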
e374dd6688e62c54d4ecfe7a9cb0ab3161a03d7b.cu
#include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <limits> #ifdef __CUDA_ARCH__ typedef int value_type; #else typedef double value_type; #endif constexpr value_type val = 12; __host__ __device__ value_type get_val() { return value_type(val); } __global__ void get_device_data_kernel(double* pBuf) { *pBuf = double(get_val()); } double get_device_data() { double h, *pD; auto err = cudaMalloc((void**) &pD, sizeof(double)); if (err) return std::numeric_limits<double>::infinity(); get_device_data_kernel<<<1, 1>>>(pD); cudaDeviceSynchronize(); err = cudaMemcpy(&h, pD, sizeof(double), cudaMemcpyDeviceToHost); if (err) return std::numeric_limits<double>::infinity(); cudaDeviceSynchronize(); err = cudaFree(pD); if (err) return std::numeric_limits<double>::infinity(); return h; } double get_host_data() { return get_val(); } #include <iostream> int main(int, char**) { std::cout << "Host copy " << get_host_data() << "\n"; std::cout << "Device copy " << get_device_data() << "\n"; return 0; }
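// nvcc compiles this translation unit in two passes: a host pass, where __CUDA_ARCH__
// is undefined and value_type is double, and a device pass, where __CUDA_ARCH__ is
// defined and value_type is int. get_val() therefore returns int(12) inside the
// kernel and double(12) on the host; both end up as the double 12.0 before printing,
// so the expected output (assuming the CUDA allocations and copies succeed) is:
//
//   Host copy 12
//   Device copy 12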
9dd51eb28a8419a9eced154fce909cfe9faa106a.hip
// !!! This is a file automatically generated by hipify!!! #include"Imgsimulation.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <opencv2/opencv.hpp> #include <iostream> #include <string.h> #include <fstream> #include <string> #include <io.h> #include <vector> #include <time.h> #include <thread> #include <mutex> #include <atomic> #include <stdio.h> #include <algorithm> #include "Thread.h" #include "ThreadPoolExecutor.h" #include "hip/hip_runtime_api.h" #include <helper_cuda.h>// #include <helper_string.h> #include <npp.h> #include "Endianess.h" #include "useful.h" #include "kernel.h" #include <Windows.h> #include <GdiPlus.h> #pragma comment( lib, "GdiPlus.lib" ) using namespace Gdiplus; using namespace std; using namespace cv; double Timedatarefresh = 0.5; bool SimulationSuccessFlaf = false; // std::mutex gExtrackPointLock;//RRec std::mutex gComressReadDataLock; std::mutex compress_process_lock;// std::mutex compress_write_lock;// std::mutex compress_writeCPU_lock;// // #define ExtractPointThreads 2 #define CompressionThreads 2 #define CUDAStreams 5 #define GRAYCompressStreams 5 //GB #define DiskRemainingSpaceThreshold 50 //blockthread int gHostImgblock = ExtractPointThreads * CUDAStreams; int gDeviceCount; int gHostPathImgNumber; dim3 blocks; //cuda dim3 threads(8, 8); //block // Parameter gStructVarible{ NULL,NULL,NULL,8,1,5120,5120,5120,60,30,300,8,640,640,0,99999,2000,5,0,0 ,4 }; // Infomation SignPoint; // HardwareInfo HardwareParam;// #define Pretreatment #ifdef Pretreatment #define ReadImageNumber 250 #endif // Pretreatment unsigned char* gHostImage[250] = { NULL }; unsigned char* gHostColorImage[250] = { NULL }; //-------------------------Model-----------------------------// typedef struct { short RecXmin; short RecYmin; short RecXmax; short RecYmax; }RecData;// vector<RecData> gHostRecData;//CPU int gRecNum;// int gSingleImgRecNum;// /*------------------------------------------------*/ struct CircleInfo//(24) { short index; short length; short area; double xpos; double ypos; }; // unsigned char * OnlineRefreshIMG; // int BufferBlockIndex[6] = { 0 };//600 int Bufferlength;//() vector<int>gWorkingGpuId;// bool ExtractPointInitialSuccessFlag[3] = { false };// bool ExtractPointSuccess = false;// // unsigned char * gRecupImgData = NULL;// bool DevUpdateRec[3] = { false };//true CPUGPUCPUGPU bool HostUpdateRec = false; //true bool RecupdataInitialSuccessFlag = false; // unsigned char * gCameraDress=NULL; unsigned char * gCameraBuffer[6] = { NULL }; bool CameraBufferFull[6] = { false };// //() unsigned char * gHostBuffer[4] = { NULL }; bool PageLockBufferEmpty[4] = { true }; bool PageLockBufferWorking[4] = { false }; int PageLockBufferStartIndex[4]; // unsigned char *gHostComressiongBuffer[4] = { NULL }; bool gComressionBufferEmpty[4] = { true }; bool gComressionBufferWorking[4] = { false }; int gComressionBufferStartIndex[4]; //-----------------------------------------------------------------------------------------------------// /*********************************************************************************************** Function: RGBtoYUV( Description: .bmpRGB Calls: Input: unsigned char* dataIn int imgHeight int imgWidth unsigned int nPitch Output: unsigned char* Y, unsigned char* Cb, unsigned char* Cr ************************************************************************************************/ __global__ void RGBtoYUV(unsigned char* dataIn, unsigned char* Y, unsigned char* Cb, 
unsigned char* Cr, int imgHeight, int imgWidth, int nPitch, int old_Height, int old_Width) { int xIndex = threadIdx.x + blockIdx.x * blockDim.x; int yIndex = threadIdx.y + blockIdx.y * blockDim.y; if (xIndex < old_Width && yIndex < old_Height) { unsigned char blue = dataIn[yIndex * old_Width * 3 + xIndex * 3 + 2]; unsigned char green = dataIn[yIndex * old_Width * 3 + xIndex * 3 + 1]; unsigned char red = dataIn[yIndex * old_Width * 3 + xIndex * 3]; unsigned char y = 0.299 * red + 0.587 * green + 0.114 * blue; unsigned char cb = -0.1687 * red - 0.3313 * green + 0.5 * blue + 127; unsigned char cr = 0.5 * red - 0.4187 * green - 0.0813 * blue + 127; Y[yIndex * nPitch + xIndex] = y; Cb[yIndex * nPitch + xIndex] = cb; Cr[yIndex * nPitch + xIndex] = cr; } } /*--------------------------------needmemory-------------------------------*/ struct needmemory { Npp16s *pDCT[3] = { 0,0,0 }; //GPUDCT Npp32s DCTStep[3]; //DCT NppiDCTState *pDCTState; Npp8u *pDImage[3] = { 0,0,0 }; //GPUYCbCr Npp32s DImageStep[3]; //YCbCr Npp8u *pDScan; //GPU Npp32s nScanSize; //pDScan Npp8u *pDJpegEncoderTemp; //GPU size_t nTempSize; // pDJpegEncoderTemp Npp32s nScanLength; //pDScan Npp8u *hpCodesDC[3]; //DCAC Npp8u *hpCodesAC[3]; Npp8u *hpTableDC[3]; Npp8u *hpTableAC[3]; }; /*--------------------------------needdata-------------------------------*/ struct needdata { NppiSize oDstImageSize; //jpg NppiSize aDstSize[3]; // Npp8u *pdQuantizationTables; //GPU NppiEncodeHuffmanSpec *apDHuffmanDCTable[3]; // GPU NppiEncodeHuffmanSpec *apDHuffmanACTable[3]; // GPU }; struct Pk { int Offest;// int FileLen;// //int FileNameLen;// //char* FileName;// int FileNumber; }; class Package { public: //Package(const char* Fname, int FileNum) :Fname(Fname), FileNum(FileNum) Package(const char* Fname) :Fname(Fname) { table_scale = 0; concordancesize = 0; head_cache = new char[20000]; head_bias = 0; } ~Package() { delete[] head_cache; delete[]concordance; } //void Form_one_head(int index, char* Filename, int FileLen); void Package_init(int Num) { FileNum = Num; concordance = new Pk[FileNum]; } void Form_one_head(int index, int one_picture_index, int FileLen); void UnPack(const char* name, const char* save_path); // //void Form_total_head(); void Form_total_head(int one_picture_width, int one_picture_height, int picture_number, int picture_index); Pk* concordance; fstream file; int concordancesize; // int FileNum; // const char* Fname; // char* head_cache; // int head_bias; // int table_scale; }; //void Package::Form_one_head(int index, char* Filename, int FileLen) void Package::Form_one_head(int index, int one_picture_index, int FileLen) { if (index == 0) // { concordance[index].Offest = 0; } else { concordance[index].Offest = concordance[index - 1].Offest + concordance[index - 1].FileLen; } //table_scale = table_scale + strlen(Filename) + 1 + 3 * sizeof(int); // table_scale = table_scale + 3 * sizeof(int); //concordance[index].FileNameLen = strlen(Filename) + 1; // //concordance[index].FileName = new char[50]; //strcpy(concordance[index].FileName, Filename); //cout << concordance[index].FileName << endl; concordance[index].FileNumber = one_picture_index; concordance[index].FileLen = FileLen; } void Package::Form_total_head(int one_picture_width, int one_picture_height, int picture_number, int picture_index) { concordancesize = table_scale + 6 * sizeof(int); // memcpy(head_cache, (char*)&concordancesize, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&FileNum, sizeof(int)); head_bias += sizeof(int); 
memcpy(head_cache + head_bias, (char*)&one_picture_width, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&one_picture_height, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&picture_number, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&picture_index, sizeof(int)); head_bias += sizeof(int); //cout << FileNum << endl; for (int i = 0; i < FileNum; ++i) { memcpy(head_cache + head_bias, (char*)&concordance[i].Offest, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&concordance[i].FileLen, sizeof(int)); head_bias += sizeof(int); //memcpy(head_cache + head_bias, &concordance[i].FileNameLen, sizeof(int)); memcpy(head_cache + head_bias, &concordance[i].FileNumber, sizeof(int)); head_bias += sizeof(int); //memcpy(head_cache + head_bias, concordance[i].FileName, concordance[i].FileNameLen); //head_bias += concordance[i].FileNameLen; } } void Package::UnPack(const char *name, const char* save_path) // { int one_picture_width, one_picture_height, picture_number, picture_index; file.open(name, ios::in | ios::binary); file.read((char*)&concordancesize, sizeof(int)); // file.read((char*)&FileNum, sizeof(int)); // file.read((char*)&one_picture_width, sizeof(int)); file.read((char*)&one_picture_height, sizeof(int)); file.read((char*)&picture_number, sizeof(int)); file.read((char*)& picture_index, sizeof(int)); file.seekg(8 + 4 * 4, ios::beg); concordance = new Pk[FileNum]; for (int i = 0; i < FileNum; ++i) // { file.read((char*)&concordance[i].Offest, sizeof(int)); // file.read((char*)&concordance[i].FileLen, sizeof(int)); // file.read((char*)&concordance[i].FileNumber, sizeof(int)); //file.read((char*)&concordance[i].FileNameLen, sizeof(int)); // //concordance[i].FileName = new char[concordance[i].FileNameLen]; //memset(concordance[i].FileName, 0, sizeof(char)*concordance[i].FileNameLen);// //file.read(concordance[i].FileName, concordance[i].FileNameLen);// } fstream file1; for (int i = 0; i < FileNum; ++i) { char arr[1024] = { 0 }; //sprintf(arr, "%s", concordance[i].FileName); //map sprintf_s(arr, "%s\\%d.jpg", save_path, concordance[i].FileNumber); file1.open(arr, ios::out | ios::binary); file.seekg(concordancesize + concordance[i].Offest, ios::beg); // for (int j = 0; j < concordance[i].FileLen; ++j) //copy { file1.put(file.get()); } file1.close(); Mat img = imread(arr, IMREAD_UNCHANGED); for (int j = 0; j < picture_number; j++) { char one_image_save_path[50]; sprintf_s(one_image_save_path, "%s\\%d.jpg", save_path, picture_index + i * picture_number + j); cv::Rect rect(0, j * one_picture_height / picture_number, one_picture_width, one_picture_height / picture_number); Mat image_cut = Mat(img, rect); Mat image_copy = image_cut.clone(); imwrite(one_image_save_path, image_copy); } //char one_image_save_path[50]; //sprintf_s(one_image_save_path, "%s\\%d.bin", save_path, picture_index); } file.close(); //for (int i = 0; i < FileNum; ++i)// //{ //delete[]concordance[i].FileName; //} } unsigned char* gpHudata; // unsigned char* gpHvdata; //-----------------------------------------------------------------------------// /************************************************* : ColorMakeBorder // : WidthWidth128 . int imgWidth = (width + 127) / 128 * 128 // const unsigned char *colorimg colorimg24 . Parameter devpardevpar // unsigned char *dstdstWidth0 // : // : . block(128,1,1) GridImgMakeborderWidth/128, ImgHeight,1 . 
GPU// *************************************************/ __global__ void ColorMakeBorder(const unsigned char * colorimg, unsigned char *dst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x*blockDim.x;// const int Id_x = blockIdx.y;// int b = 0; int g = 0; int r = 0; if (Id_y < devpar.ImgWidth) { b = colorimg[3 * Id_y + Id_x * devpar.ImgWidth *devpar.ImgChannelNum]; g = colorimg[3 * Id_y + 1 + Id_x * devpar.ImgWidth * devpar.ImgChannelNum]; r = colorimg[3 * Id_y + 2 + Id_x * devpar.ImgWidth * devpar.ImgChannelNum]; dst[Id_y + Id_x * devpar.ImgMakeborderWidth] = unsigned char((r * 30 + g * 59 + b * 11 + 50) / 100); } }; /************************************************* : GrayMakeBorder // : WidthWidth128 . int imgWidth = (width + 127) / 128 * 128 // const unsigned char *src Src . Parameter devpardevpar // unsigned char *dstdstWidth0 // : // : . block(128,1,1) GridImgMakeborderWidth/128, ImgHeight,1 . GPU// *************************************************/ __global__ void GrayMakeBorder(const unsigned char *src, unsigned char *dst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x*blockDim.x;// const int Id_x = blockIdx.y;// if (Id_y < devpar.ImgWidth) { dst[Id_y + Id_x * devpar.ImgMakeborderWidth] = src[Id_y + Id_x * devpar.ImgWidth]; } } /************************************************* : Binarization // : Parameter devpar . 2550 // unsigned char *psrcgray . Parameter devpar // unsigned char *pdst2val . unsigned char *pdstcounter // : // : . block(128,1,1) GridImgMakeborderWidth/128, ImgHeight,1 . GPU // *************************************************/ __global__ void Binarization(unsigned char *psrcgray, unsigned char *pdst2val, unsigned char *pdstcounter, Parameter devpar) { const int Id = threadIdx.x + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;// int temp = int(psrcgray[Id]);// if (Id < devpar.ImgMakeborderWidth * devpar.ImgHeight*devpar.PictureNum)// { pdst2val[Id] = unsigned char(255 * int(temp>devpar.Threshold));// pdstcounter[Id] = unsigned char(255 * int(temp>devpar.Threshold)); } } /************************************************* : Dilation // : 800255 // unsigned char *psrc . Parameter devpar // unsigned char *pdst // : // : . block(128,1,1) GridImgMakeborderWidth/128, ImgHeight,1 . GPU // *************************************************/ __global__ void Dilation(unsigned char *psrc, unsigned char *pdst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x *blockDim.x;//Id_y const int Id_x = blockIdx.y;//Id_x int temp;// if (Id_y> 1 && Id_y < (devpar.ImgMakeborderWidth - 1) && Id_x>0 && Id_x < devpar.PictureNum*devpar.ImgHeight - 1) { if (psrc[Id_y + Id_x * devpar.ImgMakeborderWidth] == 0) { temp = int(psrc[Id_y - 1 + (Id_x - 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + (Id_x - 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + (Id_x - 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y - 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y - 1 + (Id_x + 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + (Id_x + 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + (Id_x + 1)* devpar.ImgMakeborderWidth]); pdst[Id_y + Id_x * devpar.ImgMakeborderWidth] = temp > 0 ? 255 : 0;// } } } /************************************************* : Erosion // : 4255400 // unsigned char *psrc . Parameter devpar // unsigned char *pdst . // : // : . block(128,1,1) GridImgMakeborderWidth/128, ImgHeight,1 . 
GPU // *************************************************/ __global__ void Erosion(unsigned char *psrc, unsigned char *pdst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x *blockDim.x;//Id_y const int Id_x = blockIdx.y;//Id_x int temp;//4 //4dst if (Id_y > 0 && Id_y < (devpar.ImgMakeborderWidth - 1) && Id_x>0 && Id_x <devpar.ImgHeight*devpar.PictureNum - 1) { if (psrc[Id_y + Id_x * devpar.ImgMakeborderWidth] != 0) { temp = int(psrc[Id_y + (Id_x - 1)*devpar.ImgMakeborderWidth]) + int(psrc[Id_y - 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y + (Id_x + 1)*devpar.ImgMakeborderWidth]);//4 pdst[Id_y + Id_x * devpar.ImgMakeborderWidth] = temp >= 1020 ? 0 : 255;// } } } /************************************************* : GetCounter // : 8 // . unsigned char *psrc . Parameter devpar // short *c_length 0 . x_miny_minx_maxy_max . x_miny_minx_maxy_max . 0 : // : . GPU . block(128,1,1)Grid(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1) . ColThreadNumRowThreadNumColThreadNum . 128 . PicBlockSizePicBlockSizePicBlockSize81632 *************************************************/ __global__ void GetCounter(unsigned char *src, short *c_length, short* x_min, short * y_min, short* x_max, short *y_max, Parameter devpar) { /*,0451*/ const int direction_y[8] = { 1,1,0,-1,-1,-1,0,1 }; const int direction_x[8] = { 0,1,1,1,0,-1,-1,-1 }; //short Picblocksize = devpar.PicBlockSize;// /**/ const int y = (blockIdx.x*blockDim.x + threadIdx.x) * devpar.PicBlockSize;//y const int x = blockIdx.y * devpar.PicBlockSize;//x const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x;// /**/ c_length[Id] = 0; x_min[Id] = 0; x_max[Id] = 0; y_min[Id] = 0; y_max[Id] = 0; bool SuccessFlag = false;//true /**/ short Rec_xmx = 0, Rec_xmm = 0; short Rec_ymx = 0, Rec_ymm = 0; if ((y / devpar.PicBlockSize) < (devpar.ImgWidth / devpar.PicBlockSize) && (x / devpar.PicBlockSize) < (devpar.ImgHeight*devpar.PictureNum / devpar.PicBlockSize))// { for (int i = x; i < (x + devpar.PicBlockSize); i++) { for (int j = y; j < (y + devpar.PicBlockSize); j++) { if (255 == src[j + i * devpar.ImgMakeborderWidth]) { /**/ Rec_ymx = j; Rec_ymm = j; Rec_xmx = i; Rec_xmm = i; /**/ short root_x = i;// short root_y = j;// short counts;//8 short curr_d = 0;//0-78 /**/ for (short cLengthCount = 2; cLengthCount < devpar.LengthMax; cLengthCount++)// { /**/ short boot_x = root_x; short boot_y = root_y; /**/ Rec_xmx = Rec_xmx > root_x ? Rec_xmx : root_x; Rec_ymx = Rec_ymx > root_y ? Rec_ymx : root_y; Rec_xmm = Rec_xmm < root_x ? Rec_xmm : root_x; Rec_ymm = Rec_ymm < root_y ? Rec_ymm : root_y; /**/ for (counts = 0; counts < 8; counts++) { /**/ curr_d -= curr_d >= 8 ? 8 : 0; curr_d += curr_d < 0 ? 
8 : 0; /*7()count=6*/ if (cLengthCount >2 && (counts == 6)) { curr_d++; continue; } /*boot*/ boot_x = root_x + direction_x[curr_d];// boot_y = root_y + direction_y[curr_d];// /**/ if (boot_x < 0 || boot_x >= devpar.ImgHeight*devpar.PictureNum || boot_y < 0 || boot_y >= devpar.ImgWidth) { curr_d++; continue; } /**/ if (255 == src[boot_y + boot_x * devpar.ImgMakeborderWidth]) { curr_d -= 2; // root_x = boot_x;// root_y = boot_y; break; } curr_d++; } // end for /**/ if (8 == counts || (root_x >= (x + devpar.PicBlockSize) && root_y >= (y + devpar.PicBlockSize))) { break; } /**/ if (root_y == j && root_x == i) { x_min[Id] = Rec_xmm; x_max[Id] = Rec_xmx; y_min[Id] = Rec_ymm; y_max[Id] = Rec_ymx; c_length[Id] = cLengthCount; SuccessFlag = true; break; }//if }//for }//if if (SuccessFlag) break; j = Rec_ymx > j ? Rec_ymx : j;// }//for if (SuccessFlag) break; i = Rec_xmx > i ? Rec_xmx : i;// }//for } }// /**/ __global__ void SelectTrueBox(unsigned char *ImgCounter, short *clength, short* Recxmm, short * Recymm, short* Recxmx, short *Recymx, short*index, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x; index[Id] = 0; short temp1 = 0; short yMidPos = 0; short xMidPos = 0; short Rxmm = Recxmm[Id]; short Rymm = Recymm[Id]; short RecBoxHeight = Recxmx[Id] - Recxmm[Id]; short RecBoxWidth = Recymx[Id] - Recymm[Id]; if (clength[Id] > devpar.LengthMin) { if ((float(RecBoxHeight) / float(RecBoxWidth))<1.5&& float((RecBoxHeight) / float(RecBoxWidth)) >0.7)// { if (Rxmm > 0 && Rymm > 0 && Recxmx[Id] < devpar.ImgHeight*devpar.PictureNum - 1 && Recymx[Id] < devpar.ImgWidth - 1) { yMidPos = Rymm + RecBoxWidth / 2;// xMidPos = Rxmm + RecBoxHeight / 2;// for (int i = -1; i < 2; i++)//9 { if (xMidPos + 1 < devpar.ImgHeight*devpar.PictureNum&&yMidPos + 1 < devpar.ImgWidth) { temp1 += ImgCounter[yMidPos - 1 + (xMidPos + i)*devpar.ImgMakeborderWidth]; temp1 += ImgCounter[yMidPos + (xMidPos + i)*devpar.ImgMakeborderWidth]; temp1 += ImgCounter[yMidPos + 1 + (xMidPos + i)*devpar.ImgMakeborderWidth]; } } for (int i = 0; Rxmm + i <= Rxmm + RecBoxHeight - i; i++)//Height { temp1 += ImgCounter[yMidPos + (Rxmm + i)*devpar.ImgMakeborderWidth] > 0 ? 1 : 0; temp1 += ImgCounter[yMidPos + (Rxmm + RecBoxHeight - i)*devpar.ImgMakeborderWidth] > 0 ? 1 : 0; } for (int i = 0; Rymm + i <= Rymm + RecBoxWidth - i; i++)//width { temp1 += ImgCounter[Rymm + i + xMidPos * devpar.ImgMakeborderWidth] > 0 ? 1 : 0; temp1 += ImgCounter[Rymm + RecBoxWidth - i + xMidPos * devpar.ImgMakeborderWidth] > 0 ? 1 : 0; } index[Id] = temp1 > 4 ? 0 : 1; } } } } __global__ void SelectNonRepeatBox(short* Recxmm, short * Recymm, short*index, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x;// short temp = 0;// if (index[Id] != 0) { if ((Id > devpar.ColThreadNum) && (Id < devpar.ColThreadNum*(devpar.RowThreadNum - 1)))// { if (Recxmm[Id] != 0)// { /*+1+1-1+1*/ temp += ((short(Recxmm[Id]) == short(Recxmm[Id + 1])) && (Recymm[Id] == Recymm[Id + 1])) ? 1 : 0;// temp += ((short(Recxmm[Id]) == short(Recxmm[Id + devpar.ColThreadNum])) && (short(Recymm[Id]) == short(Recymm[Id + devpar.ColThreadNum]))) ? 1 : 0;// temp += ((short(Recxmm[Id]) == short(Recxmm[Id - devpar.ColThreadNum + 1])) && (short(Recymm[Id]) == short(Recymm[Id - devpar.ColThreadNum + 1]))) ? 1 : 0;// index[Id] = temp > 0 ? 
0 : 1;// } } } } __global__ void GetNonRepeatBox(short *Recxmm, short *Recymm, short*index, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x;// const int y = blockIdx.x*blockDim.x + threadIdx.x;// const int x = blockIdx.y;// int Id2 = 0; if (index[Id] != 0) { for (int i = x - 4; i < x + 4; i++) for (int j = y - 4; j < y + 4; j++) if (j > 0 && j < devpar.ImgWidth / devpar.PicBlockSize&&i > 0 && i < devpar.ImgHeight*devpar.PictureNum / devpar.PicBlockSize) { Id2 = j + i * devpar.ColThreadNum; if (index[Id2] != 0) { if ((short(Recxmm[Id]) == short(Recxmm[Id2])) && (short(Recymm[Id]) == short(Recymm[Id2]))) { index[Id] = Id > Id2 ? 0 : 1; } } } } } /************************************************* : GetInfo // : // . unsigned char* src_gray . short *length length>LengthMin . x_miny_minx_maxy_max . Parameter devpar // short *xposshort*ypos . short *area . short *xposshort*yposshort *area0 : // : . GPU . GetCounter block(128,1,1)Grid(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1) *************************************************/ __global__ void GetInfo(unsigned char* src_gray, short *index, short* x_min, short * y_min, short* x_max, short *y_max, double *xpos, double*ypos, short *area, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x; short myArea = 0; double sum_gray = 0;// double x_sum = 0;//x double y_sum = 0;//y short mThreshold = devpar.Threshold;// xpos[Id] = 0; ypos[Id] = 0; int xRealIndex = 0; // short ymm = y_min[Id]; short ymx = y_max[Id]; short jcount = (ymx - ymm + 3) / 4 * 4; unsigned char temp0, temp1, temp2, temp3;// if (index[Id] >0) { //, for (int i = x_min[Id]; i <= x_max[Id]; i++) for (int j = ymm; j <= ymm + jcount; j = j + 4) { xRealIndex = i%devpar.ImgHeight; // temp0 = j > ymx ? 0 : 1; //qwt temp1 = j + 1 > ymx ? 0 : 1; temp2 = j + 2 > ymx ? 0 : 1; temp3 = j + 3 > ymx ? 0 : 1; // temp0 *= src_gray[j *temp0 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[j *temp0 + i * devpar.ImgMakeborderWidth] : 0; temp1 *= src_gray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth] : 0; temp2 *= src_gray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth] : 0; temp3 *= src_gray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth] : 0; myArea += temp0 > 0 ? 1 : 0;// myArea += temp1 > 0 ? 1 : 0; myArea += temp2 > 0 ? 1 : 0; myArea += temp3 > 0 ? 1 : 0; sum_gray += temp0 + temp1 + temp2 + temp3; x_sum += xRealIndex* temp0 + xRealIndex * temp1 + xRealIndex * temp2 + xRealIndex * temp3; y_sum += j * temp0 + (j + 1)*temp1 + (j + 2)*temp2 + (j + 3)*temp3; } index[Id] = (myArea > devpar.AreaMin&&myArea < devpar.AreaMax) ? 1 : 0; area[Id] = myArea; xpos[Id] = x_sum / sum_gray; ypos[Id] = y_sum / sum_gray; } } /************************************************* : GetRecInfo // : // RecData* mRec . unsigned char *psrcgray . unsigned char *psrccounter . Parameter devpar // short *length . short* area . short *xpos, short *ypos : // : . GPU . block(128,1,1) Grid(Gridsize, 1, 1);Gridsize= mRecCount / 128,mRecCount, . 
128 *************************************************/ __global__ void GetRecInfo(RecData* mRec, unsigned char *psrcgray, unsigned char *psrccounter, short *length, short* area, double *xpos, double *ypos, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x;// int mThreshold = devpar.Threshold;// short myArea = 0;// int clengthCount = 0;// short clength = 0;// double sum_gray = 0;// double x_sum = 0;//x double y_sum = 0;//y int xRealIndex = 0; /**/ short xmm = mRec[Id].RecXmin; short xmx = mRec[Id].RecXmax; short ymm = mRec[Id].RecYmin; short ymx = mRec[Id].RecYmax; short jcount = (ymx - ymm + 3) / 4 * 4;// unsigned char temp0, temp1, temp2, temp3;//temp unsigned char t0, t1, t2, t3;//t /**/ area[Id] = 0; xpos[Id] = 0; ypos[Id] = 0; length[Id] = 0; for (int i = xmm; i <= xmx; i++) for (int j = ymm; j <= ymm + jcount; j = j + 4) { xRealIndex = i%devpar.ImgHeight; /**/ temp0 = j > ymx ? 0 : 1; temp1 = j + 1> ymx ? 0 : 1; temp2 = j + 2> ymx ? 0 : 1; temp3 = j + 3> ymx ? 0 : 1; t0 = temp0;//qwt t1 = temp1; t2 = temp2; t3 = temp3; /*4*/ temp0 *= psrcgray[j *temp0 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[j *temp0 + i * devpar.ImgMakeborderWidth] : 0; temp1 *= psrcgray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth] : 0; temp2 *= psrcgray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth] : 0; temp3 *= psrcgray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth] : 0; t0 *= psrccounter[j *t0 + i * devpar.ImgMakeborderWidth]; t1 *= psrccounter[(j + 1)*t1 + i * devpar.ImgMakeborderWidth]; t2 *= psrccounter[(j + 2)*t2 + i * devpar.ImgMakeborderWidth]; t3 *= psrccounter[(j + 3)*t3 + i * devpar.ImgMakeborderWidth]; myArea += temp0 > 0 ? 1 : 0; // myArea += temp1 > 0 ? 1 : 0; myArea += temp2 > 0 ? 1 : 0; myArea += temp3 > 0 ? 1 : 0; clengthCount += t0 + t1 + t2 + t3;// sum_gray += temp0 + temp1 + temp2 + temp3;// x_sum += xRealIndex* temp0 + xRealIndex * temp1 + xRealIndex * temp2 + xRealIndex * temp3; y_sum += j * temp0 + (j + 1)*temp1 + (j + 2)*temp2 + (j + 3)*temp3;//y } clength = clengthCount / 255;// /**/ length[Id] = clength; area[Id] = myArea; xpos[Id] = x_sum / sum_gray; ypos[Id] = y_sum / sum_gray; } //-----------------------------------------------------------------------------------------------// //-----------------------------------------------------------------------------// /** * -40964095 */ __device__ unsigned int GPUjpeg_huffman_value[8 * 1024]; /** * H * huffman- 257 (256 + 1 extra) * huffman: * - luminance (Y) AC * - luminance (Y) DC * - chroma (cb/cr) AC * - chroma (cb/cr) DC */ __device__ uint32_t gpujpeg_huffman_gpu_tab[(256 + 1) * 4]; dim3 gpujpeg_huffman_encoder_grid_size(int tblock_count) { dim3 size(tblock_count); while (size.x > 0xffff) { size.x = (size.x + 1) >> 1; size.y <<= 1; } return size; } /* */ static int ALIGN(int x, int y) { //y // y must be a power of 2. 
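    // Illustrative values (not from the original source): with y a power of two, adding y-1 and
    // masking with ~(y-1) rounds x up to the next multiple of y,
    // e.g. ALIGN(13, 8) == 16, ALIGN(16, 8) == 16, ALIGN(1, 64) == 64.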
return (x + y - 1) & ~(y - 1); } /*********************************************************************************************************** /***write_bitstream /***bitstreambitd_JPEGdata /*** bit_location mcubit /*** bit_length mcubit /*** bit_code mcuhuffman /*** d_JPEGdata bitstream /*** ************************************************************************************************************/ __device__ void write_bitstream(unsigned int even_code, unsigned int odd_code, int length, int bit_location, int even_code_size, BYTE *d_JPEGdata) { // const int byte_restbits = (8 - (bit_location & MASK)); const int byte_location = bit_location >> SHIFT; int write_bytelocation = byte_location; uint64_t threadwrite_code = ((uint64_t)even_code << (24 + byte_restbits)) + ((uint64_t)odd_code << (24 + byte_restbits - even_code_size)); int right_shift = 56; if (byte_restbits != 8) { write_bytelocation++; length -= byte_restbits; right_shift -= 8; } for (int i = length; i > 0; i = i - 8) { d_JPEGdata[write_bytelocation] = (threadwrite_code >> right_shift) & 0XFF; right_shift -= 8; write_bytelocation++; } if (byte_restbits != 8) { d_JPEGdata[byte_location] = d_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; } } /** *huffman */ __global__ static void GPUjpeg_huffman_encoder_value_init_kernel() { // fetch some value const int tid = threadIdx.x + blockIdx.x * blockDim.x; const int value = tid - 4096; // decompose it unsigned int value_code = value; int absolute = value; if (value < 0) { // valu eis now absolute value of input absolute = -absolute; // For a negative input, want temp2 = bitwise complement of abs(input) // This code assumes we are on a two's complement machine value_code--; } // bit unsigned int value_nbits = 0; while (absolute) { value_nbits++; absolute >>= 1; } // (bit) GPUjpeg_huffman_value[tid] = value_nbits | (value_code << (32 - value_nbits)); } __device__ static unsigned int gpuhuffman_encode_value(const int preceding_zero_count, const int coefficient, const int huffman_lut_offset) { // huffman const unsigned int packed_value = GPUjpeg_huffman_value[4096 + coefficient]; // packed_valuebit const int value_nbits = packed_value & 0xf; const unsigned int value_code = packed_value & ~0xf; // find prefix of the codeword and size of the prefix const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits; const unsigned int packed_prefix = gpujpeg_huffman_gpu_tab[huffman_lut_idx]; const unsigned int prefix_nbits = packed_prefix & 31; // return (packed_prefix + value_nbits) | (value_code >> prefix_nbits); } __global__ static void gpujpeg_huffman_gpu_encoder_encode_block(BSI16 *d_ydst, int MCU_total, BYTE *d_JPEGdata, int *prefix_num, int offset, const int huffman_lut_offset) { //block id const int block_idx = (blockIdx.y * gridDim.x << 2) + (blockIdx.x << 2) + threadIdx.y; if (block_idx >= MCU_total) return; __shared__ int Length_count[(THREAD_WARP + 1) * 4]; d_ydst += block_idx << 6; const int load_idx = threadIdx.x * 2; int in_even = d_ydst[load_idx]; const int in_odd = d_ydst[load_idx + 1]; // if (threadIdx.x == 0 && block_idx != 0) in_even = in_even - d_ydst[load_idx - 64]; if (threadIdx.x == 0 && block_idx == 0) in_even = in_even - 64; //0 const unsigned int nonzero_mask = (1 << threadIdx.x) - 1; const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC const unsigned int nonzero_bitmap_1 = __ballot(in_odd); const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1; const int zero_pair_count = __clz(nonzero_bitmap_pairs 
& nonzero_mask); //0 int zeros_before_even = 2 * (zero_pair_count + threadIdx.x - 32); if ((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) { zeros_before_even += 1; } // true if any nonzero pixel follows thread's odd pixel const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask; // ,in_even0in_odd0+1 // (the count is actually multiplied by 16) int zeros_before_odd = (in_even || !threadIdx.x) ? 0 : zeros_before_even + 1; // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited) // otherwise only trim extra bits from the counts of following zeros const int zero_count_mask = nonzero_follows ? 0xF : 0; zeros_before_even &= zero_count_mask; zeros_before_odd &= zero_count_mask; int even_lut_offset = huffman_lut_offset; if (0 == threadIdx.x) { // first thread uses DC part of the table for its even value even_lut_offset += 256 + 1; } // block if (0 == ((threadIdx.x ^ 31) | in_odd)) { // zeros_before_odd16 zeros_before_odd = 16; } // each thread gets codeword for its two pixels unsigned int even_code = gpuhuffman_encode_value(zeros_before_even, in_even, even_lut_offset); unsigned int odd_code = gpuhuffman_encode_value(zeros_before_odd, in_odd, huffman_lut_offset); int *bl_ptr = Length_count + (THREAD_WARP + 1) * threadIdx.y; const unsigned int even_code_size = even_code & 31; const unsigned int odd_code_size = odd_code & 31; int bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; int code_nbits = bit_length; //BLOCK unsigned int prefix_bitmap = __ballot(bit_length); int prefix_count = __popc(prefix_bitmap & nonzero_mask); if (bit_length) { bl_ptr[prefix_count] = bit_length; __syncthreads(); // for (int j = 0; j < prefix_count; j++) { code_nbits = code_nbits + bl_ptr[j]; } } if (threadIdx.x == 31) { prefix_num[block_idx * 3 + offset] = code_nbits; } //d_JPEGdata BYTE *Write_JPEGdata = d_JPEGdata + (block_idx << 6); const int bit_location = code_nbits - bit_length; const int byte_restbits = (8 - (bit_location & MASK)); const int byte_location = bit_location >> SHIFT; int write_bytelocation = byte_location; // int length = bit_length; uint64_t threadwrite_code = ((uint64_t)even_code << (24 + byte_restbits)) + ((uint64_t)odd_code << (24 + byte_restbits - even_code_size)); int right_shift = 56; if (byte_restbits != 8) { write_bytelocation++; length -= byte_restbits; right_shift -= 8; } for (int i = length; i > 0; i = i - 8) { Write_JPEGdata[write_bytelocation] = (threadwrite_code >> right_shift) & 0XFF; right_shift -= 8; write_bytelocation++; } if (byte_restbits != 8) { if (bit_length < byte_restbits && bit_length) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; __syncthreads(); if (bit_length >= byte_restbits) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; } } /*********************************************************************************************************** /***CUDA_RGB2YUV_kernel /***BMPGRBYUV /*** d_bsrc /*** nPitch RGB /*** Size YCrCb /*** Y\Cr\Cb 3 /*** ************************************************************************************************************/ __global__ void CUDA_RGB2YUV_kernel(BYTE *d_bsrc, BYTE *Y, BYTE *Cr, BYTE *Cb, size_t nPitch, size_t StrideF) { int tid = (blockIdx.x << 3) + threadIdx.x; d_bsrc += ((blockIdx.y << 3) + threadIdx.y) * nPitch + (tid << 1) + tid; int OffsThreadInRow = ((blockIdx.y << 3) + threadIdx.y) * StrideF + tid; float r = d_bsrc[2]; 
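    // Pixels are stored as B,G,R triplets, so d_bsrc[2] is the red sample.
    // The lines below form Y, Cr and Cb as weighted sums of r/g/b plus a 128 offset on the
    // chroma channels; the C_Y*/C_U*/C_V* weights are assumed to be the usual JPEG
    // (BT.601-style) conversion coefficients defined elsewhere in this project.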
float g = d_bsrc[1]; float b = d_bsrc[0]; Y[OffsThreadInRow] = (g * C_Yg + b * C_Yb + r * C_Yr); Cr[OffsThreadInRow] = (g * C_Ug + b * C_Ub + 128.f + r* C_Ur); Cb[OffsThreadInRow] = (g * C_Vg + b * C_Vb + 128.f + r* C_Vr); } /*********************************************************************************************************** /***work_efficient_PrefixSum_kernel(int *X, int *BlockSum, int InputSize) /***n /*** X /*** BlockSum /*** X /*** ************************************************************************************************************/ __global__ void work_efficient_PrefixSum_kernel(int *X, int *BlockSum) { // XY[2*BLOCK_SIZE] is in shared memory __shared__ int XY[512]; __shared__ int XY1[512]; int index; int tid = threadIdx.x << 1; int i = (blockIdx.x << 10) + tid + 1; XY[tid] = X[i]; XY[tid + 1] = X[i] + X[i + 1]; XY1[tid] = X[512 + i]; XY1[tid + 1] = X[512 + i] + X[i + 513]; __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 512) { XY[index] += XY[index - 2]; XY1[index] += XY1[index - 2]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 512) { XY[index] += XY[index - 4]; XY1[index] += XY1[index - 4]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 512) { XY[index] += XY[index - 8]; XY1[index] += XY1[index - 8]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 512) { XY[index] += XY[index - 16]; XY1[index] += XY1[index - 16]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 512) { XY[index] += XY[index - 32]; XY1[index] += XY1[index - 32]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 512) { XY[index] += XY[index - 64]; XY1[index] += XY1[index - 64]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 512) { XY[index] += XY[index - 128]; XY1[index] += XY1[index - 128]; } __syncthreads(); if (index < 512) { XY[511] += XY[255]; XY1[511] += XY1[255]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 384) { XY[index + 128] += XY[index]; XY1[index + 128] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 448) { XY[index + 64] += XY[index]; XY1[index + 64] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 480) { XY[index + 32] += XY[index]; XY1[index + 32] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 496) { XY[index + 16] += XY[index]; XY1[index + 16] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 504) { XY[index + 8] += XY[index]; XY1[index + 8] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 508) { XY[index + 4] += XY[index]; XY1[index + 4] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 510) { XY[index + 2] += XY[index]; XY1[index + 2] += XY1[index]; } __syncthreads(); if (threadIdx.x == 0) { X[1 + i] = XY[tid + 1]; X[513 + i] = XY1[tid + 1]; } else { X[i] = XY[tid] + XY[tid - 1]; X[1 + i] = XY[tid + 1]; X[512 + i] = XY1[tid] + XY1[tid - 1]; X[513 + i] = XY1[tid + 1]; BlockSum[(blockIdx.x << 1) + 1] = XY[511]; BlockSum[(blockIdx.x << 1) + 2] = XY1[511]; } } /*********************************************************************************************************** /***work_efficient_BlockUp_kernel(int *dc_component) /***n /*** BlockSum /*** BlockSum /*** ************************************************************************************************************/ __global__ void work_efficient_BlockUp_kernel(int 
*BlockSum) { __shared__ int XY[512]; int index; int tid = threadIdx.x << 1; int i = (blockIdx.x << 9) + tid + 1; XY[tid] = BlockSum[i]; XY[tid + 1] = BlockSum[i] + BlockSum[i + 1]; __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 512) { XY[index] += XY[index - 2]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 512) { XY[index] += XY[index - 4]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 512) { XY[index] += XY[index - 8]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 512) { XY[index] += XY[index - 16]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 512) { XY[index] += XY[index - 32]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 512) { XY[index] += XY[index - 64]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 512) { XY[index] += XY[index - 128]; } __syncthreads(); if (index < 512) { XY[511] += XY[255]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 384) { XY[index + 128] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 448) { XY[index + 64] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 480) { XY[index + 32] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 496) { XY[index + 16] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 504) { XY[index + 8] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 508) { XY[index + 4] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 510) { XY[index + 2] += XY[index]; } __syncthreads(); if (threadIdx.x == 0) { BlockSum[1 + i] = XY[tid + 1]; } else { BlockSum[i] = XY[tid] + XY[tid - 1]; BlockSum[1 + i] = XY[tid + 1]; } } __global__ void work_efficient_Adds_kernel(int *BlockSum, int *prefix_num) { int tid = (blockIdx.x << 9) + (threadIdx.x << 1) + 1; //blockIdx.x*blockDim.x + threadIdx.x prefix_num[tid] = BlockSum[blockIdx.x] + prefix_num[tid]; prefix_num[tid + 1] = BlockSum[blockIdx.x] + prefix_num[tid + 1]; } /*********************************************************************************************************** /***CUDA_DCT8_kernel /***DCT /*** X /*** MCU_total /*** X /*** ************************************************************************************************************/ __global__ void CUDA_DCT8_kernel(BSI16 *d_ydst, BYTE *d_bsrc, RIM Size, int *DEV_ZIGZAG, float *DEV_STD_QUANT_TAB_LUMIN) { __shared__ float block[512]; int OffsThreadInRow = (blockIdx.x << 6) + (threadIdx.z << 5) + (threadIdx.y << 3) + threadIdx.x; if (OffsThreadInRow >= Size.width) return; OffsThreadInRow = OffsThreadInRow - (blockIdx.x << 6); //32*16 d_bsrc += ((blockIdx.y << 3) + threadIdx.x) * Size.StrideF + (blockIdx.x << 6) + (threadIdx.z << 5) + (threadIdx.y << 3); float *bl_ptr = block + (threadIdx.z << 5) + (threadIdx.y << 3) + (threadIdx.x << 6); float Vect0 = d_bsrc[0]; float Vect1 = d_bsrc[1]; float Vect2 = d_bsrc[2]; float Vect3 = d_bsrc[3]; float Vect4 = d_bsrc[4]; float Vect5 = d_bsrc[5]; float Vect6 = d_bsrc[6]; float Vect7 = d_bsrc[7]; float X07P = Vect0 + Vect7; float X16P = Vect1 + Vect6; float X25P = Vect2 + Vect5; float X34P = Vect3 + Vect4; float X07M = Vect0 - Vect7; float X61M = Vect6 - Vect1; float X25M = Vect2 - Vect5; float X43M = Vect4 - Vect3; float X07P34PP = X07P + X34P; float X07P34PM = X07P - X34P; float X16P25PP = X16P + X25P; float X16P25PM = X16P - X25P; bl_ptr[0] = 
X07P34PP + X16P25PP; bl_ptr[2] = C_b * X07P34PM + C_e * X16P25PM; bl_ptr[4] = X07P34PP - X16P25PP; bl_ptr[6] = C_e * X07P34PM - C_b * X16P25PM; bl_ptr[1] = C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M; bl_ptr[3] = C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M; bl_ptr[5] = C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M; bl_ptr[7] = C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M; bl_ptr = block + OffsThreadInRow; Vect0 = bl_ptr[0]; Vect1 = bl_ptr[64]; Vect2 = bl_ptr[128]; Vect3 = bl_ptr[192]; Vect4 = bl_ptr[256]; Vect5 = bl_ptr[320]; Vect6 = bl_ptr[384]; Vect7 = bl_ptr[448]; X07P = Vect0 + Vect7; X16P = Vect1 + Vect6; X25P = Vect2 + Vect5; X34P = Vect3 + Vect4; X07M = Vect0 - Vect7; X61M = Vect6 - Vect1; X25M = Vect2 - Vect5; X43M = Vect4 - Vect3; X07P34PP = X07P + X34P; X07P34PM = X07P - X34P; X16P25PP = X16P + X25P; X16P25PM = X16P - X25P; d_ydst = d_ydst + blockIdx.y * (Size.width << 3) + (blockIdx.x << 9) + (threadIdx.z << 8) + (threadIdx.y << 6); DEV_STD_QUANT_TAB_LUMIN += threadIdx.x; DEV_ZIGZAG += threadIdx.x; d_ydst[DEV_ZIGZAG[0]] = (X07P34PP + X16P25PP)* DEV_STD_QUANT_TAB_LUMIN[0]; d_ydst[DEV_ZIGZAG[8]] = (C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M) * DEV_STD_QUANT_TAB_LUMIN[8]; d_ydst[DEV_ZIGZAG[16]] = (C_b * X07P34PM + C_e * X16P25PM) * DEV_STD_QUANT_TAB_LUMIN[16]; d_ydst[DEV_ZIGZAG[24]] = (C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M) * DEV_STD_QUANT_TAB_LUMIN[24]; d_ydst[DEV_ZIGZAG[32]] = (X07P34PP - X16P25PP) * DEV_STD_QUANT_TAB_LUMIN[32]; d_ydst[DEV_ZIGZAG[40]] = (C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M) * DEV_STD_QUANT_TAB_LUMIN[40]; d_ydst[DEV_ZIGZAG[48]] = (C_e * X07P34PM - C_b * X16P25PM) * DEV_STD_QUANT_TAB_LUMIN[48]; d_ydst[DEV_ZIGZAG[56]] = (C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M) * DEV_STD_QUANT_TAB_LUMIN[56]; } /*********************************************************************************************************** /***Data_codelength_kernel /***64mcu bitscan /***int *dc_component mcukernel64mcu bit /***int *d_ydst zigzag /***int *prefix_nummcubit ************************************************************************************************************/ __global__ void Data_codelength_kernel(BSI16 *d_ydst, int MCU_total, BYTE *d_JPEGdata, int *prefix_num, int offset, const int huffman_lut_offset) { //block id const int block_idx = (blockIdx.y * gridDim.x << 2) + (blockIdx.x << 2) + threadIdx.y; if (block_idx >= MCU_total) return; __shared__ int Length_count[(THREAD_WARP + 1) * 4]; d_ydst += block_idx << 6; const int load_idx = threadIdx.x * 2; int in_even = d_ydst[load_idx]; const int in_odd = d_ydst[load_idx + 1]; // if (threadIdx.x == 0 && block_idx != 0) in_even = in_even - d_ydst[load_idx - 64]; if (threadIdx.x == 0 && block_idx == 0) in_even = in_even - 85; //0 const unsigned int nonzero_mask = (1 << threadIdx.x) - 1; const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC const unsigned int nonzero_bitmap_1 = __ballot(in_odd); const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1; const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask); //0 int zeros_before_even = 2 * (zero_pair_count + threadIdx.x - 32); if ((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) { zeros_before_even += 1; } // true if any nonzero pixel follows thread's odd pixel const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask; // ,in_even0in_odd0+1 // (the count is actually multiplied by 16) int zeros_before_odd = (in_even || !threadIdx.x) ? 
0 : zeros_before_even + 1; // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited) // otherwise only trim extra bits from the counts of following zeros const int zero_count_mask = nonzero_follows ? 0xF : 0; zeros_before_even &= zero_count_mask; zeros_before_odd &= zero_count_mask; int even_lut_offset = huffman_lut_offset; if (0 == threadIdx.x) { // first thread uses DC part of the table for its even value even_lut_offset += 256 + 1; } // block if (0 == ((threadIdx.x ^ 31) | in_odd)) { // zeros_before_odd16 zeros_before_odd = 16; } // each thread gets codeword for its two pixels unsigned int even_code = gpuhuffman_encode_value(zeros_before_even, in_even, even_lut_offset); unsigned int odd_code = gpuhuffman_encode_value(zeros_before_odd, in_odd, huffman_lut_offset); int *bl_ptr = Length_count + (THREAD_WARP + 1) * threadIdx.y; const unsigned int even_code_size = even_code & 31; const unsigned int odd_code_size = odd_code & 31; int bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; int code_nbits = bit_length; //BLOCK unsigned int prefix_bitmap = __ballot(bit_length); int prefix_count = __popc(prefix_bitmap & nonzero_mask); if (bit_length) { bl_ptr[prefix_count] = bit_length; __syncthreads(); // for (int j = 0; j < prefix_count; j++) { code_nbits = code_nbits + bl_ptr[j]; } } if (threadIdx.x == 31) { prefix_num[block_idx + 1] = code_nbits + 8; } //d_JPEGdata BYTE *Write_JPEGdata = d_JPEGdata + (block_idx << 6); const int bit_location = code_nbits - bit_length; const int byte_restbits = (8 - (bit_location & MASK)); const int byte_location = bit_location >> SHIFT; int write_bytelocation = byte_location; // int length = bit_length; uint64_t threadwrite_code = ((uint64_t)even_code << (24 + byte_restbits)) + ((uint64_t)odd_code << (24 + byte_restbits - even_code_size)); int right_shift = 56; if (byte_restbits != 8) { write_bytelocation++; length -= byte_restbits; right_shift -= 8; } for (int i = length; i > 0; i = i - 8) { Write_JPEGdata[write_bytelocation] = (threadwrite_code >> right_shift) & 0XFF; right_shift -= 8; write_bytelocation++; } if (byte_restbits != 8) { if (bit_length < byte_restbits && bit_length) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; __syncthreads(); if (bit_length >= byte_restbits) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; } } __global__ void CUDA_YCrCb_codelength_kernel(BSI16 *d_ydst, BYTE *d_JPEGdata, int *prefix_num, int MCU_total, int offset, int cycle) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x int bit_location = 0; if (tid >= MCU_total) return; int in_even, zeros_before = 0; d_ydst += tid << 6; // if (tid == 0) in_even = d_ydst[0] - 85; else in_even = d_ydst[0] - d_ydst[-64]; int in_odd = d_ydst[1]; d_JPEGdata = d_JPEGdata + (tid << 6); unsigned int even_code = gpuhuffman_encode_value(0, in_even, (256 + 1) * 3); unsigned int odd_code = gpuhuffman_encode_value(0, in_odd, (256 + 1) * 2); unsigned int even_code_size = even_code & 31; unsigned int odd_code_size = odd_code & 31; int bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; write_bitstream(even_code, odd_code, bit_length, bit_location, even_code_size, d_JPEGdata); bit_location += bit_length; for (int j = 2; j < cycle; j = j + 2) { in_even = d_ydst[j]; in_odd = d_ydst[j + 1]; if (!in_even) zeros_before++; odd_code = 0; even_code = 0; if 
(in_even) even_code = gpuhuffman_encode_value(zeros_before, in_even, (256 + 1) * 2); zeros_before = in_even ? 0 : zeros_before + 1; if (in_odd) odd_code = gpuhuffman_encode_value(zeros_before, in_odd, (256 + 1) * 2); if (in_even || in_odd) { even_code_size = even_code & 31; odd_code_size = odd_code & 31; bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; write_bitstream(even_code, odd_code, bit_length, bit_location, even_code_size, d_JPEGdata); bit_location += bit_length; } } write_bitstream(0, 0, 2, bit_location, 0, d_JPEGdata); prefix_num[tid * 3 + offset] = bit_location + 2; } __global__ void adds_prefixsum(int *dc_component, int *prefix_num, int MCU_total) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x if (tid >= MCU_total) return; prefix_num[tid + 1] = dc_component[blockIdx.x] + prefix_num[tid + 1]; } __global__ void adds_prefixsum1(int *dc_component, int *prefix_num, int MCU_total) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x if (tid >= (MCU_total - 1) >> 7) return; prefix_num[tid + 1] = dc_component[blockIdx.x] + prefix_num[tid + 1]; } __global__ void data_shift_kernel(BYTE *d_JPEGdata, int *prefix_num, int MCU_total, int *d_datalen, int *dc_component, int* last_prefix_num) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x int byte_location = 0; if (tid >= MCU_total) return; //tid>MCU d_JPEGdata = d_JPEGdata + (tid << 6); // BYTE *JPEG_Writedatalocation = d_JPEGdata + 63; //BYTE BYTE byte_tmp; int length = prefix_num[tid + 1] - prefix_num[tid] - 8; //MCUbit int right_shift = prefix_num[tid] & MASK; //MCUbitMCUbitbit int left_shift = 8 - right_shift; //MCUbitbit byte_location = (length - 1) >> SHIFT; //MCUbit int bit_rest = 8 - length + ((byte_location << SHIFT)); length = length + right_shift + 8; //MCUbit length >>= SHIFT; if (right_shift >= bit_rest) { JPEG_Writedatalocation[0] = (d_JPEGdata[byte_location] << left_shift); JPEG_Writedatalocation--; } for (; byte_location > 0; byte_location--) { byte_tmp = (d_JPEGdata[byte_location] >> right_shift) | (d_JPEGdata[byte_location - 1] << left_shift); if (byte_tmp == 0xff) { length++; JPEG_Writedatalocation[0] = 0; JPEG_Writedatalocation[-1] = byte_tmp; JPEG_Writedatalocation -= 2; } else { JPEG_Writedatalocation[0] = byte_tmp; JPEG_Writedatalocation--; } } byte_tmp = d_JPEGdata[0] >> right_shift; if (byte_tmp == 0xff) { length++; JPEG_Writedatalocation[0] = 0; JPEG_Writedatalocation--; JPEG_Writedatalocation[0] = byte_tmp; } else { JPEG_Writedatalocation[0] = byte_tmp; } last_prefix_num[tid + 1] = length; } __global__ void Data_encodelater1_kernel(int *prefix_num, BYTE *d_JPEGdata, BYTE *last_AC, int MCU_total, int *d_datalen) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x if (tid >= MCU_total) return; int length; if (tid == MCU_total - 1) d_datalen[0] = prefix_num[tid + 1]; length = prefix_num[tid + 1] - prefix_num[tid]; last_AC = last_AC + prefix_num[tid]; d_JPEGdata = d_JPEGdata + (tid << 6) + 64 - length; for (int i = 0; i < length; i++) { last_AC[i] = d_JPEGdata[i]; } } //-----------------------------------------------------------------------------------------------// /************************************************* : RmwRead8BitBmpFile2Img // : .bmp // const char * filename . unsigned char* pImg :24 . unsigned char* Binarization : . int* width : . int* width :// unsigned char* pImg NULL . 
unsigned char* Binarization 24NULL// : bool -- // : . . (Byte) = width * height * ImgDeep // *************************************************/ bool RmwRead8BitBmpFile2Img(const char * filename, unsigned char*pImg, unsigned char*Binarization, int *width, int *height) { FILE *binFile; BITMAPFILEHEADER fileHeader;// BITMAPINFOHEADER bmpHeader;// BOOL isRead = TRUE; int ImgDeep; int linenum, ex; // nenum: //open file if ((binFile = fopen(filename, "rb")) == NULL) return NULL; //read struts if (fread((void *)&fileHeader, 1, sizeof(fileHeader), binFile) != sizeof(fileHeader)) isRead = FALSE; if (fread((void *)&bmpHeader, 1, sizeof(bmpHeader), binFile) != sizeof(bmpHeader)) isRead = FALSE; if (isRead == FALSE || fileHeader.bfOffBits<sizeof(fileHeader) + sizeof(bmpHeader)) { fclose(binFile); return NULL; } //read image info *width = bmpHeader.biWidth; *height = bmpHeader.biHeight; ImgDeep = bmpHeader.biBitCount / 8;// linenum = (*width * ImgDeep + 3) / 4 * 4;// ex = linenum - *width * ImgDeep; // fseek(binFile, fileHeader.bfOffBits, SEEK_SET); // if (ImgDeep == 1) { if (Binarization != NULL) for (int i = 0; i<*height; i++) { int r = fread(Binarization + (*height - i - 1)*(*width)*ImgDeep, sizeof(unsigned char), (*width)*ImgDeep, binFile); if (r != (*width)*ImgDeep) { delete Binarization; fclose(binFile); return NULL; } fseek(binFile, ex, SEEK_CUR); } fclose(binFile); return true; } // else if (ImgDeep == 3) { //pImg = new uchar[(*width)*(*height)*ImgDeep]; if (pImg != NULL) { for (int i = 0; i < *height; i++) { int r = fread(pImg + (*height - i - 1)*(*width)*ImgDeep, sizeof(unsigned char), (*width)*ImgDeep, binFile);//** if (r != (*width)*ImgDeep)//** { fclose(binFile); return NULL; } fseek(binFile, ex, SEEK_CUR); } fclose(binFile); //bmp if (Binarization != NULL) { for (int i = 0; i < *height; i++) for (int j = 0; j < *width; j++) { Binarization[j + i * (*width)] = pImg[j * ImgDeep + i * (*width) * ImgDeep] * 0.299 + pImg[j * ImgDeep + 1 + i * (*width) * ImgDeep] * 0.587 + pImg[j * ImgDeep + 2 + i * (*width) * ImgDeep] * 0.114; } } return true; } else// { unsigned char *tempImg = new uchar[(*width)*(*height)*ImgDeep]; if (tempImg != NULL) { for (int i = 0; i < *height; i++) { int r = fread(tempImg + (*height - i - 1)*(*width)*ImgDeep, sizeof(unsigned char), (*width)*ImgDeep, binFile);//** if (r != (*width)*ImgDeep)//** { delete[]tempImg; fclose(binFile); return NULL; } fseek(binFile, ex, SEEK_CUR); } fclose(binFile); //bmp if (Binarization != NULL) { for (int i = 0; i < *height; i++) for (int j = 0; j < *width; j++) { Binarization[j + i * (*width)] = tempImg[j * ImgDeep + i * (*width) * ImgDeep] * 0.299 + tempImg[j * ImgDeep + 1 + i * (*width) * ImgDeep] * 0.587 + tempImg[j * ImgDeep + 2 + i * (*width) * ImgDeep] * 0.114; } } delete[]tempImg; return true; } } } else return false; } /************************************************* : RmwWrite8bitImg2BmpFile // : .bmp // unsigned char* pImg : . int* width : . int* width : . const char * filename // const char * filename .bmp// : Suc(bool) -- // : . . 
(Byte) = width * height * ImgDeep // *************************************************/ bool RmwWrite8bitImg2BmpFile(unsigned char *pImg, int width, int height, const char * filename) { FILE * BinFile; BITMAPFILEHEADER FileHeader; BITMAPINFOHEADER BmpHeader; int i, extend; bool Suc = true; unsigned char p[4], *pCur; unsigned char* ex; extend = (width + 3) / 4 * 4 - width; // Open File if ((BinFile = fopen(filename, "w+b")) == NULL) { return false; } // FileHeader.bfType = ((WORD)('M' << 8) | 'B'); FileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + 256 * 4L;//2 FileHeader.bfSize = FileHeader.bfOffBits + (width + extend)*height; FileHeader.bfReserved1 = 0; FileHeader.bfReserved2 = 0; if (fwrite((void *)&FileHeader, 1, sizeof(FileHeader), BinFile) != sizeof(FileHeader)) Suc = false; // Fill the ImgHeader BmpHeader.biSize = 40; BmpHeader.biWidth = width; BmpHeader.biHeight = height; BmpHeader.biPlanes = 1; BmpHeader.biBitCount = 8; BmpHeader.biCompression = 0; BmpHeader.biSizeImage = 0; BmpHeader.biXPelsPerMeter = 0; BmpHeader.biYPelsPerMeter = 0; BmpHeader.biClrUsed = 0; BmpHeader.biClrImportant = 0; if (fwrite((void *)&BmpHeader, 1, sizeof(BmpHeader), BinFile) != sizeof(BmpHeader)) Suc = false; // for (i = 0, p[3] = 0; i<256; i++) { p[0] = p[1] = p[2] = i; // blue,green,red; //255 - i if (fwrite((void *)p, 1, 4, BinFile) != 4) { Suc = false; break; } } if (extend) { ex = new unsigned char[extend]; // 0~3 memset(ex, 0, extend); } //write data for (pCur = pImg + (height - 1)*width; pCur >= pImg; pCur -= width) { if (fwrite((void *)pCur, 1, width, BinFile) != (unsigned int)width) Suc = false; // if (extend) // 0 if (fwrite((void *)ex, 1, extend, BinFile) != 1) Suc = false; } // return; fclose(BinFile); if (extend) delete[] ex; return Suc; } /************************************************* : GetImgBoxHost // : CPU . vector<RecData>gHostRecData . // const char * filename -(*.bmp) // // : // : vector<RecData>gHostRecData gHostRecData, . 
0128// *************************************************/ void GetImgBoxHost(const char *path) { Parameter devpar; // devpar.ImgHeight = gStructVarible.ImgHeight; devpar.ImgWidth = gStructVarible.ImgWidth; devpar.Threshold = gStructVarible.Threshold; devpar.LengthMin = gStructVarible.LengthMin; devpar.LengthMax = gStructVarible.LengthMax; devpar.AreaMin = gStructVarible.AreaMin; devpar.AreaMax = gStructVarible.AreaMax; devpar.PictureNum = gStructVarible.PictureNum; devpar.RecPadding = gStructVarible.RecPadding; // const cv::Point directions[8] = { { 0, 1 },{ 1,1 },{ 1, 0 },{ 1, -1 },{ 0, -1 },{ -1, -1 },{ -1, 0 },{ -1, 1 } }; //CPU if (gHostRecData.size() != 0) gHostRecData.clear(); // unsigned char *ImgHostdata = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum]; //qwtBUG unsigned char *m_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];// unsigned char *n_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];// unsigned char *c_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];// unsigned char *temp_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];// // int Picoffset = devpar.ImgHeight * devpar.ImgWidth; for (int j = 0; j < devpar.PictureNum; j++) { RmwRead8BitBmpFile2Img(path, NULL, ImgHostdata + j*Picoffset, &devpar.ImgWidth, &devpar.ImgHeight); } // for (int i = 0; i <devpar.ImgHeight*devpar.PictureNum; i++) { for (int j = 0; j < devpar.ImgWidth; j++) { m_ptr[j + i * devpar.ImgWidth] = ImgHostdata[j + i * devpar.ImgWidth] > devpar.Threshold ? 255 : 0; c_ptr[j + i * devpar.ImgWidth] = m_ptr[j + i * devpar.ImgWidth]; n_ptr[j + i * devpar.ImgWidth] = m_ptr[j + i * devpar.ImgWidth]; temp_ptr[j + i * devpar.ImgWidth] = m_ptr[j + i * devpar.ImgWidth]; } } // for (int i = 1; i<devpar.ImgHeight*devpar.PictureNum - 1; i++) for (int j = 1; j <devpar.ImgWidth - 1; j++) { if (m_ptr[j + i * devpar.ImgWidth] == 0) { if (m_ptr[j - 1 + (i - 1)*devpar.ImgWidth] != 0 || m_ptr[j + (i - 1)*devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i - 1)*devpar.ImgWidth] != 0 || m_ptr[j - 1 + i * devpar.ImgWidth] != 0 || m_ptr[j + 1 + i * devpar.ImgWidth] != 0 || m_ptr[j - 1 + (i + 1)*devpar.ImgWidth] != 0 || m_ptr[j + (i + 1)*devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i + 1)*devpar.ImgWidth] != 0) { n_ptr[j + i * devpar.ImgWidth] = 255; c_ptr[j + i * devpar.ImgWidth] = 255; temp_ptr[j + i * devpar.ImgWidth] = 255; } } } // c_ptr for (int i = 1; i<devpar.ImgHeight*devpar.PictureNum - 1; i++) for (int j = 1; j < devpar.ImgWidth - 1; j++) { if (n_ptr[j + i * devpar.ImgWidth] != 0) { if (n_ptr[j + (i - 1)*devpar.ImgWidth] != 0 && n_ptr[j - 1 + i * devpar.ImgWidth] != 0 && n_ptr[j + 1 + i * devpar.ImgWidth] != 0 && n_ptr[j + (i + 1)*devpar.ImgWidth] != 0) { c_ptr[j + i * devpar.ImgWidth] = 0; temp_ptr[j + i * devpar.ImgWidth] = 0; } } } // short xmax; short xmin; short ymax; short ymin; // int i, j, counts = 0, curr_d = 0;//counts curr_dID short cLength; // for (i = 1; i <devpar.ImgHeight*devpar.PictureNum - 1; i++) for (j = 1; j <devpar.ImgWidth - 1; j++) { // cv::Point b_pt = cv::Point(i, j); cv::Point c_pt = cv::Point(i, j); // if (255 == c_ptr[j + i * devpar.ImgWidth]) { cLength = 1; xmin = xmax = i; ymin = ymax = j; bool tra_flag = false;// c_ptr[j + i * devpar.ImgWidth] = 0;// 0 // while (!tra_flag) { // for (counts = 0; counts < 8; counts++) { // if (curr_d >= 8) { curr_d -= 8; } if (curr_d < 0) { curr_d += 8; } // root c_pt = cv::Point(b_pt.x + directions[curr_d].x, b_pt.y + directions[curr_d].y); 
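                    // Reading of this step: c_pt is the neighbour of the current contour pixel
                    // b_pt in direction curr_d. The enclosing for (counts) loop tries the 8
                    // neighbours in turn; when a contour pixel is found, curr_d is backed up by
                    // 2 so that the next search starts just outside the traced shape, which
                    // appears to be the usual 8-neighbour (Moore) boundary-following scheme.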
                    // stop if the probe leaves the image
                    if ((c_pt.x > 0) && (c_pt.x < devpar.ImgHeight*devpar.PictureNum - 1) && (c_pt.y > 0) && (c_pt.y < devpar.ImgWidth - 1))
                    {
                        // found the next contour pixel
                        if (255 == c_ptr[c_pt.x*devpar.ImgWidth + c_pt.y])
                        {
                            // grow the bounding box
                            xmax = xmax > c_pt.x ? xmax : c_pt.x;
                            ymax = ymax > c_pt.y ? ymax : c_pt.y;
                            xmin = xmin < c_pt.x ? xmin : c_pt.x;
                            ymin = ymin < c_pt.y ? ymin : c_pt.y;
                            curr_d -= 2;                                    // back the search direction up two steps
                            c_ptr[c_pt.x*devpar.ImgWidth + c_pt.y] = 0;     // mark the pixel as visited
                            // c_pt becomes the new current pixel b_pt
                            b_pt.x = c_pt.x;
                            b_pt.y = c_pt.y;
                            cLength++;
                            break;                                          // leave the neighbour scan
                        }
                    }
                    curr_d++;
                } // end for (counts)
                // no unvisited contour neighbour remains: the trace is finished
                if (8 == counts)
                {
                    curr_d = 0;
                    tra_flag = true;
                    if (cLength < devpar.LengthMax && (cLength > devpar.LengthMin))
                    {
                        RecData tempRecData;
                        int tempcount = 0;
                        double ratio = double(xmax - xmin) / double(ymax - ymin);
                        if (ratio < 1.5 && ratio > 0.7)     // bounding box must be roughly square
                        {
                            // sample the 3x3 neighbourhood around the box centre
                            for (int k = -1; k < 2; k++)
                            {
                                if ((xmax + xmin) / 2 < devpar.ImgHeight*devpar.PictureNum && (ymax + ymin) / 2 < devpar.ImgWidth)
                                {
                                    tempcount += temp_ptr[(ymax + ymin) / 2 - 1 + ((xmax + xmin) / 2 + k)*devpar.ImgWidth];
                                    tempcount += temp_ptr[(ymax + ymin) / 2 + ((xmax + xmin) / 2 + k)*devpar.ImgWidth];
                                    tempcount += temp_ptr[(ymax + ymin) / 2 + 1 + ((xmax + xmin) / 2 + k)*devpar.ImgWidth];
                                }
                            }
                            // count contour hits along the vertical and horizontal midlines of the box
                            for (int k = xmin; k <= xmax; k++)  // height direction
                            {
                                tempcount += temp_ptr[(ymax + ymin) / 2 + k*devpar.ImgWidth] > 0 ? 1 : 0;
                            }
                            for (int k = ymin; k <= ymax; k++)  // width direction
                            {
                                tempcount += temp_ptr[k + (xmax + xmin) / 2 * devpar.ImgWidth] > 0 ? 1 : 0;
                            }
                            if (tempcount <= 4)                 // hollow centre, single closed contour
                            {
                                // pad the box by RecPadding and clamp it to the image
                                if (xmin - devpar.RecPadding < 0) tempRecData.RecXmin = 0;
                                else tempRecData.RecXmin = xmin - devpar.RecPadding;
                                if (ymin - devpar.RecPadding < 0) tempRecData.RecYmin = 0;
                                else tempRecData.RecYmin = ymin - devpar.RecPadding;
                                if (xmax + devpar.RecPadding > devpar.ImgHeight*devpar.PictureNum - 1) tempRecData.RecXmax = devpar.ImgHeight*devpar.PictureNum - 1;
                                else tempRecData.RecXmax = xmax + devpar.RecPadding;
                                if (ymax + devpar.RecPadding > devpar.ImgWidth) tempRecData.RecYmax = devpar.ImgWidth - 1;
                                else tempRecData.RecYmax = ymax + devpar.RecPadding;
                                gHostRecData.push_back(tempRecData);
                            }
                        }
                    }
                    break;
                } // end if (8 == counts)
            } // end while (!tra_flag)
        } // end if (start pixel)
    } // end for (j)
    // number of boxes found per single image
    gSingleImgRecNum = gHostRecData.size() / devpar.PictureNum;
    int rRecNum = (gHostRecData.size() + 127) / 128 * 128;      // pad the box count to a multiple of 128
    gHostRecData.resize(rRecNum, RecData{ 0,0,0,0 });
    gRecNum = rRecNum;                                          // total (padded) number of boxes
    // release host scratch buffers
    delete[]ImgHostdata;
    delete[]m_ptr;
    delete[]n_ptr;
    delete[]c_ptr;
    delete[]temp_ptr;
}
//--------------------------------------------------------------------------------//
//--------------------------------------------------------------------------------------//
/*----------------------------------------------------------------*/
class SIM : public Runnable
{
public:
    HardwareInfo HardwarePar;   // hardware / device configuration for this worker
    Parameter Devpar;           // image-processing parameters
    ~SIM()                      // destructor
    {
    }
    void Run()
    {
        // bind this worker thread to its GPU
        hipSetDevice(HardwarePar.GpuId);
        hipError_t err, err1;
        clock_t start, end;
        clock_t startp, overp;
        clock_t time3;
        /* per-run bookkeeping */
        int img_index = 0;              // index of the image group currently being processed
        char DataFilename[100];
        char strFilename[100];
        const char* path = Devpar.DataReadPath;
        int OutPutInitialIndex = 0;     // starting index for the .bin result files
        int BufferIndex = 0;            // which page-locked host buffer is being consumed
        long long Bufferoffset = 0;     // byte offset of the current image inside that buffer
        bool DatafullFlag = false;      // true once a full buffer has been claimed for the GPU
        /*----------------------------------------------------------------*/
        Devpar.ImgChannelNum = Devpar.ImgBitDeep / 8;                       // bytes per pixel
        Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128;    // width padded to a multiple of 128
        Devpar.RowThreadNum = Devpar.ImgHeight*Devpar.PictureNum / Devpar.PicBlockSize;
        Devpar.ColThreadNum = (Devpar.ImgWidth / Devpar.PicBlockSize + 127) / 128 * 128;
        dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, Devpar.ImgHeight*Devpar.PictureNum, 1);
        dim3 mGrid2(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1);
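        // Worked example with hypothetical settings (not values taken from this project):
        // ImgWidth = 1024, ImgHeight = 768, PictureNum = 1, PicBlockSize = 8 gives
        //   ImgMakeborderWidth = (1024+127)/128*128 = 1024,
        //   mGrid1 = dim3(1024/128, 768, 1) = (8, 768, 1) with 128 threads per block
        //     -> one thread per padded pixel for the per-pixel kernels, and
        //   RowThreadNum = 768/8 = 96, ColThreadNum = (1024/8 + 127)/128*128 = 128,
        //   mGrid2 = dim3(128/128, 96, 1) = (1, 96, 1) with 128 threads per block
        //     -> one thread per 8x8 tile for GetCounter / SelectTrueBox / GetInfo.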
/*----------------------------------------------------------------*/ //CUDA hipStream_t *CStreams; CStreams = (hipStream_t *)malloc(CUDAStreams * sizeof(hipStream_t)); /**** ****/ unsigned char* DevPicColor[CUDAStreams]; unsigned char* DevPicGray[CUDAStreams];// unsigned char* DevPadding[CUDAStreams];// qwt7.26 unsigned char* Dev2Val[CUDAStreams];// unsigned char* DevCounter[CUDAStreams];//findcountores for (int i = 0; i < CUDAStreams; i++) { hipStreamCreate(&(CStreams[i])); hipMalloc((void**)&DevPicColor[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum * sizeof(unsigned char)); hipMalloc((void**)&DevPicGray[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum * sizeof(unsigned char)); hipMalloc((void**)&DevPadding[i], Devpar.ImgHeight * Devpar.ImgMakeborderWidth*Devpar.PictureNum * sizeof(unsigned char)); //qwt7.26 hipMalloc((void**)&Dev2Val[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); hipMalloc((void**)&DevCounter[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); } /**/ // short *gpHostLength[CUDAStreams]; short *gpHostArea[CUDAStreams]; double *gpHostXpos[CUDAStreams]; double *gpHostYpos[CUDAStreams]; short *gpHostIndex[CUDAStreams]; /**/ short * gpDevRecXLeft[CUDAStreams]; short * gpDevRecYLeft[CUDAStreams]; short * gpDevRecXRight[CUDAStreams]; short * gpDevRecYRight[CUDAStreams]; // short *gpDevLength[CUDAStreams]; short *gpDevArea[CUDAStreams]; double *gpDevXpos[CUDAStreams]; double *gpDevYpos[CUDAStreams]; short *gpDevIndex[CUDAStreams]; //GPUGPU for (int i = 0; i < CUDAStreams; i++) { hipHostMalloc((void**)&gpHostLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), hipHostMallocDefault);// hipHostMalloc((void**)&gpHostArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), hipHostMallocDefault);// hipHostMalloc((void**)&gpHostXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), hipHostMallocDefault);//x hipHostMalloc((void**)&gpHostYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), hipHostMallocDefault);//y hipHostMalloc((void**)&gpHostIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), hipHostMallocDefault);// hipMalloc((void**)&gpDevRecXLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// xmin hipMalloc((void**)&gpDevRecYLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// ymin hipMalloc((void**)&gpDevRecXRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// xmax hipMalloc((void**)&gpDevRecYRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// ymax hipMalloc((void**)&gpDevLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// hipMalloc((void**)&gpDevArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// hipMalloc((void**)&gpDevXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double) * 2);// xpos hipMalloc((void**)&gpDevYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double) * 2);// ypos err = hipMalloc((void**)&gpDevIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// } // while ((img_index + CUDAStreams) <= gHostPathImgNumber && gStructVarible.TerminateFlag == 0) { //-DevPicGray if (Devpar.ImgChannelNum == 1) { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i)* Devpar.ImgHeight * Devpar.ImgWidth; hipMemcpyAsync(DevPicGray[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* 
Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum, hipMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { // GrayMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicGray[i], DevPadding[i], Devpar); } } else if (Devpar.ImgChannelNum == 3)//-DevPicColor { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i)*Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum; hipMemcpyAsync(DevPicColor[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, hipMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++)//+padding { ColorMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicColor[i], DevPadding[i], Devpar); } } for (int i = 0; i < CUDAStreams; i++) { // Binarization << <mGrid1, 128, 0, CStreams[i] >> > (DevPadding[i], Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // Dilation << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(Dev2Val[i], DevCounter[i], sizeof(uchar)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, hipMemcpyDeviceToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { Erosion << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // GetCounter << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], Devpar);// } for (int i = 0; i < CUDAStreams; i++) { // SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // GetNonRepeatBox << <mGrid2, 128, 0, CStreams[i] >> > (gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //// GetInfo << <mGrid2, 128, 0, CStreams[i] >> > (DevPadding[i], gpDevIndex[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevXpos[i], gpDevYpos[i], gpDevArea[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostLength[i], gpDevLength[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostArea[i], gpDevArea[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostXpos[i], gpDevXpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostYpos[i], gpDevYpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostIndex[i], gpDevIndex[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { err = hipStreamSynchronize(CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { // vector<CircleInfo>myInfo; for (int j = 0; j < Devpar.ColThreadNum * 
Devpar.RowThreadNum; j++) { if (gpHostIndex[i][j] != 0) { CircleInfo temp; temp.index = (short)j; temp.length = gpHostLength[i][j]; temp.area = gpHostArea[i][j]; temp.xpos = gpHostXpos[i][j]; temp.ypos = gpHostYpos[i][j]; myInfo.push_back(temp); } } SignPoint.PointNumbers = myInfo.size(); // if (myInfo.size() > 0) { FILE* fp; sprintf_s(DataFilename, "%s\\%d.bin", Devpar.DataReadPath, img_index + HardwarePar.DeviceID * HardwarePar.CUDAStreamNum + i + 1); //3DataFilename fp = fopen(DataFilename, "wb"); fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp); fclose(fp); } } img_index += HardwarePar.DeviceCount * HardwarePar.CUDAStreamNum; } /**** ****/ /**** ****/ if (gStructVarible.TerminateFlag == 1) { char buffer[20]; sprintf_s(buffer, "%s%d", "img_index = ", img_index); FILE* fp; sprintf_s(DataFilename, "%s\\%d.txt", Devpar.DataReadPath, 0); //3DataFilename fp = fopen(DataFilename, "wb"); fwrite(buffer, sizeof(char) * 20, 1, fp); fclose(fp); } /**********************/ // for (int i = 0; i < CUDAStreams; i++) { hipFree(DevPicColor[i]); hipFree(DevPicGray[i]); hipFree(DevPadding[i]); hipFree(Dev2Val[i]); hipFree(DevCounter[i]); hipHostFree(gpHostLength[i]); hipHostFree(gpHostArea[i]); hipHostFree(gpHostXpos[i]); hipHostFree(gpHostYpos[i]); hipHostFree(gpHostIndex[i]); // hipFree(gpDevRecXLeft[i]); hipFree(gpDevRecYLeft[i]); hipFree(gpDevRecXRight[i]); hipFree(gpDevRecYRight[i]); hipFree(gpDevLength[i]); hipFree(gpDevArea[i]); hipFree(gpDevXpos[i]); hipFree(gpDevYpos[i]); hipFree(gpDevIndex[i]); hipStreamDestroy(CStreams[i]); } } }; class R : public Runnable { public: Parameter Devpar;// HardwareInfo HardwarePar;// static int mRindex; ~R() { } void mydelay(double sec)// { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { //GPU hipSetDevice(HardwarePar.GpuId); // /***********/ int img_index; char strFilename[100]; const char* path = Devpar.DataReadPath; int OutPutInitialIndex = 0; //Bin int BufferIndex = 0;// long long Bufferoffset = 0;// bool DatafullFlag = false;//trueGPU /*----------------------------------------------------------------*/ Devpar.ImgChannelNum = Devpar.ImgBitDeep / 8;// Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128;// Devpar.RowThreadNum = Devpar.ImgHeight*Devpar.PictureNum / Devpar.PicBlockSize;//BUG-PicBlock Devpar.ColThreadNum = (Devpar.ImgWidth / Devpar.PicBlockSize + 127) / 128 * 128; dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, Devpar.ImgHeight*Devpar.PictureNum, 1); dim3 mGrid2(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1); /*----------------------------------------------------------------*/ //CUDA hipStream_t *CStreams; CStreams = (hipStream_t *)malloc(CUDAStreams * sizeof(hipStream_t)); /**** ****/ unsigned char* DevPicColor[CUDAStreams]; unsigned char* DevPicGray[CUDAStreams];// unsigned char* DevPadding[CUDAStreams];// qwt7.26 unsigned char* Dev2Val[CUDAStreams];// unsigned char* DevCounter[CUDAStreams];//findcountores for (int i = 0; i < CUDAStreams; i++) { hipStreamCreate(&(CStreams[i])); hipMalloc((void**)&DevPicColor[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum * sizeof(unsigned char)); hipMalloc((void**)&DevPicGray[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum * sizeof(unsigned char)); hipMalloc((void**)&DevPadding[i], Devpar.ImgHeight * Devpar.ImgMakeborderWidth*Devpar.PictureNum * sizeof(unsigned char)); hipMalloc((void**)&Dev2Val[i], sizeof(unsigned char) * Devpar.ImgHeight * 
Devpar.ImgMakeborderWidth * Devpar.PictureNum); hipMalloc((void**)&DevCounter[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); } /**/ // short *gpHostLength[CUDAStreams]; short *gpHostArea[CUDAStreams]; double *gpHostXpos[CUDAStreams]; double *gpHostYpos[CUDAStreams]; short *gpHostIndex[CUDAStreams]; /**/ short * gpDevRecXLeft[CUDAStreams]; short * gpDevRecYLeft[CUDAStreams]; short * gpDevRecXRight[CUDAStreams]; short * gpDevRecYRight[CUDAStreams]; // short *gpDevLength[CUDAStreams]; short *gpDevArea[CUDAStreams]; double *gpDevXpos[CUDAStreams]; double *gpDevYpos[CUDAStreams]; short *gpDevIndex[CUDAStreams]; //GPUGPU for (int i = 0; i < CUDAStreams; i++) { hipHostMalloc((void**)&gpHostLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), hipHostMallocDefault);// hipHostMalloc((void**)&gpHostArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), hipHostMallocDefault);// hipHostMalloc((void**)&gpHostXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), hipHostMallocDefault);//x hipHostMalloc((void**)&gpHostYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), hipHostMallocDefault);//y hipHostMalloc((void**)&gpHostIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), hipHostMallocDefault);// hipMalloc((void**)&gpDevRecXLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// xmin hipMalloc((void**)&gpDevRecYLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymin hipMalloc((void**)&gpDevRecXRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// xmax hipMalloc((void**)&gpDevRecYRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymax hipMalloc((void**)&gpDevLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// hipMalloc((void**)&gpDevArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// hipMalloc((void**)&gpDevXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// xpos hipMalloc((void**)&gpDevYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// ypos hipMalloc((void**)&gpDevIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// } ExtractPointInitialSuccessFlag[HardwarePar.DeviceID] = true; while (!ExtractPointSuccess) { mydelay(0.01); vector<CircleInfo>myInfo; img_index = 0;// Bufferoffset = 0;// // while (true) { gExtrackPointLock.lock(); mRindex = mRindex % (HardwareParam.DeviceCount + 1); if (PageLockBufferEmpty[mRindex] == false && PageLockBufferWorking[mRindex] == false) { PageLockBufferWorking[mRindex] = true;//-- OutPutInitialIndex = PageLockBufferStartIndex[mRindex] * Bufferlength;// BufferIndex = mRindex; DatafullFlag = true; mRindex++; gExtrackPointLock.unlock(); break; } mRindex++; gExtrackPointLock.unlock(); if (ExtractPointSuccess) break; } // while (DatafullFlag) { if (img_index >= Bufferlength) //qwt { gExtrackPointLock.lock(); PageLockBufferWorking[BufferIndex] = false;//--workingfalse gExtrackPointLock.unlock(); PageLockBufferEmpty[BufferIndex] = true; // DatafullFlag = false; break; } //-DevPicGray if (Devpar.ImgChannelNum == 1) { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)* Devpar.ImgHeight * Devpar.ImgWidth; hipMemcpyAsync(DevPicGray[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum, hipMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { // GrayMakeBorder << <mGrid1, 128, 0, 
CStreams[i] >> > (DevPicGray[i], DevPadding[i], Devpar); } } else if (Devpar.ImgChannelNum == 3)//-DevPicColor { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)*Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum; hipMemcpyAsync(DevPicColor[i], gHostBuffer[BufferIndex] + +Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, hipMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++)//+padding { ColorMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicColor[i], DevPadding[i], Devpar); } } for (int i = 0; i < CUDAStreams; i++) { // Binarization << <mGrid1, 128, 0, CStreams[i] >> > (DevPadding[i], Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // Dilation << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(Dev2Val[i], DevCounter[i], sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, hipMemcpyDeviceToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { Erosion << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // GetCounter << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], Devpar);// } for (int i = 0; i < CUDAStreams; i++) { // SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // GetNonRepeatBox << <mGrid2, 128, 0, CStreams[i] >> > (gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //// GetInfo << <mGrid2, 128, 0, CStreams[i] >> > (DevPadding[i], gpDevIndex[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevXpos[i], gpDevYpos[i], gpDevArea[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostLength[i], gpDevLength[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostArea[i], gpDevArea[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostXpos[i], gpDevXpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostYpos[i], gpDevYpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(gpHostIndex[i], gpDevIndex[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { hipStreamSynchronize(CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { for (int k = 0; k < Devpar.PictureNum; k++) { int hostindex = 0; int headpos = myInfo.size(); CircleInfo headInfo; headInfo.index = OutPutInitialIndex + img_index + i;// headInfo.xpos = 99999; headInfo.ypos = 99999;//xpos 
ypos headInfo.area = 0; //area0 myInfo.push_back(headInfo); for (int j = k*Devpar.ColThreadNum * Devpar.RowThreadNum / Devpar.PictureNum; j < (k + 1)*Devpar.ColThreadNum * Devpar.RowThreadNum / Devpar.PictureNum; j++) { if (gpHostIndex[i][j] != 0) { hostindex++; CircleInfo temp; temp.index = (short)hostindex; temp.length = gpHostLength[i][j]; temp.area = gpHostArea[i][j]; temp.xpos = gpHostXpos[i][j]; temp.ypos = gpHostYpos[i][j]; myInfo.push_back(temp); } } myInfo[headpos].length = hostindex;// } } img_index += HardwarePar.CUDAStreamNum*Devpar.PictureNum; } // if (myInfo.size() > 0) { FILE* fp; sprintf_s(strFilename, "%s\\%d.bin", path, OutPutInitialIndex); //3strFilename fp = fopen(strFilename, "wb"); fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp); fclose(fp); } } for (int i = 0; i < CUDAStreams; i++) { hipFree(DevPicColor[i]); hipFree(DevPicGray[i]); hipFree(DevPadding[i]); hipFree(Dev2Val[i]); hipFree(DevCounter[i]); hipHostFree(gpHostLength[i]); hipHostFree(gpHostArea[i]); hipHostFree(gpHostXpos[i]); hipHostFree(gpHostYpos[i]); hipHostFree(gpHostIndex[i]); // hipFree(gpDevRecXLeft[i]); hipFree(gpDevRecYLeft[i]); hipFree(gpDevRecXRight[i]); hipFree(gpDevRecYRight[i]); hipFree(gpDevLength[i]); hipFree(gpDevArea[i]); hipFree(gpDevXpos[i]); hipFree(gpDevYpos[i]); hipFree(gpDevIndex[i]); hipStreamDestroy(CStreams[i]); } } }; int R::mRindex = 0;// /*----------------------------------------------------------------*/ class RecR : public Runnable { public: HardwareInfo HardwarePar;// Parameter Devpar;// static int mRecindex; public: ~RecR()// { } void mydelay(double sec)// { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { //GPU hipSetDevice(HardwarePar.GpuId); // char DataFilename[100]; //---- int img_index = 0;// bin int OutPutInitialIndex = 0; //Bin int BufferIndex = 0;// long long Bufferoffset = 0;// bool DatafullFlag = false;//trueGPU const char* path = Devpar.DataReadPath; /*----------------------------------------------------------------*/ Devpar.ImgChannelNum = Devpar.ImgBitDeep / 8;// Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128;// int Gridsize = gRecNum / 128; if (Gridsize == 0)//qwt823 Gridsize = 1; /**** Grid ****/ dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, Devpar.ImgHeight*Devpar.PictureNum, 1); dim3 mGrid2(Gridsize, 1, 1); /*----------------------------------------------------------------*/ //CUDA hipStream_t *CStreams; CStreams = (hipStream_t *)malloc(CUDAStreams * sizeof(hipStream_t)); /*** ****/ unsigned char* DevPicColor[CUDAStreams]; unsigned char* DevPicGray[CUDAStreams];// unsigned char* DevPadding[CUDAStreams];// qwt7.26 unsigned char* Dev2Val[CUDAStreams];// unsigned char* DevCounter[CUDAStreams];//findcountores for (int i = 0; i < CUDAStreams; i++) { hipStreamCreate(&(CStreams[i])); hipMalloc((void**)&DevPicColor[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum * sizeof(unsigned char)); hipMalloc((void**)&DevPicGray[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum * sizeof(unsigned char)); hipMalloc((void**)&DevPadding[i], Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum * sizeof(unsigned char)); //qwt7.26 hipMalloc((void**)&Dev2Val[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); hipMalloc((void**)&DevCounter[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); } /**** ****/ // short 
*gpHostLength[CUDAStreams]; short *gpHostArea[CUDAStreams]; double *gpHostXpos[CUDAStreams]; double *gpHostYpos[CUDAStreams]; /**** ****/ // short *gpDevLength[CUDAStreams]; short *gpDevArea[CUDAStreams]; double *gpDevXpos[CUDAStreams]; double *gpDevYpos[CUDAStreams]; RecData *gpRDevRecData[CUDAStreams];//qwt821 // if (gRecNum > 0) { for (int i = 0; i < CUDAStreams; i++) { hipMalloc((void**)&gpRDevRecData[i], gRecNum * sizeof(RecData) * 2);//2 hipMemcpy(gpRDevRecData[i], &gHostRecData[0], gRecNum * sizeof(RecData), hipMemcpyHostToDevice); } } //GPUGPU for (int i = 0; i < CUDAStreams; i++) { hipHostMalloc((void**)&gpHostLength[i], gRecNum * sizeof(short), hipHostMallocDefault);// hipHostMalloc((void**)&gpHostArea[i], gRecNum * sizeof(short), hipHostMallocDefault);// hipHostMalloc((void**)&gpHostXpos[i], gRecNum * sizeof(double), hipHostMallocDefault);//x hipHostMalloc((void**)&gpHostYpos[i], gRecNum * sizeof(double), hipHostMallocDefault);//y hipMalloc((void**)&gpDevLength[i], gRecNum * sizeof(short));// hipMalloc((void**)&gpDevArea[i], gRecNum * sizeof(short));// hipMalloc((void**)&gpDevXpos[i], gRecNum * sizeof(double));// xpos hipMalloc((void**)&gpDevYpos[i], gRecNum * sizeof(double));// ypos } ExtractPointInitialSuccessFlag[HardwarePar.DeviceID] = true; // while (!ExtractPointSuccess) { mydelay(0.01); vector<CircleInfo>myInfo; img_index = 0;// Bufferoffset = 0;// // while (true) { gExtrackPointLock.lock(); mRecindex = mRecindex % (HardwareParam.DeviceCount + 1); if (PageLockBufferEmpty[mRecindex] == false && PageLockBufferWorking[mRecindex] == false) { PageLockBufferWorking[mRecindex] = true;//-- OutPutInitialIndex = PageLockBufferStartIndex[mRecindex] * Bufferlength;// BufferIndex = mRecindex; DatafullFlag = true; mRecindex++; gExtrackPointLock.unlock(); break; } mRecindex++; gExtrackPointLock.unlock(); if (ExtractPointSuccess) break; } // while (DatafullFlag) { if (img_index >= Bufferlength) //qwt { gExtrackPointLock.lock(); PageLockBufferWorking[BufferIndex] = false;//--workingfalse gExtrackPointLock.unlock(); PageLockBufferEmpty[BufferIndex] = true; // DatafullFlag = false; break; } if (Devpar.ImgChannelNum == 1) { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)* Devpar.ImgHeight * Devpar.ImgWidth; hipMemcpyAsync(DevPicGray[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum, hipMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { // GrayMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicGray[i], DevPadding[i], Devpar); } } else if (Devpar.ImgChannelNum == 3)//-DevPicColor { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)*Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum; hipMemcpyAsync(DevPicColor[i], gHostBuffer[BufferIndex] + +Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, hipMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++)//+padding { ColorMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicColor[i], DevPadding[i], Devpar); } } for (int i = 0; i < CUDAStreams; i++) { // Binarization << <mGrid1, 128, 0, CStreams[i] >> > (DevPadding[i], Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { // Dilation << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { hipMemcpyAsync(Dev2Val[i], DevCounter[i], 
				sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, hipMemcpyDeviceToDevice, CStreams[i]);
			}
			for (int i = 0; i < CUDAStreams; i++)
			{
				Erosion << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar);
			}
			for (int i = 0; i < CUDAStreams; i++)
			{
				// extract contour length, area and centroid for every tracked rectangle on the GPU
				GetRecInfo << <mGrid2, 128, 0, CStreams[i] >> > (gpRDevRecData[i], DevPadding[i], DevCounter[i], gpDevLength[i], gpDevArea[i], gpDevXpos[i], gpDevYpos[i], Devpar);
			}
			for (int i = 0; i < CUDAStreams; i++)
			{
				hipMemcpyAsync(gpHostLength[i], gpDevLength[i], sizeof(short)* gRecNum, hipMemcpyDeviceToHost, CStreams[i]);
			}
			for (int i = 0; i < CUDAStreams; i++)
			{
				hipMemcpyAsync(gpHostArea[i], gpDevArea[i], sizeof(short)* gRecNum, hipMemcpyDeviceToHost, CStreams[i]);
			}
			for (int i = 0; i < CUDAStreams; i++)
			{
				hipMemcpyAsync(gpHostXpos[i], gpDevXpos[i], sizeof(double)* gRecNum, hipMemcpyDeviceToHost, CStreams[i]);
			}
			for (int i = 0; i < CUDAStreams; i++)
			{
				hipMemcpyAsync(gpHostYpos[i], gpDevYpos[i], sizeof(double)* gRecNum, hipMemcpyDeviceToHost, CStreams[i]);
			}
			for (int i = 0; i < CUDAStreams; i++)
			{
				hipStreamSynchronize(CStreams[i]);
			}
			// gather the per-rectangle results of every stream into myInfo; j walks the flattened result arrays
			for (int j = 0, i = 0; i < CUDAStreams; i++)
			{
				for (int k = 0; k < Devpar.PictureNum; k++)
				{
					int hostindex = 0;
					int headpos = myInfo.size();
					CircleInfo headInfo;				// per-image header record
					headInfo.index = OutPutInitialIndex + img_index + i;
					headInfo.xpos = 99999;				// sentinel xpos/ypos mark this record as a header
					headInfo.ypos = 99999;
					headInfo.area = 0;					// header carries no area
					myInfo.push_back(headInfo);
					while (gpHostXpos[i][j] < (k + 1)*Devpar.ImgHeight && j < gRecNum)
					{
						if (0 < gpHostXpos[i][j])
						{
							hostindex++;
							CircleInfo temp;
							temp.index = hostindex;
							temp.length = gpHostLength[i][j];
							temp.area = gpHostArea[i][j];
							temp.xpos = gpHostXpos[i][j];
							temp.ypos = gpHostYpos[i][j];
							myInfo.push_back(temp);
						}
						j++;
					}
					myInfo[headpos].length = hostindex;	// number of valid points found in this image
				}
			}
			img_index += HardwarePar.CUDAStreamNum*Devpar.PictureNum;
		}
		// flush all results of this buffer into one binary file named after the buffer start index
		if (myInfo.size() > 0)
		{
			FILE* fp;
			sprintf_s(DataFilename, "%s\\%d.bin", path, OutPutInitialIndex);
			fp = fopen(DataFilename, "wb");
			fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp);
			fclose(fp);
		}
		// reload the ROI rectangles on the device when the host-side tracker has updated them
		if (DevUpdateRec[HardwarePar.DeviceID] == true)
		{
			for (int i = 0; i < CUDAStreams; i++)
			{
				hipMemcpy(gpRDevRecData[i], &gHostRecData[0], gRecNum * sizeof(RecData), hipMemcpyHostToDevice);
			}
			DevUpdateRec[HardwarePar.DeviceID] = false;
		}
	}
	// release per-stream device buffers, page-locked host buffers and the streams themselves
	for (int i = 0; i < CUDAStreams; i++)
	{
		hipFree(DevPicColor[i]);
		hipFree(DevPicGray[i]);
		hipFree(DevPadding[i]);
		hipFree(Dev2Val[i]);
		hipFree(DevCounter[i]);
		hipHostFree(gpHostLength[i]);
		hipHostFree(gpHostArea[i]);
		hipHostFree(gpHostXpos[i]);
		hipHostFree(gpHostYpos[i]);
		// device-side result buffers
		hipFree(gpDevLength[i]);
		hipFree(gpDevArea[i]);
		hipFree(gpDevXpos[i]);
		hipFree(gpDevYpos[i]);
		hipFree(gpRDevRecData[i]);
		hipStreamDestroy(CStreams[i]);
	}
	}
};
int RecR::mRecindex = 0;
/*----------------------------------------------------------------------------*/
class RecUpData : public Runnable
{
public:
	Parameter Devpar;	// runtime parameters, copied from the global configuration
	~RecUpData()
	{
	}
	void Run()
	{
		char strFilename[250];
		// copy the runtime parameters from the global configuration
		Devpar.ImgHeight = gStructVarible.ImgHeight;
		Devpar.ImgWidth = gStructVarible.ImgWidth;
		Devpar.Threshold = gStructVarible.Threshold;
		Devpar.LengthMin = gStructVarible.LengthMin;
		Devpar.LengthMax = gStructVarible.LengthMax;
		Devpar.AreaMin = gStructVarible.AreaMin;
		Devpar.AreaMax = gStructVarible.AreaMax;
		Devpar.PictureNum = gStructVarible.PictureNum;
		Devpar.RecPadding = gStructVarible.RecPadding;
		// 8-neighbourhood offsets used by the contour tracer
		const cv::Point directions[8] = { { 0, 1 },{ 1,1 },{ 1, 0 },{ 1, -1 },{ 0, -1 },{ -1, -1 },{ -1, 0 },{ -1, 1 } };
		// host-side working buffers for one batch of images
		unsigned char *ImgHostdata = new 
unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum]; //qwtBUG unsigned char *m_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];// unsigned char *n_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];// unsigned char *c_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];// unsigned char *temp_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];// RecupdataInitialSuccessFlag = true; while (ExtractPointSuccess == false)//**************************qwt10.26 { if (HostUpdateRec)// { vector<RecData>myTempRec; memcpy(ImgHostdata, gRecupImgData, sizeof(unsigned char)*Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum);// // for (int i = 0; i < Devpar.ImgHeight*Devpar.PictureNum; i++) { for (int j = 0; j < Devpar.ImgWidth; j++) { m_ptr[j + i * Devpar.ImgWidth] = ImgHostdata[j + i * Devpar.ImgWidth] > Devpar.Threshold ? 255 : 0; c_ptr[j + i * Devpar.ImgWidth] = m_ptr[j + i * Devpar.ImgWidth]; n_ptr[j + i * Devpar.ImgWidth] = m_ptr[j + i * Devpar.ImgWidth]; temp_ptr[j + i * Devpar.ImgWidth] = m_ptr[j + i * Devpar.ImgWidth]; } } // for (int i = 1; i < Devpar.ImgHeight*Devpar.PictureNum - 1; i++) for (int j = 1; j < Devpar.ImgWidth - 1; j++) { if (m_ptr[j + i * Devpar.ImgWidth] == 0) { if (m_ptr[j - 1 + (i - 1)*Devpar.ImgWidth] != 0 || m_ptr[j + (i - 1)*Devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i - 1)*Devpar.ImgWidth] != 0 || m_ptr[j - 1 + i * Devpar.ImgWidth] != 0 || m_ptr[j + 1 + i * Devpar.ImgWidth] != 0 || m_ptr[j - 1 + (i + 1)*Devpar.ImgWidth] != 0 || m_ptr[j + (i + 1)*Devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i + 1)*Devpar.ImgWidth] != 0) { n_ptr[j + i * Devpar.ImgWidth] = 255; c_ptr[j + i * Devpar.ImgWidth] = 255; temp_ptr[j + i * Devpar.ImgWidth] = 255; } } } // c_ptr for (int i = 1; i < Devpar.ImgHeight*Devpar.PictureNum - 1; i++) for (int j = 1; j < Devpar.ImgWidth - 1; j++) { if (n_ptr[j + i * Devpar.ImgWidth] != 0) { if (n_ptr[j + (i - 1)*Devpar.ImgWidth] != 0 && n_ptr[j - 1 + i * Devpar.ImgWidth] != 0 && n_ptr[j + 1 + i * Devpar.ImgWidth] != 0 && n_ptr[j + (i + 1)*Devpar.ImgWidth] != 0) { c_ptr[j + i * Devpar.ImgWidth] = 0; temp_ptr[j + i * Devpar.ImgWidth] = 0; } } } // short xmax; short xmin; short ymax; short ymin; // int i, j, counts = 0, curr_d = 0;//counts curr_dID short cLength; // for (i = 1; i < Devpar.ImgHeight*Devpar.PictureNum - 1; i++) for (j = 1; j < Devpar.ImgWidth - 1; j++) { // cv::Point b_pt = cv::Point(i, j); cv::Point c_pt = cv::Point(i, j); // if (255 == c_ptr[j + i * Devpar.ImgWidth]) { cLength = 1; xmin = xmax = i; ymin = ymax = j; /* bool first_t = false;*/ bool tra_flag = false;// c_ptr[j + i * Devpar.ImgWidth] = 0;// 0 while (!tra_flag)// { // for (counts = 0; counts < 8; counts++) { // if (curr_d >= 8) { curr_d -= 8; } if (curr_d < 0) { curr_d += 8; } // root c_pt = cv::Point(b_pt.x + directions[curr_d].x, b_pt.y + directions[curr_d].y); // if ((c_pt.x > 0) && (c_pt.x < Devpar.ImgHeight*Devpar.PictureNum - 1) && (c_pt.y > 0) && (c_pt.y < Devpar.ImgWidth - 1)) { // if (255 == c_ptr[c_pt.x*Devpar.ImgWidth + c_pt.y]) { // xmax = xmax > c_pt.x ? xmax : c_pt.x; ymax = ymax > c_pt.y ? ymax : c_pt.y; xmin = xmin < c_pt.x ? xmin : c_pt.x; ymin = ymin < c_pt.y ? 
										ymin : c_pt.y;
									curr_d -= 2;	// step the search direction back so the trace keeps hugging the contour
									c_ptr[c_pt.x*Devpar.ImgWidth + c_pt.y] = 0;	// mark the pixel as visited
									// make the current point the new root b_pt
									b_pt.x = c_pt.x;
									b_pt.y = c_pt.y;
									cLength++;
									break;	// leave the direction loop
								}
							}
							curr_d++;
						}	// end for
						// no unvisited neighbour was found in any of the 8 directions
						if (8 == counts)
						{
							curr_d = 0;
							tra_flag = true;
							// keep the contour only if its length lies inside the configured window
							if (cLength < Devpar.LengthMax && (cLength > Devpar.LengthMin))
							{
								RecData tempRecData;
								int tempcount = 0;
								double aspect = double(xmax - xmin) / double(ymax - ymin);
								if (aspect > 0.7 && aspect < 1.5)	// keep only roughly square bounding boxes
								{
									for (int k = xmin; k <= xmax; k++)	// sample the vertical centre line of the box
									{
										tempcount += temp_ptr[(ymax + ymin) / 2 + k*Devpar.ImgWidth] > 0 ? 1 : 0;
									}
									for (int k = ymin; k <= ymax; k++)	// sample the horizontal centre line of the box
									{
										tempcount += temp_ptr[k + (xmax + xmin) / 2 * Devpar.ImgWidth] > 0 ? 1 : 0;
									}
									if (tempcount <= 4)
									{
										// clamp the padded bounding box to the image borders
										if (xmin - Devpar.RecPadding < 0)
											tempRecData.RecXmin = 0;
										else
											tempRecData.RecXmin = xmin - Devpar.RecPadding;
										if (ymin - Devpar.RecPadding < 0)
											tempRecData.RecYmin = 0;
										else
											tempRecData.RecYmin = ymin - Devpar.RecPadding;
										if (xmax + Devpar.RecPadding > Devpar.ImgHeight*Devpar.PictureNum - 1)
											tempRecData.RecXmax = Devpar.ImgHeight*Devpar.PictureNum - 1;
										else
											tempRecData.RecXmax = xmax + Devpar.RecPadding;
										if (ymax + Devpar.RecPadding > Devpar.ImgWidth)
											tempRecData.RecYmax = Devpar.ImgWidth - 1;
										else
											tempRecData.RecYmax = ymax + Devpar.RecPadding;
										myTempRec.push_back(tempRecData);
									}
								}
							}
							break;
						}
					}	// end if
				}	// end while
			}
			// publish the freshly traced ROI list to all devices
			gSingleImgRecNum = myTempRec.size() / Devpar.PictureNum;	// marks found per single image
			gRecNum = (myTempRec.size() + 127) / 128 * 128;	// round the ROI count up to a full multiple of 128 threads
			myTempRec.resize(gRecNum, RecData{ 0,0,0,0 });
			if (gRecNum != 0)
			{
				memcpy(&gHostRecData[0], &myTempRec[0], sizeof(RecData)*gRecNum);
				for (int m = 0; m < HardwareParam.DeviceCount; m++)
				{
					DevUpdateRec[m] = true;
				}
			}
			HostUpdateRec = false;
		}
	}
	// release the host-side working buffers
	delete[]ImgHostdata;
	delete[]m_ptr;
	delete[]n_ptr;
	delete[]c_ptr;
	delete[]temp_ptr;
	}
};
/*--------------------------------------------------------------------*/
class TC : public Runnable	// JPEG compression worker for colour frames (NPP-based encoder)
{
public:
	HardwareInfo param;			// GPU binding and stream configuration for this worker
	unsigned char* my_in;		// device buffer holding the interleaved RGB input frame
	needmemory memory;			// per-worker device buffers (DCT state, image planes, scan buffer)
	needdata staticdata;		// quantization tables, Huffman specs and target image geometry
	static int mTCindex;
	unsigned char* total_malloc;	// host staging area into which finished JPEG streams are packed
	int pix_index;				// current write offset inside total_malloc
public:
	void mydelay(double sec)	// busy-wait for sec seconds
	{
		clock_t start_time, cur_time;
		start_time = clock();
		do
		{
			cur_time = clock();
		} while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec);
	}
	/*************************************************************************************************
	Function:    void Initialize()
	Description: allocates the device input buffer, the quantization tables, the per-plane DCT and
	             image buffers and the Huffman specifications required by the NPP JPEG encoder.
	Calls:       hipMalloc(), nppiDCTInitAlloc(), hipMemcpyAsync(), hipMallocPitch(),
	             nppiEncodeHuffmanSpecInitAlloc_JPEG() and other CUDA runtime functions.
	Input:       none
	Output:      none
	***************************************************************************************************/
	void Initialize()
	{
		//hipMalloc((void**)&(this->my_in), imgHeight * imgWidth * sizeof(unsigned char) * 3);
		// allocate the device buffer that receives the raw RGB frame (my_in)
		hipMalloc((void**)&(this->my_in), compress_old_Height * compress_old_Width * sizeof(unsigned char) * 3);
		nppiDCTInitAlloc(&(this->memory).pDCTState);	// memory.pDCTState
		hipMalloc(&(this->staticdata).pdQuantizationTables, 64 * 4);	// staticdata.pdQuantizationTables
		float nScaleFactor;
		nScaleFactor = 1.0f;
		int nMCUBlocksH = 0;
		int nMCUBlocksV = 0;
		quantityassgnment();
		for (int i = 0; i < oFrameHeader.nComponents; ++i)
		{
			nMCUBlocksV = max(nMCUBlocksV, oFrameHeader.aSamplingFactors[i] & 0x0f);
			nMCUBlocksH = max(nMCUBlocksH, oFrameHeader.aSamplingFactors[i] >> 4);
		}
		Npp8u aZigzag[] = {
			0, 1, 5, 6, 14, 15, 27, 28,
			2, 4, 7, 13, 16, 26, 29, 42,
			3, 8, 12, 17, 25, 30, 41, 43,
			9, 11, 18, 24, 31, 40, 44, 53,
			10, 19, 23, 32, 39, 45, 52, 54,
			20, 22, 33, 38, 46, 51, 55, 60,
			21, 34, 37, 47, 50, 56, 59, 61,
			35, 36, 48, 49, 57, 58, 62, 63
		};
		for (int i = 0; i < 4; ++i)
		{
			Npp8u 
temp[64]; for (int k = 0; k < 32; ++k) { temp[2 * k + 0] = aQuantizationTables[i].aTable[aZigzag[k + 0]]; temp[2 * k + 1] = aQuantizationTables[i].aTable[aZigzag[k + 32]]; } hipMemcpyAsync((unsigned char *)(this->staticdata).pdQuantizationTables + i * 64, temp, 64, hipMemcpyHostToDevice); } float frameWidth = floor((float)oFrameHeader.nWidth * (float)nScaleFactor); float frameHeight = floor((float)oFrameHeader.nHeight * (float)nScaleFactor); (this->staticdata).oDstImageSize.width = (int)max(1.0f, frameWidth); (this->staticdata).oDstImageSize.height = (int)max(1.0f, frameHeight); size_t newPitch[3]; NppiSize oBlocks; for (int i = 0; i < oFrameHeader.nComponents; ++i) //DCTHuffman { //NppiSize oBlocks; NppiSize oBlocksPerMCU = { oFrameHeader.aSamplingFactors[i] & 0x0f, oFrameHeader.aSamplingFactors[i] >> 4 }; oBlocks.width = (int)ceil(((this->staticdata).oDstImageSize.width + 7) / 8 * static_cast<float>(oBlocksPerMCU.width) / nMCUBlocksH); oBlocks.width = DivUp(oBlocks.width, oBlocksPerMCU.width) * oBlocksPerMCU.width; oBlocks.height = (int)ceil(((this->staticdata).oDstImageSize.height + 7) / 8 * static_cast<float>(oBlocksPerMCU.height) / nMCUBlocksV); oBlocks.height = DivUp(oBlocks.height, oBlocksPerMCU.height) * oBlocksPerMCU.height; (this->staticdata).aDstSize[i].width = oBlocks.width * 8; (this->staticdata).aDstSize[i].height = oBlocks.height * 8; } // Scale to target image size // Assume we only deal with 420 images. int aSampleFactor[3] = { 1, 2, 2 }; (this->memory).nScanSize = (this->staticdata).oDstImageSize.width * (this->staticdata).oDstImageSize.height * 2; (this->memory).nScanSize = (this->memory).nScanSize > (4 << 20) ? (this->memory).nScanSize : (4 << 20); hipMalloc(&(this->memory).pDScan, (this->memory).nScanSize); //memory.pDScan nppiEncodeHuffmanGetSize((this->staticdata).aDstSize[0], 3, &(this->memory).nTempSize); hipMalloc(&(this->memory).pDJpegEncoderTemp, (this->memory).nTempSize); //memory.pDJpegEncoderTemp for (int j = 0; j < 3; j++) { size_t nPitch1; hipMallocPitch(&(this->memory).pDCT[j], &nPitch1, oBlocks.width * 64 * sizeof(Npp16s), oBlocks.height); //memory.pDCT (this->memory).DCTStep[j] = static_cast<Npp32s>(nPitch1); hipMallocPitch(&(this->memory).pDImage[j], &nPitch1, (this->staticdata).aDstSize[j].width, (this->staticdata).aDstSize[j].height); //memory.pDImage (this->memory).DImageStep[j] = static_cast<Npp32s>(nPitch1); dataduiqi[j] = nPitch1; } for (int i = 0; i < 3; ++i) //staticdata.apDHuffmanDCTable staticdata.apDHuffmanACTable { nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanDCTables[(oScanHeader.aHuffmanTablesSelector[i] >> 4)].aCodes, nppiDCTable, &(this->staticdata).apDHuffmanDCTable[i]); nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanACTables[(oScanHeader.aHuffmanTablesSelector[i] & 0x0f)].aCodes, nppiACTable, &(this->staticdata).apDHuffmanACTable[i]); } for (int iComponent = 0; iComponent < 2; ++iComponent) { (this->memory).hpCodesDC[iComponent] = pHuffmanDCTables[iComponent].aCodes; (this->memory).hpCodesAC[iComponent] = pHuffmanACTables[iComponent].aCodes; (this->memory).hpTableDC[iComponent] = pHuffmanDCTables[iComponent].aTable; (this->memory).hpTableAC[iComponent] = pHuffmanACTables[iComponent].aTable; } } /************************************************************************************************* Function: void process() Description: jpegNPPnppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW memory.pDImageYUVDCTmemory.pDCT jpegNPPnppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R DCTmemory.pDCTmemory.pDScan Calls: 
nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW()nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R()cuda Input: Output: ***************************************************************************************************/ void process() { for (int i = 0; i < 3; ++i) //YCbCrDCT { nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW((this->memory).pDImage[i], (this->memory).DImageStep[i], (this->memory).pDCT[i], (this->memory).DCTStep[i], (this->staticdata).pdQuantizationTables + oFrameHeader.aQuantizationTableSelector[i] * 64, (this->staticdata).aDstSize[i], (this->memory).pDCTState); } nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R((this->memory).pDCT, (this->memory).DCTStep, // 0, oScanHeader.nSs, oScanHeader.nSe, oScanHeader.nA >> 4, oScanHeader.nA & 0x0f, (this->memory).pDScan, &(this->memory).nScanLength, (this->memory).hpCodesDC, (this->memory).hpTableDC, (this->memory).hpCodesAC, (this->memory).hpTableAC, (this->staticdata).apDHuffmanDCTable, (this->staticdata).apDHuffmanACTable, (this->staticdata).aDstSize, (this->memory).pDJpegEncoderTemp); } /************************************************************************************************* Function: void writedisk() Description: jpg JFIFaQuantizationTablesoFrameHeader oScanHeaderjpg memory.pDScan.jpg Calls: writeMarker()writeJFIFTag()writeQuantizationTable()writeHuffmanTable() useful.h Input: Output: ***************************************************************************************************/ //void writedisk(char* OutputFile) void writedisk(int picture_num, Package* a, int bag_index) { unsigned char *pDstJpeg = new unsigned char[(this->memory).nScanSize]; //.jpg unsigned char *pDstOutput = pDstJpeg; oFrameHeader.nWidth = (this->staticdata).oDstImageSize.width; oFrameHeader.nHeight = (this->staticdata).oDstImageSize.height; writeMarker(0x0D8, pDstOutput); writeJFIFTag(pDstOutput); writeQuantizationTable(aQuantizationTables[0], pDstOutput); // writeQuantizationTable(aQuantizationTables[1], pDstOutput); writeFrameHeader(oFrameHeader, pDstOutput); writeHuffmanTable(pHuffmanDCTables[0], pDstOutput); // writeHuffmanTable(pHuffmanACTables[0], pDstOutput); writeHuffmanTable(pHuffmanDCTables[1], pDstOutput); writeHuffmanTable(pHuffmanACTables[1], pDstOutput); writeScanHeader(oScanHeader, pDstOutput); hipMemcpy(pDstOutput, (this->memory).pDScan, (this->memory).nScanLength, hipMemcpyDeviceToHost); pDstOutput += (this->memory).nScanLength; writeMarker(0x0D9, pDstOutput); char szOutputFiler[100]; sprintf_s(szOutputFiler, "%s\\%d.jpg", gStructVarible.ImgSavePath, picture_num); memcpy(total_malloc + pix_index, pDstJpeg, static_cast<int>(pDstOutput - pDstJpeg)); //.jpgtotal_malloc pix_index += static_cast<int>(pDstOutput - pDstJpeg); //a->Form_one_head(bag_index / gStructVarible.PictureNum, szOutputFiler, pDstOutput - pDstJpeg); a->Form_one_head(bag_index / gStructVarible.PictureNum, picture_num, pDstOutput - pDstJpeg); //.jpg bag_index / gStructVarible.PictureNum //{ //Write result to file. 
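		// The ofstream path below (kept from an earlier version) wrote every frame to its own .jpg file.
		// The active path above instead appends the finished JPEG stream to the host staging buffer
		// total_malloc and records its slot and length through Package::Form_one_head(), so Run() can
		// flush a whole buffer of frames as one .bin bundle with a single disk write. Assumed bundle
		// layout, inferred from the Form_total_head/Form_one_head usage and not verified:
		//   [bundle head][per-frame head 0..N-1][jpeg 0][jpeg 1]...[jpeg N-1]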
//std::ofstream outputFile1(OutputFile, ios::out | ios::binary); //outputFile1.write(reinterpret_cast<const char *>(pDstJpeg), static_cast<int>(pDstOutput - pDstJpeg)); //} delete[] pDstJpeg; } /************************************************************************************************* Function: void memoryfree() Description: Calls: hipFree()nppiEncodeHuffmanSpecFree_JPEG()nppiDCTFree() cuda Input: Output: ***************************************************************************************************/ void memoryfree() // { hipFree(this->my_in); for (int i = 0; i < 3; ++i) { hipFree((this->memory).pDCT[i]); hipFree((this->memory).pDImage[i]); nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanDCTable[i]); nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanACTable[i]); } nppiDCTFree((this->memory).pDCTState); hipFree((this->memory).pDJpegEncoderTemp); hipFree((this->memory).pDScan); hipFree((this->staticdata).pdQuantizationTables); } ~TC() {} /************************************************************************************************* Function: Run() Description: T Calls: Initialize()RGBtoYUV <<<blocks, threads >>>process() writedisk(szOutputFile)memoryfree() Input: Output: ***************************************************************************************************/ void Run() { char ImgoutputPath[255]; total_malloc = new unsigned char[100000000]; pix_index = 0; char szOutputFile[100]; clock_t start, end; int img_index; // int mFlagIndex = 0; int OutPutInitialIndex = 0; //Bin int Bufferoffset = 0; // bool DatafullFlag = false; //trueGPU hipSetDevice((this->param).GpuId); this->Initialize(); cout << "T GPU " << param.GpuId << " initial success!" << endl; while (!ExtractPointSuccess) { mydelay(0.01); img_index = 0; // Bufferoffset = 0; // while (true) { gComressReadDataLock.lock(); mTCindex = mTCindex % (HardwareParam.DeviceCount + 1); if (gComressionBufferEmpty[mTCindex] == false && gComressionBufferWorking[mTCindex] == false) { //-- gComressionBufferWorking[mTCindex] = true; OutPutInitialIndex = gComressionBufferStartIndex[mTCindex] * Bufferlength;// mFlagIndex = mTCindex; DatafullFlag = true; mTCindex++; gComressReadDataLock.unlock(); break; } mTCindex++; gComressReadDataLock.unlock(); if (ExtractPointSuccess) break; } start = clock(); sprintf_s(ImgoutputPath, "%s\\%d.bin", gStructVarible.ImgSavePath, OutPutInitialIndex); cout << ImgoutputPath << endl; //Package data_bag(ImgoutputPath, Bufferlength / gStructVarible.PictureNum); Package data_bag(ImgoutputPath); data_bag.Package_init(Bufferlength / gStructVarible.PictureNum); //pImg while (DatafullFlag) { if (img_index >= Bufferlength) { end = clock(); gComressReadDataLock.lock(); gComressionBufferWorking[mFlagIndex] = false; gComressReadDataLock.unlock(); gComressionBufferEmpty[mFlagIndex] = true; DatafullFlag = false; compress_write_lock.lock(); data_bag.file.open(data_bag.Fname, ios::out | ios::binary); //50.jpg //data_bag.Form_total_head(); //50 data_bag.Form_total_head(compress_imgWidth, compress_imgHeight, gStructVarible.PictureNum, OutPutInitialIndex); data_bag.file.write(data_bag.head_cache, data_bag.head_bias); // data_bag.file.write(reinterpret_cast<const char *>(total_malloc), static_cast<int>(pix_index)); // data_bag.file.close(); //data_bag.UnPack(data_bag.Fname); compress_write_lock.unlock(); memset(total_malloc, 0, 100000000); // pix_index = 0; // break; } //sprintf_s(szOutputFile, "%s\\%d.jpg", gStructVarible.ImgSavePath, OutPutInitialIndex + img_index); int picture_index = 
OutPutInitialIndex + img_index; Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum * 3; hipMemcpy(this->my_in, gHostComressiongBuffer[mFlagIndex] + Bufferoffset, compress_old_Width * compress_old_Height * sizeof(unsigned char) * 3, hipMemcpyHostToDevice); RGBtoYUV << <blocks, threads >> > (this->my_in, (this->memory).pDImage[0], (this->memory).pDImage[2], (this->memory).pDImage[1], compress_imgHeight, compress_imgWidth, dataduiqi[0], compress_old_Height, compress_old_Width); this->process(); this->writedisk(picture_index, &data_bag, img_index); //img_index++; img_index = img_index + gStructVarible.PictureNum; //picture_index } } delete[] total_malloc; this->memoryfree(); } }; int TC::mTCindex = 0; //----------------------------------------------------------------------------// // class T : public Runnable //TC { public: HardwareInfo param; // gpuneedmemory memory[GRAYCompressStreams]; static int mTindex; needconstdata staticdata; // RIM ImageSize; // size_t Org_Pitch; // int h_MCUtotal; //8*8 hipStream_t stream[GRAYCompressStreams]; //CUDA int stridef; cpuneedmemory cpumemory[GRAYCompressStreams]; //CPU unsigned char* total_malloc; // int pix_index; public: void mydelay(double sec) // { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } //************************************************************************************************* //*Function: void Initialize() //*Description: //*Calls: hipMalloc()hipMemcpyAsync()hipMallocPitch()cuda //*Input: ImageSize //*Output: //*************************************************************************************************** void Initialize() { size_t nPitch; this->stridef = ALIGN(compress_old_Width, 4); (this->ImageSize).width = ALIGN(compress_old_Width, 8); (this->ImageSize).height = ALIGN(compress_old_Height, 8); int h_MCUtotal = (this->ImageSize).height*(this->ImageSize).width / 64; int ARRAY_SIZE = ALIGN(h_MCUtotal + 1025, 1024); int ARRAY_SIZE1 = ALIGN(h_MCUtotal / 1024 + 1025, 1024); // (this->staticdata).nScanSize = (this->ImageSize).width * (this->ImageSize).height * 2; (this->staticdata).nScanSize = (this->staticdata).nScanSize > (10 << 20) ? 
(this->staticdata).nScanSize : (10 << 20); for (int i = 0; i < GRAYCompressStreams; i++) { // hipMallocPitch((void **)&(this->memory[i].d_bsrc), &(this->ImageSize.StrideF), (this->ImageSize).width * sizeof(BYTE), (this->ImageSize).height); //my_in hipMallocPitch((void **)&(this->memory[i].d_ydst), &nPitch, (this->ImageSize).width * (this->ImageSize).height * sizeof(BSI16), 1); hipMallocPitch((void **)&(this->memory[i].d_JPEGdata), &nPitch, (this->ImageSize).width * sizeof(BYTE)*(this->ImageSize).height, 1); hipMalloc((void **)&(this->memory[i].last_JPEGdata), (10 << 20)); hipMalloc((void **)&(this->memory[i].prefix_num), ARRAY_SIZE * sizeof(int)); hipMalloc((void **)&(this->memory[i].last_prefix_num), ARRAY_SIZE * sizeof(int)); hipMalloc((void **)&(this->memory[i].dc_component), ARRAY_SIZE * sizeof(int)); hipMalloc((void **)&(this->memory[i].d_blocksum), 768 * sizeof(int)); hipMalloc((void **)&(this->memory[i].d_datalen), sizeof(int)); //CUDA hipStreamCreate(&(this->stream[i])); //CPU //hipHostMalloc((BYTE**)&(this->cpumemory[i]).pDstJpeg, (this->staticdata).nScanSize, hipHostMallocDefault); // (this->cpumemory[i]).pDstJpeg = new unsigned char[(this->staticdata).nScanSize]; this->cpumemory[i].pDstOutput = this->cpumemory[i].pDstJpeg; } //--------------------------------------------- hipMalloc(&(this->staticdata).DEV_STD_QUANT_TAB_LUMIN, 64 * sizeof(float)); hipMalloc(&(this->staticdata).DEV_ZIGZAG, 64 * sizeof(int)); { //---------------------------------------------------- float temp[64]; for (int i = 0; i<64; i++) { temp[i] = 1.0f / (float)STD_QUANT_TAB_LUMIN[i] * C_norm * C_norm; } hipMemcpyAsync((this->staticdata).DEV_STD_QUANT_TAB_LUMIN, temp, 64 * sizeof(float), hipMemcpyHostToDevice); } hipMemcpyAsync((this->staticdata).DEV_ZIGZAG, aZIGZAG, 64 * sizeof(float), hipMemcpyHostToDevice); { //----------------huffman GPUjpeg_huffman_encoder_value_init_kernel << <32, 256 >> >(); // 8192 threads total // GPUHuffman ( CC >= 2.0) uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4]; memset(gpujpeg_huffman_cpu_lut, 0, (256 + 1) * 4 * sizeof(uint32_t)); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 0, STD_HUFTAB_LUMIN_AC, true); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 1, STD_HUFTAB_LUMIN_DC, false); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 2, STD_HUFTAB_CHROM_AC, true); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 3, STD_HUFTAB_CHROM_DC, false); hipMemcpyToSymbol(gpujpeg_huffman_gpu_tab, gpujpeg_huffman_cpu_lut, (256 + 1) * 4 * sizeof(*gpujpeg_huffman_gpu_tab), 0, hipMemcpyHostToDevice ); } } //************************************************************************************************** //**Function: void process() //**Description: //**Input: Size //**Output: //*************************************************************************************************** void process() { const int ARRAY_SIZE = ImageSize.width * ImageSize.height; const int h_MCUtotal = ARRAY_SIZE / 64; //8*8MCU const int Code_blocks = (h_MCUtotal + CODE_THREADS - 1) / CODE_THREADS; int Blocksums; int prexsum_blocks = 1; int prexsum_threads = (h_MCUtotal - 1) / CODE_THREADS; //prefix_sum int preSum_Blocks = (h_MCUtotal + 1023) / 1024; //DCT dim3 DCT_blocks((ImageSize.width + 63) / DCT_BLOCK_WIDTH, ImageSize.height / DCT_BLOCK_HEIGHT); dim3 DCT_threads(8, 32 / 8, 2); dim3 Encode_thread(THREAD_WARP, 4); dim3 Encode_Blocks(gpujpeg_huffman_encoder_grid_size((h_MCUtotal + 3) / 4)); for (int i = 0; i < GRAYCompressStreams; i++) { CUDA_DCT8_kernel << <DCT_blocks, DCT_threads, 0, 
this->stream[i] >> >(this->memory[i].d_ydst, this->memory[i].d_bsrc, ImageSize, this->staticdata.DEV_ZIGZAG, this->staticdata.DEV_STD_QUANT_TAB_LUMIN); } for (int i = 0; i < GRAYCompressStreams; i++) { Data_codelength_kernel << <Encode_Blocks, Encode_thread, 0, this->stream[i] >> > (this->memory[i].d_ydst, h_MCUtotal, this->memory[i].d_JPEGdata, this->memory[i].prefix_num, 1, 0); //mcu work_efficient_PrefixSum_kernel << <preSum_Blocks, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].prefix_num, this->memory[i].dc_component); if (h_MCUtotal <= PRESUM_THREADS * 512) { work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].prefix_num); } else { work_efficient_PrefixSum_kernel << < ((h_MCUtotal - 1) / 512 + 1023) / 1024, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].d_blocksum); work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].d_blocksum); work_efficient_Adds_kernel << <((h_MCUtotal + 511) / 512 + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].d_blocksum, this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].prefix_num); } // data_shift_kernel << <Code_blocks, CODE_THREADS, 0, this->stream[i] >> >(this->memory[i].d_JPEGdata, this->memory[i].prefix_num, h_MCUtotal, this->memory[i].d_datalen, this->memory[i].dc_component, this->memory[i].last_prefix_num); //MCU BYTE work_efficient_PrefixSum_kernel << <preSum_Blocks, PRESUM_THREADS, 0, this->stream[i] >> > (this->memory[i].last_prefix_num, this->memory[i].dc_component); if (h_MCUtotal <= PRESUM_THREADS * 512) { work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].last_prefix_num); } else { work_efficient_PrefixSum_kernel << < ((h_MCUtotal - 1) / 512 + 1023) / 1024, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].d_blocksum); work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].d_blocksum); work_efficient_Adds_kernel << <((h_MCUtotal + 511) / 512 + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].d_blocksum, this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].last_prefix_num); } hipMemsetAsync(this->memory[i].last_JPEGdata, 0, (10 << 20), this->stream[i]); Data_encodelater1_kernel << <Code_blocks, CODE_THREADS, 0, this->stream[i] >> >(this->memory[i].last_prefix_num, this->memory[i].d_JPEGdata, this->memory[i].last_JPEGdata, h_MCUtotal, this->memory[i].d_datalen); // hipMemcpyAsync(&this->cpumemory[i].dst_JPEGdatalength, (this->memory[i]).d_datalen, sizeof(int), hipMemcpyDeviceToHost, this->stream[i]); } } void writedisk(int picture_num, Package* a, int bag_index) { for (int i = 0; i < GRAYCompressStreams; i++) { this->cpumemory[i].pDstOutput = this->cpumemory[i].pDstJpegDataStart; // hipMemcpyAsync(this->cpumemory[i].pDstOutput, this->memory[i].last_JPEGdata, this->cpumemory[i].dst_JPEGdatalength, 
hipMemcpyDeviceToHost, this->stream[i]); //-------------Stream hipStreamSynchronize(this->stream[i]); this->cpumemory[i].pDstOutput += this->cpumemory[i].dst_JPEGdatalength; writeMarker(0x0D9, this->cpumemory[i].pDstOutput); //char szOutputFiler[100]; //sprintf_s(szOutputFiler, "%s\\%d.jpg", gStructVarible.ImgSavePath, picture_num); memcpy(total_malloc + pix_index, this->cpumemory[i].pDstJpeg, static_cast<int>(this->cpumemory[i].pDstOutput - this->cpumemory[i].pDstJpeg)); pix_index += static_cast<int>(this->cpumemory[i].pDstOutput - this->cpumemory[i].pDstJpeg); a->Form_one_head(bag_index / gStructVarible.PictureNum, picture_num, this->cpumemory[i].pDstOutput - this->cpumemory[i].pDstJpeg); picture_num = picture_num + gStructVarible.PictureNum; bag_index = bag_index + gStructVarible.PictureNum; } } void WriteJpgheader() { for (int i = 0; i < GRAYCompressStreams; i++) { writeMarker(0x0D8, this->cpumemory[i].pDstOutput); writeMarker(0x0DB, this->cpumemory[i].pDstOutput); writeWords(67, this->cpumemory[i].pDstOutput); writeChar(0, this->cpumemory[i].pDstOutput); for (int j = 0; j < 64; j++) { writeChar(STD_QUANT_TAB_LUMIN[ZIGZAG[j]], this->cpumemory[i].pDstOutput); } writeMarker(0x0DB, this->cpumemory[i].pDstOutput); writeWords(67, this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); for (int j = 0; j < 64; j++) { writeChar(STD_QUANT_TAB_CHROM[ZIGZAG[j]], this->cpumemory[i].pDstOutput); } writeMarker(0x0C0, this->cpumemory[i].pDstOutput); unsigned short len = 2 + 1 + 2 + 2 + 1 + 3 * 3; //3 writeWords(len, this->cpumemory[i].pDstOutput); writeChar(8, this->cpumemory[i].pDstOutput); writeWords(compress_old_Height, this->cpumemory[i].pDstOutput); writeWords(compress_old_Width, this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(0, this->cpumemory[i].pDstOutput); writeChar(2, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); //********************************************************************************************* // output DHT AC 0xC4 (Huffman) writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 162; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(0 + 0x10, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_LUMIN_AC, len - 3); this->cpumemory[i].pDstOutput += len - 3; writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 162; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(1 + 0x10, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_CHROM_AC, len - 3); this->cpumemory[i].pDstOutput += len - 3; // output DHT DC 0xC4 (Huffman) writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 12; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(0 + 0x00, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_LUMIN_DC, len - 3); this->cpumemory[i].pDstOutput += len - 3; writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 12; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(1 + 0x00, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_CHROM_DC, len - 3); this->cpumemory[i].pDstOutput += 
len - 3; // output SOS 0xDA len = 2 + 1 + 2 * 3 + 3; writeMarker(0x0DA, this->cpumemory[i].pDstOutput); writeWords(len, this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); writeChar((0 << 0) | (0 << 4), this->cpumemory[i].pDstOutput); writeChar(2, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(0x00, this->cpumemory[i].pDstOutput); writeChar(0x3f, this->cpumemory[i].pDstOutput); writeChar(0x00, this->cpumemory[i].pDstOutput); this->cpumemory[i].pDstJpegDataStart = this->cpumemory[i].pDstOutput; } } // void memoryfree() { for (int i = 0; i < GRAYCompressStreams; i++) { // hipFree(this->memory[i].d_bsrc); hipFree(this->memory[i].d_ydst); hipFree(this->memory[i].d_JPEGdata); hipFree(this->memory[i].last_JPEGdata); hipFree(this->memory[i].prefix_num); hipFree(this->memory[i].last_prefix_num); hipFree(this->memory[i].dc_component); hipFree(this->memory[i].d_blocksum); hipFree(this->memory[i].d_datalen); //hipFree(this->cpumemory[i].pDstJpeg); delete[] this->cpumemory[i].pDstJpeg; } hipFree(this->staticdata.DEV_STD_QUANT_TAB_LUMIN); hipFree(this->staticdata.DEV_ZIGZAG); } ~T() {} void Run() { char ImgoutputPath[255]; total_malloc = new unsigned char[100000000]; pix_index = 0; clock_t start, end, end2; int img_index;// int cudaStreams_imgindex = 0; // int mFlagIndex = 0; int OutPutInitialIndex = 0; //Bin int Bufferoffset = 0; // bool DatafullFlag = false;//trueGPU //------------------------------------------------------------------------------------- cv::Mat img1(5120, 5120, CV_8UC1); hipSetDevice((this->param).GpuId); this->Initialize(); cout << "T GPU " << param.GpuId << " initial success!" 
<< endl; WriteJpgheader(); while (!ExtractPointSuccess) { mydelay(0.01); img_index = 0;// Bufferoffset = 0; // while (true)// { gComressReadDataLock.lock(); mTindex = mTindex % (HardwareParam.DeviceCount + 1); if (gComressionBufferEmpty[mTindex] == false && gComressionBufferWorking[mTindex] == false) { //-- gComressionBufferWorking[mTindex] = true; OutPutInitialIndex = gComressionBufferStartIndex[mTindex] * Bufferlength;// mFlagIndex = mTindex; DatafullFlag = true; mTindex++; gComressReadDataLock.unlock(); break; } mTindex++; gComressReadDataLock.unlock(); if (ExtractPointSuccess) break; } start = clock(); sprintf_s(ImgoutputPath, "%s\\%d.bin", gStructVarible.ImgSavePath, OutPutInitialIndex); Package data_bag(ImgoutputPath); data_bag.Package_init(Bufferlength / gStructVarible.PictureNum); //pImg while (DatafullFlag) { if (img_index >= Bufferlength) { end = clock(); gComressReadDataLock.lock(); gComressionBufferWorking[mFlagIndex] = false; gComressReadDataLock.unlock(); gComressionBufferEmpty[mFlagIndex] = true; DatafullFlag = false; // compress_write_lock.lock(); data_bag.file.open(data_bag.Fname, ios::out | ios::binary); //data_bag.Form_total_head(); data_bag.Form_total_head(compress_imgWidth, compress_imgHeight, gStructVarible.PictureNum, OutPutInitialIndex); //cout << OutPutInitialIndex << endl; data_bag.file.write(data_bag.head_cache, data_bag.head_bias); data_bag.file.write(reinterpret_cast<const char *>(total_malloc), static_cast<int>(pix_index)); data_bag.file.close(); //data_bag.UnPack(data_bag.Fname); compress_write_lock.unlock(); memset(total_malloc, 0, 100000000); pix_index = 0; end2 = clock(); //cout << "T GPU " << param.GpuId << " Index" << OutPutInitialIndex << " " << double(end - start) / CLOCKS_PER_SEC <<" "<< double(end2 - start) / CLOCKS_PER_SEC<< endl; break; } int picture_index = OutPutInitialIndex + img_index; //Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum; Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * img_index; //GPU for (int i = 0; i < GRAYCompressStreams; i++) { hipMemcpy2DAsync(this->memory[i].d_bsrc, ImageSize.StrideF, gHostComressiongBuffer[mFlagIndex] + Bufferoffset, ImageSize.width * sizeof(unsigned char), ImageSize.width * sizeof(unsigned char), ImageSize.height, hipMemcpyHostToDevice, this->stream[i]); Bufferoffset += gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum; } this->process(); this->writedisk(picture_index, &data_bag, img_index); img_index = img_index + gStructVarible.PictureNum * GRAYCompressStreams; } } delete[] total_malloc; this->memoryfree(); for (int i = 0; i < GRAYCompressStreams; i++) //-------------CUDA hipStreamDestroy(this->stream[i]); } }; /*------------------------------------------------------------------*/ //npp //class T : public Runnable //TC //{ //public: // HardwareInfo param; // // needmemory memory; // needdata staticdata; // static int mTindex; // static int test_number; // unsigned char* total_malloc; // int pix_index; // // //public: // void mydelay(double sec)// // { // clock_t start_time, cur_time; // start_time = clock(); // do // { // cur_time = clock(); // } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); // } // void Initialize() // { // //hipMalloc((void**)&(this->my_in), imgHeight * imgWidth * sizeof(unsigned char) * 3); // nppiDCTInitAlloc(&(this->memory).pDCTState); // hipMalloc(&(this->staticdata).pdQuantizationTables, 64 * 4); // // float nScaleFactor; // nScaleFactor = 1.0f; // int nMCUBlocksH = 0; 
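	// NOTE: this commented-out class appears to be the earlier NPP-based encoder
	// (nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW + nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R),
	// retained for reference only; the active class T above re-implements the same pipeline with
	// hand-written DCT, bit-length and prefix-sum packing kernels running on several CUDA streams.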
// int nMCUBlocksV = 0; // quantityassgnment(); // // for (int i = 0; i < oFrameHeader.nComponents; ++i) // { // nMCUBlocksV = max(nMCUBlocksV, oFrameHeader.aSamplingFactors[i] & 0x0f); // nMCUBlocksH = max(nMCUBlocksH, oFrameHeader.aSamplingFactors[i] >> 4); // } // // // Npp8u aZigzag[] = { // 0, 1, 5, 6, 14, 15, 27, 28, // 2, 4, 7, 13, 16, 26, 29, 42, // 3, 8, 12, 17, 25, 30, 41, 43, // 9, 11, 18, 24, 31, 40, 44, 53, // 10, 19, 23, 32, 39, 45, 52, 54, // 20, 22, 33, 38, 46, 51, 55, 60, // 21, 34, 37, 47, 50, 56, 59, 61, // 35, 36, 48, 49, 57, 58, 62, 63 // }; // // for (int i = 0; i < 4; ++i) // { // Npp8u temp[64]; // // for (int k = 0; k < 32; ++k) // { // temp[2 * k + 0] = aQuantizationTables[i].aTable[aZigzag[k + 0]]; // temp[2 * k + 1] = aQuantizationTables[i].aTable[aZigzag[k + 32]]; // } // // hipMemcpyAsync((unsigned char *)(this->staticdata).pdQuantizationTables + i * 64, temp, 64, hipMemcpyHostToDevice); // // } // // float frameWidth = floor((float)oFrameHeader.nWidth * (float)nScaleFactor); // float frameHeight = floor((float)oFrameHeader.nHeight * (float)nScaleFactor); // // (this->staticdata).oDstImageSize.width = (int)max(1.0f, frameWidth); // (this->staticdata).oDstImageSize.height = (int)max(1.0f, frameHeight); // // size_t newPitch[3]; // NppiSize oBlocks; // // // for (int i = 0; i < oFrameHeader.nComponents; ++i) // { // //NppiSize oBlocks; // NppiSize oBlocksPerMCU = { oFrameHeader.aSamplingFactors[i] & 0x0f, oFrameHeader.aSamplingFactors[i] >> 4 }; // // oBlocks.width = (int)ceil(((this->staticdata).oDstImageSize.width + 7) / 8 * // static_cast<float>(oBlocksPerMCU.width) / nMCUBlocksH); // oBlocks.width = DivUp(oBlocks.width, oBlocksPerMCU.width) * oBlocksPerMCU.width; // // oBlocks.height = (int)ceil(((this->staticdata).oDstImageSize.height + 7) / 8 * // static_cast<float>(oBlocksPerMCU.height) / nMCUBlocksV); // oBlocks.height = DivUp(oBlocks.height, oBlocksPerMCU.height) * oBlocksPerMCU.height; // // (this->staticdata).aDstSize[i].width = oBlocks.width * 8; // (this->staticdata).aDstSize[i].height = oBlocks.height * 8; // } // // // // Scale to target image size // // Assume we only deal with 420 images. // int aSampleFactor[3] = { 1, 2, 2 }; // // (this->memory).nScanSize = (this->staticdata).oDstImageSize.width * (this->staticdata).oDstImageSize.height * 2; // (this->memory).nScanSize = (this->memory).nScanSize > (4 << 20) ? 
(this->memory).nScanSize : (4 << 20); // hipMalloc(&(this->memory).pDScan, (this->memory).nScanSize); // nppiEncodeHuffmanGetSize((this->staticdata).aDstSize[0], 3, &(this->memory).nTempSize); // hipMalloc(&(this->memory).pDJpegEncoderTemp, (this->memory).nTempSize); // // // for (int j = 0; j < 3; j++) { // size_t nPitch1; // hipMallocPitch(&(this->memory).pDCT[j], &nPitch1, oBlocks.width * 64 * sizeof(Npp16s), oBlocks.height); // (this->memory).DCTStep[j] = static_cast<Npp32s>(nPitch1); // //NPP_CHECK_CUDA(hipMallocPitch(&myImage1[j], &nPitch1, aSrcSize[j].width, aSrcSize[j].height)); // hipMallocPitch(&(this->memory).pDImage[j], &nPitch1, (this->staticdata).aDstSize[j].width, (this->staticdata).aDstSize[j].height); // (this->memory).DImageStep[j] = static_cast<Npp32s>(nPitch1); // dataduiqi[j] = nPitch1; // // } // for (int i = 0; i < 3; ++i) // { // nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanDCTables[(oScanHeader.aHuffmanTablesSelector[i] >> 4)].aCodes, nppiDCTable, &(this->staticdata).apDHuffmanDCTable[i]); // nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanACTables[(oScanHeader.aHuffmanTablesSelector[i] & 0x0f)].aCodes, nppiACTable, &(this->staticdata).apDHuffmanACTable[i]); // } // // for (int iComponent = 0; iComponent < 2; ++iComponent) // { // (this->memory).hpCodesDC[iComponent] = pHuffmanDCTables[iComponent].aCodes; // (this->memory).hpCodesAC[iComponent] = pHuffmanACTables[iComponent].aCodes; // (this->memory).hpTableDC[iComponent] = pHuffmanDCTables[iComponent].aTable; // (this->memory).hpTableAC[iComponent] = pHuffmanACTables[iComponent].aTable; // } // } // void process() // { // compress_process_lock.lock(); // nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW((this->memory).pDImage[0], (this->memory).DImageStep[0], // (this->memory).pDCT[0], (this->memory).DCTStep[0], // (this->staticdata).pdQuantizationTables + oFrameHeader.aQuantizationTableSelector[0] * 64, // (this->staticdata).aDstSize[0], // (this->memory).pDCTState); // compress_process_lock.unlock(); // // // nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R((this->memory).pDCT, (this->memory).DCTStep, // 0, oScanHeader.nSs, oScanHeader.nSe, oScanHeader.nA >> 4, oScanHeader.nA & 0x0f, // (this->memory).pDScan, &(this->memory).nScanLength, // (this->memory).hpCodesDC, (this->memory).hpTableDC, (this->memory).hpCodesAC, (this->memory).hpTableAC, // (this->staticdata).apDHuffmanDCTable, // (this->staticdata).apDHuffmanACTable, // (this->staticdata).aDstSize, // (this->memory).pDJpegEncoderTemp); // // // } // // //void writedisk(int picture_num, Package* a, int bag_index, unsigned char*total_malloc, int& pix_index) // void writedisk(int picture_num, Package* a, int bag_index) // { // unsigned char *pDstJpeg = new unsigned char[(this->memory).nScanSize]; // unsigned char *pDstOutput = pDstJpeg; // // oFrameHeader.nWidth = (this->staticdata).oDstImageSize.width; // oFrameHeader.nHeight = (this->staticdata).oDstImageSize.height; // // writeMarker(0x0D8, pDstOutput); // writeJFIFTag(pDstOutput); // writeQuantizationTable(aQuantizationTables[0], pDstOutput); // writeQuantizationTable(aQuantizationTables[1], pDstOutput); // writeFrameHeader(oFrameHeader, pDstOutput); // writeHuffmanTable(pHuffmanDCTables[0], pDstOutput); // writeHuffmanTable(pHuffmanACTables[0], pDstOutput); // writeHuffmanTable(pHuffmanDCTables[1], pDstOutput); // writeHuffmanTable(pHuffmanACTables[1], pDstOutput); // writeScanHeader(oScanHeader, pDstOutput); // // hipMemcpy(pDstOutput, (this->memory).pDScan, (this->memory).nScanLength, hipMemcpyDeviceToHost); // 
pDstOutput += (this->memory).nScanLength; // // writeMarker(0x0D9, pDstOutput); // // char szOutputFiler[100]; // sprintf_s(szOutputFiler, "%s\\%d.jpg", gStructVarible.ImgSavePath, picture_num); // memcpy(total_malloc + pix_index, pDstJpeg, static_cast<int>(pDstOutput - pDstJpeg)); // pix_index += static_cast<int>(pDstOutput - pDstJpeg); // a->Form_one_head(bag_index / gStructVarible.PictureNum, szOutputFiler, pDstOutput - pDstJpeg); // // delete[] pDstJpeg; // // //Write result to file. // //std::ofstream outputFile1(OutputFile, ios::out | ios::binary); // //outputFile1.write(reinterpret_cast<const char *>(pDstJpeg), static_cast<int>(pDstOutput - pDstJpeg)); // //delete[] pDstJpeg; // } // void memoryfree() // { // //hipFree(this->my_in); // for (int i = 0; i < 3; ++i) // { // hipFree((this->memory).pDCT[i]); // hipFree((this->memory).pDImage[i]); // nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanDCTable[i]); // nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanACTable[i]); // } // nppiDCTFree((this->memory).pDCTState); // hipFree((this->memory).pDJpegEncoderTemp); // hipFree((this->memory).pDScan); // hipFree((this->staticdata).pdQuantizationTables); // } // ~T() {} // void Run() // { // char ImgoutputPath[255]; // total_malloc = new unsigned char[100000000]; // pix_index = 0; // clock_t start, end, end2; // int img_index;// // int mFlagIndex = 0; // int OutPutInitialIndex = 0; //Bin // int Bufferoffset = 0;// // bool DatafullFlag = false;//trueGPU // //------------------------------------------------------------------------------------------------------------ // cv::Mat img1(5120, 5120, CV_8UC1); // hipSetDevice((this->param).GpuId); // this->Initialize(); // hipMemcpy2D((this->memory).pDImage[1], dataduiqi[1], gpHudata, compress_imgWidth * sizeof(unsigned char), compress_imgWidth * sizeof(unsigned char), compress_imgHeight, hipMemcpyHostToDevice); // hipMemcpy2D((this->memory).pDImage[2], dataduiqi[2], gpHvdata, compress_imgWidth * sizeof(unsigned char), compress_imgWidth * sizeof(unsigned char), compress_imgHeight, hipMemcpyHostToDevice); // // for (int i = 1; i < 3; ++i) // { // nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW((this->memory).pDImage[i], (this->memory).DImageStep[i], // (this->memory).pDCT[i], (this->memory).DCTStep[i], // (this->staticdata).pdQuantizationTables + oFrameHeader.aQuantizationTableSelector[i] * 64, // (this->staticdata).aDstSize[i], // (this->memory).pDCTState); // } // cout << "T GPU " << param.GpuId << " initial success!" 
<< endl; // // while (!ExtractPointSuccess) // { // mydelay(0.01); // img_index = 0;// // Bufferoffset = 0; // // // while (true)// // { // gComressReadDataLock.lock(); // mTindex = mTindex % (HardwareParam.DeviceCount + 1); // if (gComressionBufferEmpty[mTindex] == false && gComressionBufferWorking[mTindex] == false) // { // //-- // gComressionBufferWorking[mTindex] = true; // OutPutInitialIndex = gComressionBufferStartIndex[mTindex] * Bufferlength;// // mFlagIndex = mTindex; // DatafullFlag = true; // mTindex++; // gComressReadDataLock.unlock(); // break; // } // mTindex++; // gComressReadDataLock.unlock(); // if (ExtractPointSuccess) // break; // } // start = clock(); // sprintf_s(ImgoutputPath, "%s\\%d.bin", gStructVarible.ImgSavePath, OutPutInitialIndex); // //Package data_bag(ImgoutputPath, Bufferlength); // Package data_bag(ImgoutputPath, Bufferlength / gStructVarible.PictureNum); // //pImg // while (DatafullFlag) // { // if (img_index >= Bufferlength) // { // end = clock(); // gComressReadDataLock.lock(); // gComressionBufferWorking[mFlagIndex] = false; // gComressReadDataLock.unlock(); // gComressionBufferEmpty[mFlagIndex] = true; // DatafullFlag = false; // // // // compress_write_lock.lock(); // data_bag.file.open(data_bag.Fname, ios::out | ios::binary); // //data_bag.Form_total_head(); // data_bag.Form_total_head(compress_imgWidth, compress_imgHeight, gStructVarible.PictureNum, OutPutInitialIndex); // //cout << OutPutInitialIndex << endl; // data_bag.file.write(data_bag.head_cache, data_bag.head_bias); // data_bag.file.write(reinterpret_cast<const char *>(total_malloc), static_cast<int>(pix_index)); // data_bag.file.close(); // //data_bag.UnPack(data_bag.Fname); // compress_write_lock.unlock(); // memset(total_malloc, 0, 100000000); // pix_index = 0; // end2 = clock(); // cout << "T GPU " << param.GpuId << " Index" << OutPutInitialIndex << " " << double(end - start) / CLOCKS_PER_SEC << " " << double(end2 - start) / CLOCKS_PER_SEC << endl; // break; // } // int picture_index = OutPutInitialIndex + img_index; // Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum; // // hipMemcpy2D((this->memory).pDImage[0], dataduiqi[0], gHostComressiongBuffer[mFlagIndex] + Bufferoffset, compress_old_Width * sizeof(unsigned char), compress_old_Width * sizeof(unsigned char), compress_old_Height, hipMemcpyHostToDevice); // this->process(); // this->writedisk(picture_index, &data_bag, img_index); // //img_index++; // img_index = img_index + gStructVarible.PictureNum; // } // } // delete[] total_malloc; // this->memoryfree(); // } //}; int T::mTindex = 0; /*------------------------------------------------------------------------------*/ class ReadImg : public Runnable { public: bool ExtractPointWorkingFlag = false;// bool CompressionWorkingFlag = false;// Parameter Devpar;// ~ReadImg() { } void mydelay(double sec)// { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { Devpar.ImgHeight = gStructVarible.ImgHeight; Devpar.ImgWidth = gStructVarible.ImgWidth; Devpar.PictureNum = gStructVarible.PictureNum; Devpar.ImgChannelNum = gStructVarible.ImgChannelNum; int mPageLockBufferIndex = 0; int mCompressionBufferindex = 0; bool ExtractCopySuccess; bool ComressionCopySuccess; // for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { // PageLockBufferEmpty[i] = true; PageLockBufferWorking[i] = false; PageLockBufferStartIndex[i] = 0; // gComressionBufferEmpty[i] = 
true; gComressionBufferWorking[i] = false; gComressionBufferStartIndex[i] = 0; } //cout << "ReadImg initial success!" << endl; while (!ExtractPointSuccess) // { mydelay(0.01); for (int i = 0; i <HardwareParam.DeviceCount * 2; i++)//buffer { ExtractCopySuccess = false; ComressionCopySuccess = false; if (CameraBufferFull[i]) //--true,iBuffer { // if (gStructVarible.RecModelFlag == true && HostUpdateRec == false) { memcpy(gRecupImgData, gCameraBuffer[i], sizeof(unsigned char)*Devpar.ImgChannelNum*Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum);// HostUpdateRec = true; } // if (ExtractPointWorkingFlag) { while (1) //Buffer,Buffer { mPageLockBufferIndex = mPageLockBufferIndex % (HardwareParam.DeviceCount + 1); if (PageLockBufferEmpty[mPageLockBufferIndex])// { memcpy(gHostBuffer[mPageLockBufferIndex], gCameraBuffer[i], sizeof(unsigned char)*Devpar.ImgHeight*Devpar.ImgWidth *Devpar.ImgChannelNum* Bufferlength);// ExtractCopySuccess = true; PageLockBufferEmpty[mPageLockBufferIndex] = false;// false; PageLockBufferStartIndex[mPageLockBufferIndex] = BufferBlockIndex[i];// mPageLockBufferIndex++; break; } mPageLockBufferIndex++; if (ExtractPointSuccess) break; } } else ExtractCopySuccess = true; // if (CompressionWorkingFlag) { while (1) //Buffer,Buffer { mCompressionBufferindex = mCompressionBufferindex % (HardwareParam.DeviceCount + 1); if (gComressionBufferEmpty[mCompressionBufferindex])// { memcpy(gHostComressiongBuffer[mCompressionBufferindex], gCameraBuffer[i], sizeof(unsigned char)*Devpar.ImgHeight*Devpar.ImgWidth *Devpar.ImgChannelNum* Bufferlength);// ComressionCopySuccess = true; gComressionBufferEmpty[mCompressionBufferindex] = false;// false; gComressionBufferStartIndex[mCompressionBufferindex] = BufferBlockIndex[i];// mCompressionBufferindex++; break; } mCompressionBufferindex++; if (ExtractPointSuccess) break; } } else ComressionCopySuccess = true; //false if (ExtractCopySuccess&&ComressionCopySuccess) CameraBufferFull[i] = false; } } } } }; /*--------------------------------------------------------------------------*/ class DataRefresh : public Runnable { public: Parameter Devpar;// ~DataRefresh() { } void mydelay(double sec)// { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { // Devpar.ImgHeight = gStructVarible.ImgHeight; Devpar.ImgWidth = gStructVarible.ImgWidth; Devpar.PictureNum = gStructVarible.PictureNum; Devpar.ImgChannelNum = gStructVarible.ImgChannelNum; clock_t start, end; char path[250]; // unsigned char *Img1 = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.ImgChannelNum]; if (Devpar.ImgChannelNum == 1) { RmwRead8BitBmpFile2Img(gStructVarible.ImgReadPath, NULL, Img1, &Devpar.ImgWidth, &Devpar.ImgHeight); } else { RmwRead8BitBmpFile2Img(gStructVarible.ImgReadPath, Img1, NULL, &Devpar.ImgWidth, &Devpar.ImgHeight); } for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { for (long long j = 0; j < Bufferlength; j++) { memcpy(gCameraBuffer[i] + j* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum, Img1, Devpar.ImgWidth* Devpar.ImgHeight*Devpar.ImgChannelNum * sizeof(unsigned char)); } } // for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { BufferBlockIndex[i] = i - HardwareParam.DeviceCount * 2; CameraBufferFull[i] = false; } //cout << "DataRefresh initial success!" << endl;/**/ mydelay(2); //cout << " start!" 
<< endl; // start = clock(); for (int q = 0; q <3; q++) //5 { for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { BufferBlockIndex[i] += HardwareParam.DeviceCount * 2; //; if (CameraBufferFull[i]) { //cout << "speed is too slow!" << endl; SimulationSuccessFlaf = true; ExtractPointSuccess = true; break; } CameraBufferFull[i] = true; mydelay(Timedatarefresh); } if (ExtractPointSuccess) break; } end = clock(); mydelay(3); ExtractPointSuccess = true; //cout << ":50=" << Timedatarefresh << " over" << endl; delete[] Img1; } }; //--------------------------------------------------------------------------------------// /**/ // // /************************************************* : GetDiskSpaceInfo // : (GB) // LPCWSTR pszDrive . "D:\1.bmp""D:\"// // : RemainingSpace(int) -- (GB)// : . . . // *************************************************/ int GetDiskSpaceInfo(LPCWSTR pszDrive) { DWORD64 qwFreeBytesToCaller, qwTotalBytes, qwFreeBytes; DWORD dwSectPerClust, dwBytesPerSect, dwFreeClusters, dwTotalClusters; BOOL bResult; //GetDiskFreeSpaceEx bResult = GetDiskFreeSpaceEx(pszDrive, (PULARGE_INTEGER)&qwFreeBytesToCaller, (PULARGE_INTEGER)&qwTotalBytes, (PULARGE_INTEGER)&qwFreeBytes); //GetDiskFreeSpace bResult = GetDiskFreeSpace(pszDrive, &dwSectPerClust, &dwBytesPerSect, &dwFreeClusters, &dwTotalClusters); int RemainingSpace; if (bResult) { RemainingSpace = int((DWORD64)dwFreeClusters* (DWORD64)dwSectPerClust*(DWORD64)dwBytesPerSect >> 30); } return RemainingSpace; } /************************************************* : HardwareInit // : // null// HardwareInfo *HardwareProp // : (int) -- // : . // *************************************************/ IMGSIMULATION_API int HardwareInit(HardwareInfo *HardwareProp) { if (gWorkingGpuId.size() != 0) gWorkingGpuId.clear(); hipGetDeviceCount(&gDeviceCount); //HardwareParam HardwareParam.DeviceCount = 0;//GPU HardwareParam.DiskRemainingSpace = GetDiskSpaceInfo(L"C:/pic");//C if (HardwareParam.DiskRemainingSpace < DiskRemainingSpaceThreshold)//%%%100G%%% { return 1;// } for (int i = 0; i<gDeviceCount-1; i++) { hipDeviceProp_t DevProp; hipGetDeviceProperties(&DevProp, i); HardwareProp->major = DevProp.major; HardwareProp->minor = DevProp.minor; if (DevProp.major > 5)//5 { gWorkingGpuId.push_back(i); } } HardwareParam.DeviceCount = gWorkingGpuId.size();//GPU HardwareProp->DeviceCount = HardwareParam.DeviceCount; if (HardwareParam.DeviceCount > 5 || HardwareParam.DeviceCount < 1) { return 2;//5GPU } HardwareParam.ExPointThreads = HardwareParam.DeviceCount;// HardwareProp->ExPointThreads = HardwareParam.DeviceCount; HardwareParam.CompThreads = HardwareParam.DeviceCount;// HardwareProp->CompThreads = HardwareParam.DeviceCount; return 0; } //-----------------------------------------------------------------------------------------------// /************************************************* : Image_Pretreatment // : // const char *path . const char *exten ".bmp" . int ChooseMode : --1 . 
--2 // gHostImage[i]// : gHostPathImgNumber(int) -- // : // *************************************************/ IMGSIMULATION_API int Image_Pretreatment(const char *path, const char *exten, int ChooseMode) { cv::Directory dir; string filepath(path); string fileexten(exten); vector<string> filenames = dir.GetListFiles(filepath, fileexten, false); if (filenames.size() == NULL) { return 0; } else { gHostPathImgNumber = filenames.size(); } switch (ChooseMode) { case 1: { // #ifdef Pretreatment char strFilename[100]; int mWidth; int mHeight; for (int i = 0; i < gHostPathImgNumber; i++) { sprintf_s(strFilename, "%s\\%d.bmp", path, i + 1); //strFilename checkCudaErrors(hipHostMalloc((void**)&gHostImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth * sizeof(unsigned char), hipHostMallocDefault)); if (gStructVarible.ImgBitDeep == 24) { gHostColorImage[i] = new unsigned char[gStructVarible.ImgHeight * gStructVarible.ImgWidth * 3]; } RmwRead8BitBmpFile2Img(strFilename, gHostColorImage[i], gHostImage[i], &mWidth, &mHeight); } #endif // Pretreatment break; } case 2: { // #ifdef Pretreatment for (int i = 0; i < gHostPathImgNumber; i++) { hipHostFree(gHostImage[i]); if (gStructVarible.ImgBitDeep == 24) { hipHostFree(gHostColorImage[i]); } } #endif // Pretreatment break; } default: break; } return gHostPathImgNumber; } /************************************************* : SimulationImageTest // : // const char *path // Infomation *Info // : bool -- // : . . // *************************************************/ IMGSIMULATION_API bool SimulationImageTest(const char *path, Infomation *Info) { hipError_t err; int mWidth, mHeight; gHostPathImgNumber = 5;// Info->ImgProcessingNumbers = gHostPathImgNumber; /**** ****/ for (int i = 0; i < gHostPathImgNumber; i++)// { err = hipHostMalloc((void**)&gHostImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth *gStructVarible.PictureNum * sizeof(unsigned char), hipHostMallocDefault); if (gStructVarible.ImgBitDeep == 24) { err = hipHostMalloc((void**)&gHostColorImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth *gStructVarible.PictureNum * 3 * sizeof(unsigned char), hipHostMallocDefault); } } int Picoffset = gStructVarible.ImgHeight * gStructVarible.ImgWidth;// int PicoffsetColor = gStructVarible.ImgHeight * gStructVarible.ImgWidth * 3;// for (int i = 0; i < gHostPathImgNumber; i++)// { for (int j = 0; j < gStructVarible.PictureNum; j++) { RmwRead8BitBmpFile2Img(path, gHostColorImage[i] + j * PicoffsetColor, gHostImage[i] + j * Picoffset, &mWidth, &mHeight); } } if (gStructVarible.RecModelFlag == 1) GetImgBoxHost(path);// Info->DeviceCount = HardwareParam.DeviceCount; Info->CPUThreadCount = ExtractPointThreads; clock_t start, finish; float Difftime;// float ImageSize;// int ImgChannel;// int ThreadID; /**** ****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); //GPU pExecutor->Init(1, HardwareParam.ExPointThreads, 1); SIM *ExtractPoint = new SIM[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; //RecS recs; if (gStructVarible.RecModelFlag == 0)// { start = clock(); // ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); 
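// Each extraction worker is pinned to one usable GPU (HardwarePar.GpuId = gWorkingGpuId[i]) and gets
// its own copy of the global settings in Devpar, so the workers share no mutable configuration.
// The ThreadID handed to pExecutor->Execute() is a one-hot mask (worker 0 -> 0x01, worker 1 -> 0x02,
// worker 2 -> 0x04, ...); its exact meaning is defined by CThreadPoolExecutor, which is not shown here.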
sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate();// delete pExecutor;// finish = clock();// Difftime = (float)(finish - start) / CLOCKS_PER_SEC;// Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } else // { start = clock(); // ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.DeviceCount; RecExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //RecExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** ****/ pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate(); delete pExecutor; finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } ///**** ****/ //CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); //pExecutor1->Init(1, HardwareParam.CompThreads, 1); //T *Compression_grey = new T[HardwareParam.CompThreads]; //TC *Compression = new TC[HardwareParam.CompThreads]; //start = clock(); // //ThreadID = 0x01;// //for (int i = 0; i < HardwareParam.ExPointThreads; i++) //{ // /**** ****/ // Compression_grey[i].param.DeviceID = i; // Compression_grey[i].param.GpuId = gWorkingGpuId[i]; // Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; // Compression[i].param.DeviceID = i; // Compression[i].param.GpuId = gWorkingGpuId[i]; // Compression[i].param.CompThreads = 
HardwareParam.CompThreads; // if (gStructVarible.ImgBitDeep == 8) // { // pExecutor1->Execute(&Compression_grey[i], ThreadID); // ThreadID = ThreadID << 1; // } // else if (gStructVarible.ImgBitDeep == 24) // { // pExecutor1->Execute(&Compression[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //pExecutor1->Terminate(); //delete pExecutor1; //finish = clock();// // // //Difftime = (float)(finish - start) / CLOCKS_PER_SEC; //Info->CompressionTimes = Difftime; //ImgChannel = gStructVarible.ImgBitDeep / 8;// //ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; //Info->CompressionSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; ///**** ****/ //CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); //pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads, 1); //start = clock(); // //ThreadID = 0x01;// //if (gStructVarible.RecModelFlag == 0)// //{ // for (int i = 0; i < HardwareParam.ExPointThreads; i++) // { // pExecutor2->Execute(&ExtractPoint[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //else //{ // for (int i = 0; i < HardwareParam.ExPointThreads; i++) // { // pExecutor2->Execute(&RecExtractPoint[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //if (gStructVarible.ImgBitDeep == 8) //{ // for (int i = 0; i < HardwareParam.CompThreads; i++) // { // pExecutor2->Execute(&Compression_grey[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //else if (gStructVarible.ImgBitDeep == 24) //{ // for (int i = 0; i < HardwareParam.CompThreads; i++) // { // pExecutor2->Execute(&Compression[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //pExecutor2->Terminate(); //delete pExecutor2; //finish = clock();// // // //Difftime = (float)(finish - start) / CLOCKS_PER_SEC; //Info->SynchronizeTimes = Difftime; //ImgChannel = gStructVarible.ImgBitDeep / 8;// //ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; //Info->SynchronizeSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; // for (int i = 0; i < gHostPathImgNumber; i++) { err = hipHostFree(gHostImage[i]); if (gStructVarible.ImgBitDeep == 24) { err = hipHostFree(gHostColorImage[i]); } if (err != hipSuccess) { return false; } } return true; } IMGSIMULATION_API void SimulationTestReport(const char *path, Infomation *Info) { // Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / (1024 * 1024);// double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s SimulationSuccessFlaf = false; while (!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.05; OnlineImageRecExperiment(3 , Info); // clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } /************************************************************************************/ //qwe IMGSIMULATION_API bool SimulationExperient(int ChooseMode) { clock_t start, finish; Infomation *Info; float Difftime;// 
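// ChooseMode selects which worker set SimulationExperient launches:
//   1 - point-extraction workers only (R, or RecR when gStructVarible.RecModelFlag is set),
//   2 - compression workers only (T for single-channel input, TC for three-channel input),
//   3 - extraction and compression workers together,
// plus the DataRefresh / ReadImg buffer-feeding threads in every mode (and RecUpData in rectangle mode).
// The function returns 0 on success and 1 for an unrecognised mode, despite the bool return type.
// Typical call sequence, as used by the SimulationTest* wrappers later in this file (sketch):
//   Memory_application();      // allocate the shared camera / page-locked / compression buffers
//   SimulationExperient(3);    // runs until DataRefresh sets ExtractPointSuccess
//   Memory_release();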
float ImageSize;// int ImgChannel;// int ThreadID; //cout << ":" << HardwareParam.DeviceCount << endl; switch (ChooseMode) { case 1:// { /**** ****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); //int ThreadsNum; //if (gStructVarible.RecModelFlag == true)//qwt // ThreadsNum = HardwareParam.ExPointThreads + 3; //else // ThreadsNum = HardwareParam.ExPointThreads + 2; pExecutor->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = false; readimg.ExtractPointWorkingFlag = true; if (gStructVarible.RecModelFlag == false)// { ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate();// } else // { GetImgBoxHost(gStructVarible.ImgReadPath); ThreadID = 0x01;// /**** ****/ for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.DeviceCount; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; RecExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ 
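// NOTE: the three sprintf_s calls above write ImgReadPath/ImgSavePath/DataReadPath into
// ExtractPoint[i].Devpar although this branch configures and launches RecExtractPoint[i];
// OnlineImageRecExperiment sets the same fields on RecExtractPoint[i] instead, so this looks
// like a copy-paste slip worth verifying.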
pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate(); cout << "" << endl; delete pExecutor; } break; } case 2:// { CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); pExecutor1->Init(1, 10, 1); T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; DataRefresh datarefresh; ReadImg readimg; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = false; ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgChannelNum == 1) { pExecutor1->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgChannelNum == 3) { pExecutor1->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor1->Execute(&datarefresh, ThreadID); ThreadID = ThreadID << 1; pExecutor1->Execute(&readimg, ThreadID); pExecutor1->Terminate(); delete pExecutor1; break; } case 3://& { CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); pExecutor2->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = true; ThreadID = 0x01;// // if (gStructVarible.RecModelFlag == false)// { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ pExecutor2->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } else // { GetImgBoxHost(gStructVarible.ImgReadPath); for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.DeviceCount = 
HardwareParam.DeviceCount; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; RecExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ pExecutor2->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; } } // for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.GpuId = gWorkingGpuId[i]; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgChannelNum == 1) { pExecutor2->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgChannelNum == 3) { pExecutor2->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } //+ pExecutor2->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&datarefresh, ThreadID); pExecutor2->Terminate(); delete pExecutor2; delete[] ExtractPoint; delete[] RecExtractPoint; delete[] Compression_grey; delete[] Compression; break; } default: return 1; } return 0; } //qwe IMGSIMULATION_API void SimulationTestSynchronize(const char *path, Infomation *Info) { // //Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / (1024 * 1024);// double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s SimulationSuccessFlaf = false; while (!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.05; SimulationExperient(3); // clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } //qwe IMGSIMULATION_API void SimulationTestExtractPoint(const char *path, Infomation *Info) { // //Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / (1024 * 1024);// double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s SimulationSuccessFlaf = false; while 
(!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.1; SimulationExperient(1); // clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } //qwe IMGSIMULATION_API void SimulationTestComression(const char *path, Infomation *Info) { // //Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / (1024 * 1024);// double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s SimulationSuccessFlaf = false; while (!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.1; SimulationExperient(2); // clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } /*---------------------------------------------------------------------------------------*/ /************************************************* : OnlineImageExperiment // : -- // const char *Imgpath . ChooseMode 1 . 2 . 3 &// Infomation *Info // : bool -- // : . 
// *************************************************/ IMGSIMULATION_API bool OnlineImageExperiment(int ChooseMode, const char *Imgpath, Infomation *Info) { hipError_t err; int mWidth, mHeight; clock_t start, finish; float Difftime;// float ImageSize;// int ImgChannel;// int ThreadID; switch (ChooseMode) { case 1:// { /**** ****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); pExecutor->Init(1, HardwareParam.ExPointThreads, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; if (gStructVarible.RecModelFlag == 0)// { start = clock(); // ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; ExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //ExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate();// delete pExecutor;// finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } else // { start = clock(); // ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; RecExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //RecExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** ****/ pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate(); delete pExecutor; finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } break; } case 2:// { CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); 
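// Compression-only path: one JPEG-compression worker per compression thread, T for 8-bit (grey)
// input and TC for 24-bit (colour) input, selected by gStructVarible.ImgBitDeep below.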
pExecutor1->Init(1, HardwareParam.CompThreads, 1); T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; start = clock(); // ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgBitDeep == 8) { pExecutor1->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgBitDeep == 24) { pExecutor1->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor1->Terminate(); delete pExecutor1; finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->CompressionTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->CompressionSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; break; } case 3://& { CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; ThreadID = 0x01;// start = clock(); // if (gStructVarible.RecModelFlag == 0)// { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; ExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //ExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** ****/ pExecutor2->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } else // { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; RecExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //RecExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** ****/ pExecutor2->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ 
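// NOTE: the compression workers configured below (and in case 2 above) are only given DeviceID and
// CompThreads; param.GpuId is never assigned anywhere in OnlineImageExperiment, unlike
// SimulationExperient and OnlineImageRecExperiment. If T::Run()/TC::Run() select the device via
// hipSetDevice(param.GpuId), as the commented-out variant of T earlier in this file does, these
// workers would all fall back to the default value of GpuId; worth verifying if this path is still used.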
Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgBitDeep == 8) { pExecutor2->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgBitDeep == 24) { pExecutor2->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor2->Terminate(); delete pExecutor2; finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->SynchronizeTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->SynchronizeSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; break; } default: return 1; } return 0; } /************************************************* : OnlineImageExperiment // : -- // const char *Imgpath . ChooseMode 1 . 2 . 3 &// Infomation *Info // : bool -- // : . // *************************************************/ IMGSIMULATION_API bool OnlineImageRecExperiment(int ChooseMode, Infomation *Info) { clock_t start, finish; int mWidth, mHeight; float Difftime;// float ImageSize;// int ImgChannel;// int ThreadID; switch (ChooseMode) { case 1:// { /**** ****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); int ThreadsNum; if (gStructVarible.RecModelFlag == 1)//qwt ThreadsNum = HardwareParam.ExPointThreads + 3; else ThreadsNum = HardwareParam.ExPointThreads + 2; pExecutor->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = false; readimg.ExtractPointWorkingFlag = true; if (gStructVarible.RecModelFlag == 0)// { start = clock(); // ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; //sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); //sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); //sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", "C:\\pic\\img_read"); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", "C:\\pic\\img_write"); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", "C:\\pic\\img_data"); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; 
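// NOTE: in this branch the worker paths are hard-coded to "C:\\pic\\img_read", "C:\\pic\\img_write"
// and "C:\\pic\\img_data" while the gStructVarible-based sprintf_s calls are left commented out;
// presumably a debugging leftover, since the rectangle-model branch below reads gStructVarible again.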
pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate();// delete pExecutor;// finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } else // { ThreadID = 0x01;// GetImgBoxHost(gStructVarible.ImgReadPath); /**** ****/ for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.DeviceCount; sprintf_s(RecExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(RecExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(RecExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate(); //cout << "" << endl; delete pExecutor; } break; } case 2:// { CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); pExecutor1->Init(1, HardwareParam.CompThreads + 2, 1); T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = false; ThreadID = 0x01;// for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.GpuId = gWorkingGpuId[i]; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgBitDeep == 8) { pExecutor1->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgBitDeep == 24) { pExecutor1->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor1->Execute(&datarefresh, ThreadID); ThreadID = ThreadID << 1; pExecutor1->Execute(&readimg, ThreadID); pExecutor1->Terminate(); delete pExecutor1; break; } case 3://& { CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); //if (gStructVarible.RecModelFlag == 1)//qwt // pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads+2, 1); //else // pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads+1, 
1); pExecutor2->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = true; ThreadID = 0x01;// if (gStructVarible.RecModelFlag == 0)// { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ pExecutor2->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } else // { GetImgBoxHost(gStructVarible.ImgReadPath); for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(RecExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(RecExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(RecExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; RecExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** ****/ pExecutor2->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; } } // for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.GpuId 
= gWorkingGpuId[i]; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgChannelNum == 1) { pExecutor2->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgChannelNum == 3) { pExecutor2->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } //+ pExecutor2->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&datarefresh, ThreadID); pExecutor2->Terminate(); delete pExecutor2; break; } default: return 1; } return 0; } /************************************************* : OnlineImageRefresh // : // // // : // : // *************************************************/ IMGSIMULATION_API int OnlineImageRefresh(unsigned char *pImg) { if (gCameraBuffer[0] == NULL) return 1; //pImg memcpy(pImg, gCameraBuffer[0], gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.ImgChannelNum * sizeof(unsigned char)); return 0; } /************************************************* : OfflineImageExperiment // : // const char *Imgpath Infomation *Info // : bool -- // : . // *************************************************/ IMGSIMULATION_API bool OfflineImageExperiment(const char *Imgpath, Infomation *Info) { hipError_t err; int mWidth, mHeight; char strFilename[100]; clock_t start, finish; float Difftime;// float ImageSize;// int ImgChannel;// for (int i = 0; i<5; i++) { sprintf_s(strFilename, "%s", Imgpath); //strFilename hipHostMalloc((void**)&gHostImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth * sizeof(unsigned char), hipHostMallocDefault); if (gStructVarible.ImgBitDeep == 24) { gHostColorImage[i] = new unsigned char[gStructVarible.ImgHeight * gStructVarible.ImgWidth * 3]; } RmwRead8BitBmpFile2Img(strFilename, gHostColorImage[i], gHostImage[i], &mWidth, &mHeight); } gHostPathImgNumber = 5;// /**** ****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); pExecutor->Init(1, 1, 1); R ExtractPoint; RecR RecExtractPoint; if (gStructVarible.RecModelFlag == 0)// { start = clock(); // /**** ****/ ExtractPoint.HardwarePar.DeviceID = 0; //ExtractPoint.Devpar.DataReadPath = "C:\\pic\\img_data"; ExtractPoint.Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint.Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint.Devpar.Threshold = gStructVarible.Threshold; ExtractPoint.Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint.Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint.Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint.Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint.Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint.Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** ****/ pExecutor->Execute(&ExtractPoint, 0x01); pExecutor->Terminate();// delete pExecutor;// finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } else // { start = clock(); // /**** ****/ RecExtractPoint.HardwarePar.DeviceID = 0; //RecExtractPoint.Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint.Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint.Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint.Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint.Devpar.LengthMin = gStructVarible.LengthMin; 
RecExtractPoint.Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint.Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint.Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint.Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint.Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** ****/ pExecutor->Execute(&RecExtractPoint, 0x01); pExecutor->Terminate(); delete pExecutor; finish = clock();// // Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;// ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } for (int i = 0; i<5; i++) { err = hipHostFree(gHostImage[i]); if (gStructVarible.ImgBitDeep == 24) { err = hipHostFree(gHostColorImage[i]); } } return 0; } /************************************************* : SinglePictureExtractPoint // : // const char *Imgpath const char *outputPath // // : // : outputPath\\OffLine.bin // . *************************************************/ IMGSIMULATION_API bool SinglePictureExtractPoint(const char *Imgpath, const char*outputPath) { char strfilename[255]; Parameter Devpar; Devpar.ImgHeight = gStructVarible.ImgHeight; Devpar.ImgWidth = gStructVarible.ImgWidth; Devpar.Threshold = gStructVarible.Threshold; Devpar.LengthMin = gStructVarible.LengthMin; Devpar.LengthMax = gStructVarible.LengthMax; Devpar.AreaMin = gStructVarible.AreaMin; Devpar.AreaMax = gStructVarible.AreaMax; Devpar.PictureNum = 1; Devpar.PicBlockSize = gStructVarible.PicBlockSize; Devpar.ImgChannelNum = gStructVarible.ImgChannelNum; Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128; Devpar.ColThreadNum = (Devpar.ImgMakeborderWidth / Devpar.PicBlockSize + 127) / 128 * 128; Devpar.RowThreadNum = Devpar.ImgHeight*Devpar.PictureNum / Devpar.PicBlockSize; // dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, Devpar.ImgHeight*Devpar.PictureNum, 1); dim3 mGrid2(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1); // unsigned char *tHostImage; hipHostMalloc((void**)&tHostImage, Devpar.ImgHeight * Devpar.ImgWidth *Devpar.ImgChannelNum *Devpar.PictureNum * sizeof(unsigned char), hipHostMallocDefault); if (Devpar.ImgChannelNum == 1) { RmwRead8BitBmpFile2Img(Imgpath, NULL, tHostImage, &Devpar.ImgWidth, &Devpar.ImgHeight); } else { RmwRead8BitBmpFile2Img(Imgpath, tHostImage, NULL, &Devpar.ImgWidth, &Devpar.ImgHeight); } //------------------------------------------------------------------------------------------------------------------------------- unsigned char * tDevColorImage; unsigned char * tDevGrayImage; unsigned char * tDevpad; unsigned char * tDev2val; unsigned char * tDevcounter; hipMalloc((void**)&tDevColorImage, sizeof(unsigned char)* Devpar.ImgWidth* Devpar.ImgHeight*Devpar.ImgChannelNum*Devpar.PictureNum); hipMalloc((void**)&tDevGrayImage, sizeof(unsigned char)* Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum); hipMalloc((void**)&tDevpad, sizeof(unsigned char)* Devpar.ImgMakeborderWidth* Devpar.ImgHeight*Devpar.PictureNum); hipMalloc((void**)&tDev2val, sizeof(unsigned char)* Devpar.ImgMakeborderWidth* Devpar.ImgHeight*Devpar.PictureNum); hipMalloc((void**)&tDevcounter, sizeof(unsigned char)* Devpar.ImgMakeborderWidth* Devpar.ImgHeight*Devpar.PictureNum); // short * tDevRecXLeft; short * tDevRecYLeft; short * tDevRecXRight; short * tDevRecYRight; short * tDevLength; short * tDevArea; double 
*tDevXpos; double *tDevYpos; short *tDevIndex; hipMalloc((void**)&tDevRecXLeft, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// xmin hipMalloc((void**)&tDevRecYLeft, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymin hipMalloc((void**)&tDevRecXRight, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// xmax hipMalloc((void**)&tDevRecYRight, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymax hipMalloc((void**)&tDevLength, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// hipMalloc((void**)&tDevArea, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// hipMalloc((void**)&tDevXpos, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// xpos hipMalloc((void**)&tDevYpos, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// ypos hipMalloc((void**)&tDevIndex, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// // short * tHostRecXLeft = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostRecYLeft = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostRecXRight = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostRecYRight = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostLength = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostArea = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; double * tHostXpos = new double[Devpar.ColThreadNum*Devpar.RowThreadNum]; double * tHostYpos = new double[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostIndex = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; // if (Devpar.ImgChannelNum == 1) { hipMemcpy(tDevGrayImage, tHostImage, sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, hipMemcpyHostToDevice); // GrayMakeBorder << <mGrid1, 128 >> > (tDevGrayImage, tDevpad, Devpar); } else { hipMemcpy(tDevColorImage, tHostImage, sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, hipMemcpyHostToDevice); ColorMakeBorder << <mGrid1, 128 >> > (tDevColorImage, tDevpad, Devpar); } // Binarization << <mGrid1, 128 >> > (tDevpad, tDev2val, tDevcounter, Devpar); // Dilation << <mGrid1, 128 >> > (tDev2val, tDevcounter, Devpar); hipMemcpy(tDev2val, tDevcounter, sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, hipMemcpyDeviceToDevice); Erosion << <mGrid1, 128 >> > (tDev2val, tDevcounter, Devpar); // GetCounter << <mGrid2, 128 >> > (tDevcounter, tDevLength, tDevRecXLeft, tDevRecYLeft, tDevRecXRight, tDevRecYRight, Devpar);// SelectTrueBox << <mGrid2, 128 >> >(tDevcounter, tDevLength, tDevRecXLeft, tDevRecYLeft, tDevRecXRight, tDevRecYRight, tDevIndex, Devpar); SelectNonRepeatBox << <mGrid2, 128 >> > (tDevRecXLeft, tDevRecYLeft, tDevIndex, Devpar); GetNonRepeatBox << <mGrid2, 128 >> >(tDevRecXLeft, tDevRecYLeft, tDevIndex, Devpar); GetInfo << <mGrid2, 128 >> > (tDevpad, tDevIndex, tDevRecXLeft, tDevRecYLeft, tDevRecXRight, tDevRecYRight, tDevXpos, tDevYpos, tDevArea, Devpar); // hipMemcpy(tHostLength, tDevLength, sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost); hipMemcpy(tHostArea, tDevArea, sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost); hipMemcpy(tHostXpos, tDevXpos, sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost); hipMemcpy(tHostYpos, tDevYpos, sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost); hipMemcpy(tHostIndex, tDevIndex, sizeof(short)* 
Devpar.ColThreadNum * Devpar.RowThreadNum, hipMemcpyDeviceToHost); vector<CircleInfo>myInfo; int mtempindex = 0; for (int j = 0; j <Devpar.ColThreadNum * Devpar.RowThreadNum; j++) { if (tHostIndex[j] != 0) { CircleInfo temp; mtempindex++; temp.index = (short)mtempindex; temp.length = tHostLength[j]; temp.area = tHostArea[j]; temp.xpos = tHostXpos[j]; temp.ypos = tHostYpos[j]; myInfo.push_back(temp); } } if (myInfo.size() > 0) { FILE* fp; sprintf(strfilename, "%s\\OffLine.bin", outputPath); //3strFilename fp = fopen(strfilename, "wb"); fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp); fclose(fp); } // hipHostFree(tHostImage); hipFree(tDevRecXLeft); hipFree(tDevRecYLeft); hipFree(tDevRecXRight); hipFree(tDevRecYRight); hipFree(tDevLength); hipFree(tDevArea); hipFree(tDevXpos); hipFree(tDevYpos); hipFree(tDevIndex); hipFree(tDevColorImage); hipFree(tDevGrayImage); hipFree(tDevpad); hipFree(tDev2val); hipFree(tDevcounter); delete[]tHostRecXLeft; delete[]tHostRecYLeft; delete[]tHostRecXRight; delete[] tHostRecYRight; delete[]tHostLength; delete[]tHostArea; delete[]tHostXpos; delete[]tHostYpos; delete[]tHostIndex; return 0; } /************************************************* : DrawPointFlag // : // const char *pathBin . const char *pathImg // const char *pathWrite // : // : . . // *************************************************/ IMGSIMULATION_API void DrawPointFlag(const char *pathBin, const char *pathImg, const char *pathWrite) { // FILE *fr; fr = fopen(pathBin, "rb"); // fseek(fr, 0, SEEK_END);//stream long lSize = ftell(fr);// rewind(fr);//stream // int FlagSize = lSize / sizeof(CircleInfo); CircleInfo *RInfo = (CircleInfo*)malloc(sizeof(CircleInfo)*FlagSize); // fread(RInfo, sizeof(CircleInfo), FlagSize, fr); fclose(fr); // Mat Img = imread(pathImg, IMREAD_COLOR); cv::Vec3b pflag(0, 0, 255); for (int i = 0; i < FlagSize; i++) { CircleInfo myinfo = RInfo[i]; Img.at<Vec3b>(myinfo.xpos, myinfo.ypos) = pflag; if (myinfo.xpos - 3 >= 0) { Img.at<Vec3b>(myinfo.xpos - 1, myinfo.ypos) = pflag; Img.at<Vec3b>(myinfo.xpos - 2, myinfo.ypos) = pflag; Img.at<Vec3b>(myinfo.xpos - 3, myinfo.ypos) = pflag; } if (myinfo.xpos + 3 <= gStructVarible.ImgHeight) { Img.at<Vec3b>(myinfo.xpos + 1, myinfo.ypos) = pflag; Img.at<Vec3b>(myinfo.xpos + 2, myinfo.ypos) = pflag; Img.at<Vec3b>(myinfo.xpos + 3, myinfo.ypos) = pflag; } if (myinfo.xpos - 3 >= 0) { Img.at<Vec3b>(myinfo.xpos, myinfo.ypos - 1) = pflag; Img.at<Vec3b>(myinfo.xpos, myinfo.ypos - 2) = pflag; Img.at<Vec3b>(myinfo.xpos, myinfo.ypos - 3) = pflag; } if (myinfo.ypos + 3 <= gStructVarible.ImgWidth) { Img.at<Vec3b>(myinfo.xpos, myinfo.ypos + 1) = pflag; Img.at<Vec3b>(myinfo.xpos, myinfo.ypos + 2) = pflag; Img.at<Vec3b>(myinfo.xpos, myinfo.ypos + 3) = pflag; } } imwrite(pathWrite, Img); free(RInfo); } /************************************************* : Memory_application // : // // // : // : // *************************************************/ IMGSIMULATION_API void Memory_application() { compress_old_Width = gStructVarible.ImgWidth; compress_old_Height = gStructVarible.ImgHeight * gStructVarible.PictureNum; //imgWidth = gStructVarible.ImgWidth; // //imgHeight = gStructVarible.ImgHeight * gStructVarible.PictureNum; compress_imgWidth = (compress_old_Width + 7) / 8 * 8; compress_imgHeight = (compress_old_Height + 7) / 8 * 8; // compressratio = gStructVarible.CompressionRatio; // int bmpSize = compress_imgWidth * compress_imgHeight; gpHudata = new unsigned char[bmpSize]; // gpHvdata = new unsigned char[bmpSize]; memset(gpHudata, 128, 
compress_imgHeight * compress_imgWidth); memset(gpHvdata, 128, compress_imgHeight * compress_imgWidth); blocks.x = compress_imgWidth / 8; //cudablocks(imgWidth / 8,imgHeight / 8) blocks.y = compress_imgHeight / 8; blocks.z = 1; quantityassgnment(); // /**/ // gCameraDress= (unsigned char*)malloc(gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char) * Bufferlength* HardwareParam.DeviceCount * 2); for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { gCameraBuffer[i] = gCameraDress + i*gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char) * Bufferlength; } // for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { gHostComressiongBuffer[i] = (unsigned char*)malloc(gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char) * Bufferlength); } // for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { hipHostMalloc((void**)&gHostBuffer[i], gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char)*Bufferlength, hipHostMallocDefault); } // gRecupImgData = (unsigned char*)malloc(gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.PictureNum*gStructVarible.ImgChannelNum * sizeof(unsigned char)); } /************************************************* : Memory_release // : // // // : // : // *************************************************/ IMGSIMULATION_API void Memory_release() { free(gCameraDress); gCameraDress = NULL; for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { //free(gCameraBuffer[i]); gCameraBuffer[i] = NULL; } for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { hipHostFree(gHostBuffer[i]); free(gHostComressiongBuffer[i]); gHostComressiongBuffer[i] = NULL; } free(gRecupImgData); delete[]gpHudata; delete[]gpHvdata;//qwt } /************************************************* : SetCameraPar // : // int ScrBufferlength . null// : bool -- // : DLL// *************************************************/ IMGSIMULATION_API bool SetCameraPar(int ScrBufferlength) { Bufferlength = ScrBufferlength; return 0; } /************************************************* : SetParameter // : // Parameter *info . 
int len // Parameter gStructVarible // : bool -- // : DLL// *************************************************/ IMGSIMULATION_API bool SetParameter(Parameter *info, int len) { char count = 0; if (info->ImgReadPath != NULL) { //gStructVarible.ImgReadPath = info->ImgReadPath; sprintf_s(gStructVarible.ImgReadPath, "%s//1.bmp", info->ImgReadPath); count++; } if (info->ImgSavePath != NULL) { //gStructVarible.ImgSavePath = info->ImgSavePath; sprintf_s(gStructVarible.ImgSavePath, "%s", info->ImgSavePath); count++; } if (info->DataReadPath != NULL) { //gStructVarible.DataReadPath = info->DataReadPath; sprintf_s(gStructVarible.DataReadPath, "%s", info->DataReadPath); count++; } if (info->ImgBitDeep != -1) { gStructVarible.ImgBitDeep = info->ImgBitDeep; count++; } if (info->ImgChannelNum != -1) { gStructVarible.ImgChannelNum = info->ImgChannelNum; count++; } if (info->ImgHeight != -1) { gStructVarible.ImgHeight = info->ImgHeight; count++; } if (info->ImgWidth != -1) { gStructVarible.ImgWidth = info->ImgWidth; count++; } if (info->Threshold != -1) { gStructVarible.Threshold = info->Threshold; count++; } if (info->LengthMin != -1) { gStructVarible.LengthMin = info->LengthMin; count++; } if (info->LengthMax != -1) { gStructVarible.LengthMax = info->LengthMax; count++; } if (info->PicBlockSize != -1) { gStructVarible.PicBlockSize = info->PicBlockSize; count++; } if (info->AreaMin != -1) { gStructVarible.AreaMin = info->AreaMin; count++; } if (info->AreaMax != -1) { gStructVarible.AreaMax = info->AreaMax; count++; } if (info->CompressionRatio != -1) { gStructVarible.CompressionRatio = info->CompressionRatio; count++; } if (info->PictureNum != -1) { gStructVarible.PictureNum = info->PictureNum; count++; } if (info->TerminateFlag != -1) { gStructVarible.TerminateFlag = info->TerminateFlag; if (gStructVarible.TerminateFlag == 1) { ExtractPointSuccess = true;// } else { ExtractPointSuccess = false;// } count++; } if (info->RecModelFlag != -1) { gStructVarible.RecModelFlag = info->RecModelFlag; count++; } if (info->RecPadding != -1) { gStructVarible.RecPadding = info->RecPadding; count++; } gStructVarible.ImgChannelNum = gStructVarible.ImgBitDeep / 8;//=/8 // if (count == len) return true; return false; } /************************************************* : GetParameter // : // Parameter *info Parameter gStructVarible // : NULL // : // *************************************************/ IMGSIMULATION_API void GetParameter(Parameter *info) { /*>1<*/sprintf_s(info->ImgReadPath, "%s", gStructVarible.ImgReadPath); /*>2<*/sprintf_s(info->ImgSavePath, "%s", gStructVarible.ImgSavePath); /*>3<*/sprintf_s(info->DataReadPath, "%s", gStructVarible.DataReadPath); /*>4<*/info->ImgBitDeep = gStructVarible.ImgBitDeep; /*>5<*/info->ImgChannelNum = gStructVarible.ImgChannelNum; /*>6<*/info->ImgHeight = gStructVarible.ImgHeight; /*>7<*/info->ImgWidth = gStructVarible.ImgWidth; /*>8<*/info->ImgMakeborderWidth = gStructVarible.ImgMakeborderWidth; /*>9<*/info->Threshold = gStructVarible.Threshold; /*>10<*/info->LengthMin = gStructVarible.LengthMin; /*>11<*/info->LengthMax = gStructVarible.LengthMax; /*>12<*/info->PicBlockSize = gStructVarible.PicBlockSize; /*>13<*/info->ColThreadNum = gStructVarible.ColThreadNum; /*>14<*/info->RowThreadNum = gStructVarible.RowThreadNum; /*>15<*/info->AreaMin = gStructVarible.AreaMin; /*>16<*/info->AreaMax = gStructVarible.AreaMax; /*>17<*/info->CompressionRatio = gStructVarible.CompressionRatio; /*>18<*/info->PictureNum = gStructVarible.PictureNum; /*>19<*/info->TerminateFlag = 
gStructVarible.TerminateFlag; /*>20<*/info->RecModelFlag = gStructVarible.RecModelFlag; /*>21<*/info->RecPadding = gStructVarible.RecPadding; } /************************************************* : ClearDataCache // : DLL // // // : // : // . *************************************************/ IMGSIMULATION_API void ClearDataCache() { // sprintf_s(gStructVarible.ImgReadPath, "%s//1.bmp", "C://pic//img_read"); sprintf_s(gStructVarible.ImgSavePath, "%s", "C://pic//img_write"); sprintf_s(gStructVarible.DataReadPath, "%s", "C://pic//img_data"); gStructVarible.AreaMax = 99999; gStructVarible.AreaMin = 0; gStructVarible.ColThreadNum = 320; gStructVarible.CompressionRatio = 2000; gStructVarible.ImgChannelNum = 1; gStructVarible.ImgHeight = 5120; gStructVarible.ImgMakeborderWidth = 5120; gStructVarible.ImgWidth = 5120; gStructVarible.LengthMax = 99999; gStructVarible.LengthMin = 0; gStructVarible.PicBlockSize = 16; gStructVarible.PictureNum = 1; gStructVarible.RecModelFlag = false; gStructVarible.RecPadding = 4; gStructVarible.RowThreadNum = 320; gStructVarible.Threshold = 60; gStructVarible.TerminateFlag = 0; // R::mRindex = 0; RecR::mRecindex = 0; T::mTindex = 0; TC::mTCindex = 0; // Bufferlength = 50; ExtractPointInitialSuccessFlag[0] = false; ExtractPointInitialSuccessFlag[1] = false; ExtractPointInitialSuccessFlag[2] = false; ExtractPointSuccess = false; // gHostRecData.clear(); gRecNum = gHostRecData.size(); gSingleImgRecNum = gHostRecData.size(); gRecupImgData = NULL; DevUpdateRec[0] = false; DevUpdateRec[1] = false; DevUpdateRec[2] = false; HostUpdateRec = false; RecupdataInitialSuccessFlag = false; // for (int i = 0; i < 6; i++) { BufferBlockIndex[i] = 0; gCameraBuffer[i] = false; CameraBufferFull[i] = false; } for (int i = 0; i<4; i++) { // gHostBuffer[i] = NULL; PageLockBufferEmpty[i] = true; PageLockBufferWorking[i] = false; PageLockBufferStartIndex[i] = 0; // gHostComressiongBuffer[i] = NULL; gComressionBufferEmpty[i] = true; gComressionBufferWorking[i] = false; gComressionBufferStartIndex[i] = 0; } } /*******************************************************************************/ /************************************************* : GetFiles // : (.bin)(.bin), // const char * path .bin vector<string>& files binstring // : // : // . *************************************************/ IMGSIMULATION_API void GetFiles(const char * path, vector<string>& files) { // intptr_t hFile = 0; // struct __finddata64_t fileinfo; string p;// if ((hFile = _findfirst64(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)// { do { //, if ((fileinfo.attrib & _A_SUBDIR)) { //"."&&".." //. //.. // if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0) GetFiles(p.assign(path).append("\\").append(fileinfo.name).c_str(), files); } //, else { files.push_back(p.assign(path).append("\\").append(fileinfo.name)); } } while (_findnext64(hFile, &fileinfo) == 0); //_findclose _findclose(hFile); } } /************************************************* : UnzipFeatureBins // : (.bin)(.bin), // const char *InputPath 50bufferlenth const char *OutputFilename // // : // : // . 
*************************************************/
void UnzipFeatureBins(const char *InputPath, const char *OutputFilename)
{
	char strFilename[255];
	FILE *fr;
	fr = fopen(InputPath, "rb");
	if (fr == NULL) // failed to open the packed file
	{
		cout << "FILE fail open" << endl;
		return;
	}
	fseek(fr, 0, SEEK_END);
	long lSize = ftell(fr); // total size of the packed file in bytes
	rewind(fr);
	int Datalength = lSize / sizeof(CircleInfo);
	CircleInfo *RInfo = (CircleInfo*)malloc(sizeof(CircleInfo)*Datalength);
	fread(RInfo, sizeof(CircleInfo), Datalength, fr);
	fclose(fr);
	// walk through the records; a header record marks the start of one image's features
	int Dataoffset = 0;
	int Dataindex = 0;
	while (Dataoffset < Datalength)
	{
		CircleInfo mHead = RInfo[Dataoffset];
		Dataoffset++;
		int mlen = 0;
		if (mHead.area == 0 && int(mHead.xpos) == 99999) // header record found
		{
			Dataindex = mHead.index;
			mlen = mHead.length;
			if (mlen > 0 && Dataoffset + mlen <= Datalength)
			{
				FILE* fp;
				sprintf_s(strFilename, "%s\\%d.bin", OutputFilename, Dataindex); // build the output file name
				fp = fopen(strFilename, "wb");
				fwrite(&RInfo[Dataoffset], sizeof(CircleInfo)*mlen, 1, fp);
				fclose(fp);
				Dataoffset = Dataoffset + mlen;
			}
		}
	}
	free(RInfo); // release the read buffer
}
/*************************************************
Function name: UnzipMultiFeatureBins
Description:   Unpacks the packed feature .bin files whose indices are listed
               in BinfileIndex from the directory Filepath.
Input:         const char *Filepath     - directory holding the packed files
               const int arrsize        - number of entries in BinfileIndex
               const int BinfileIndex[] - indices of the files to unpack
Return:        none
Notes:         Exported DLL interface.
*************************************************/
IMGSIMULATION_API void UnzipMultiFeatureBins(const char* Filepath, const int arrsize, const int BinfileIndex[])
{
	if (arrsize == 0) // nothing to do
		return;
	char Binfile[200]; // path of the current packed file
	for (int index = 0; index < arrsize; index++)
	{
		sprintf(Binfile, "%s//%d%s", Filepath, BinfileIndex[index], ".bin");
		UnzipFeatureBins(Binfile, Filepath);
	}
}
/*************************************************
Function name: UnzipFeatureFiles
Description:   Scans Filepath for packed feature .bin files and unpacks each
               of them into the same directory.
Input:         const char *Filepath - directory holding the packed files
Return:        none
Notes:         Exported DLL interface.
*************************************************/
IMGSIMULATION_API void UnzipFeatureFiles(const char * Filepath)
{
	vector<string>FeatureFilesPass;
	GetFiles(Filepath, FeatureFilesPass);
	if (FeatureFilesPass.size() > 0)
	{
		for (int i = 0; i < FeatureFilesPass.size(); i++)
		{
			UnzipFeatureBins(FeatureFilesPass[i].c_str(), Filepath);
		}
	}
}
// Unpacks a single packed image .bin file into the directory Filepath.
IMGSIMULATION_API void UnzipOneBin(const char* Filepath, const char* BinPath)
{
	//Package temp(BinPath, content);
	Package temp(BinPath);
	temp.UnPack(BinPath, Filepath);
	return;
}
/*************************************************
Function name: UnzipMultiImgBins
Description:   Unpacks the packed image .bin files whose indices are listed in
               BinfileIndex, decompressing up to five files in parallel threads.
Input:         const char *Filepath     - directory holding the packed files
               const int arrsize        - number of entries in BinfileIndex
               const int BinfileIndex[] - indices of the files to unpack
Return:        none
Notes:         Exported DLL interface.
*************************************************/
IMGSIMULATION_API void UnzipMultiImgBins(const char* Filepath, const int arrsize, const int BinfileIndex[])
{
	if (arrsize == 0) // nothing to do
		return;
	// each worker thread gets its own path buffer so that the next sprintf
	// cannot overwrite a string that a running thread is still using
	char Binfile[5][200];
	int index = 0;
	for (int i = 0; i + 5 < arrsize; i = i + 5)
	{
		sprintf(Binfile[0], "%s//%d%s", Filepath, BinfileIndex[i], ".bin");
		thread th1(UnzipOneBin, Filepath, Binfile[0]);
		sprintf(Binfile[1], "%s//%d%s", Filepath, BinfileIndex[i + 1], ".bin");
		thread th2(UnzipOneBin, Filepath, Binfile[1]);
		sprintf(Binfile[2], "%s//%d%s", Filepath, BinfileIndex[i + 2], ".bin");
		thread th3(UnzipOneBin, Filepath, Binfile[2]);
		sprintf(Binfile[3], "%s//%d%s", Filepath, BinfileIndex[i + 3], ".bin");
		thread th4(UnzipOneBin, Filepath, Binfile[3]);
		sprintf(Binfile[4], "%s//%d%s", Filepath, BinfileIndex[i + 4], ".bin");
		thread th5(UnzipOneBin, Filepath, Binfile[4]);
		index = i + 5;
		th1.join();
		th2.join();
		th3.join();
		th4.join();
		th5.join();
	}
	// unpack the remaining files (fewer than five) serially
	for (; index < arrsize; index++)
	{
		sprintf(Binfile[0], "%s//%d%s", Filepath, BinfileIndex[index], ".bin");
		Package cc(Binfile[0]);
		cc.UnPack(Binfile[0], Filepath);
	}
}
/*************************************************
Function name: UnzipPictureFiles
Description:   Scans Filepath for packed image .bin files and unpacks all of
               them, processing five files at a time on worker threads.
Input:         const char *Filepath - directory holding the packed files
Return:        none
Notes:         Exported DLL interface.
*************************************************/
IMGSIMULATION_API void UnzipPictureFiles(const char * Filepath)
{
	vector<string>FeatureFilesPass;
	GetFiles(Filepath, FeatureFilesPass);
	int interpret = 0;
	if (FeatureFilesPass.size() > 0)
	{
		for (int i = 0; i + 5 < FeatureFilesPass.size(); i = i + 5)
		{
			//cout << FeatureFilesPass[i].c_str() << endl;
			//Package cc(FeatureFilesPass[i].c_str(), Bufferlength/ gStructVarible.PictureNum);
			//cc.UnPack(FeatureFilesPass[i].c_str(), Filepath);
			//thread th1(UnzipOneBin, Filepath, FeatureFilesPass[i].c_str(), Bufferlength / gStructVarible.PictureNum);
			//thread th2(UnzipOneBin, Filepath, FeatureFilesPass[i+1].c_str(), Bufferlength / gStructVarible.PictureNum);
			//thread th3(UnzipOneBin, Filepath, FeatureFilesPass[i+2].c_str(), Bufferlength / gStructVarible.PictureNum);
			//thread th4(UnzipOneBin, Filepath, FeatureFilesPass[i+3].c_str(), Bufferlength / gStructVarible.PictureNum);
			//thread th5(UnzipOneBin, Filepath, FeatureFilesPass[i+4].c_str(), Bufferlength / gStructVarible.PictureNum);
			thread th1(UnzipOneBin, Filepath, FeatureFilesPass[i].c_str());
			thread th2(UnzipOneBin, Filepath, FeatureFilesPass[i + 1].c_str());
			thread th3(UnzipOneBin, Filepath, FeatureFilesPass[i + 2].c_str());
			thread th4(UnzipOneBin, Filepath, FeatureFilesPass[i + 3].c_str());
			thread th5(UnzipOneBin, Filepath, FeatureFilesPass[i + 4].c_str());
			interpret = i + 5;
			th1.join();
			th2.join();
			th3.join();
			th4.join();
			th5.join();
		}
		for (; interpret < FeatureFilesPass.size(); interpret++)
		{
			//Package cc(FeatureFilesPass[interpret].c_str(), Bufferlength / gStructVarible.PictureNum);
			Package cc(FeatureFilesPass[interpret].c_str());
			cc.UnPack(FeatureFilesPass[interpret].c_str(), Filepath);
		}
	}
}
/*---------------------------------------------------------------------------------------*/
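/*
   Usage sketch (illustrative only, not part of the library build):
   the block below shows one way a client could drive the exported offline
   interface implemented above - fill a Parameter structure, hand it to
   SetParameter, run SinglePictureExtractPoint, then read back the CircleInfo
   records it writes. Parameter, SetParameter, SinglePictureExtractPoint and
   CircleInfo are taken from this file; the macro name IMGSIM_USAGE_EXAMPLE,
   the function name UsageExampleMain, the paths and all numeric values are
   hypothetical placeholders, and the field count passed to SetParameter is
   only assumed to match the library's internal counting. Define
   IMGSIM_USAGE_EXAMPLE explicitly when experimenting with this sketch.
*/
#ifdef IMGSIM_USAGE_EXAMPLE
int UsageExampleMain()
{
	Parameter cfg;
	memset(&cfg, 0, sizeof(cfg));
	// hypothetical paths - replace with the real environment
	sprintf_s(cfg.ImgReadPath, "%s", "C:\\pic\\img_read");
	sprintf_s(cfg.ImgSavePath, "%s", "C:\\pic\\img_write");
	sprintf_s(cfg.DataReadPath, "%s", "C:\\pic\\img_data");
	// image geometry and extraction thresholds (placeholder values)
	cfg.ImgBitDeep = 8;
	cfg.ImgChannelNum = 1;
	cfg.ImgHeight = 5120;
	cfg.ImgWidth = 5120;
	cfg.Threshold = 60;
	cfg.LengthMin = 30;
	cfg.LengthMax = 300;
	cfg.PicBlockSize = 16;
	cfg.AreaMin = 0;
	cfg.AreaMax = 99999;
	cfg.CompressionRatio = 2000;
	cfg.PictureNum = 1;
	cfg.TerminateFlag = 0;
	cfg.RecModelFlag = 0;
	cfg.RecPadding = 4;
	SetParameter(&cfg, 18); // 18 fields populated above (assumed to match the library's count)

	// run the single-image pipeline; it writes <outputPath>\OffLine.bin
	SinglePictureExtractPoint("C:\\pic\\img_read\\1.bmp", "C:\\pic\\img_write");

	// read the extracted marker features back from the binary result file
	FILE* fp = fopen("C:\\pic\\img_write\\OffLine.bin", "rb");
	if (fp != NULL)
	{
		CircleInfo info;
		while (fread(&info, sizeof(CircleInfo), 1, fp) == 1)
			printf("marker %d: pos=(%.2f, %.2f) length=%d area=%d\n",
				info.index, info.xpos, info.ypos, info.length, info.area);
		fclose(fp);
	}
	return 0;
}
#endif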
9dd51eb28a8419a9eced154fce909cfe9faa106a.cu
#include"Imgsimulation.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cuda.h> #include <device_functions.h> #include <opencv2/opencv.hpp> #include <iostream> #include <string.h> #include <fstream> #include <string> #include <io.h> #include <vector> #include <time.h> #include <thread> #include <mutex> #include <atomic> #include <stdio.h> #include <algorithm> #include "Thread.h" #include "ThreadPoolExecutor.h" #include "cuda_profiler_api.h" #include <helper_cuda.h>//错误处理 #include <helper_string.h> #include <npp.h> #include "Endianess.h" #include "useful.h" #include "kernel.h" #include <Windows.h> #include <GdiPlus.h> #pragma comment( lib, "GdiPlus.lib" ) using namespace Gdiplus; using namespace std; using namespace cv; double Timedatarefresh = 0.5; bool SimulationSuccessFlaf = false; //线程索申请; std::mutex gExtrackPointLock;//R类和Rec类的线程安全锁 std::mutex gComressReadDataLock; std::mutex compress_process_lock;//线程锁申 std::mutex compress_write_lock;//线程锁申 std::mutex compress_writeCPU_lock;//线程锁申 //根据设备性能定义 #define ExtractPointThreads 2 #define CompressionThreads 2 #define CUDAStreams 5 #define GRAYCompressStreams 5 //磁盘剩余存储空间阈值(GB) #define DiskRemainingSpaceThreshold 50 //根据图片大小定义block和thread个数 int gHostImgblock = ExtractPointThreads * CUDAStreams; int gDeviceCount; int gHostPathImgNumber; dim3 blocks; //压缩程序需要的cuda 分块配置 dim3 threads(8, 8); //压缩程序需要的block 线程数配置 //界面传参结构体 Parameter gStructVarible{ NULL,NULL,NULL,8,1,5120,5120,5120,60,30,300,8,640,640,0,99999,2000,5,0,0 ,4 }; //标志点信息结构体 Infomation SignPoint; //硬件配置结构体 HardwareInfo HardwareParam;//硬件配置结构体 #define Pretreatment #ifdef Pretreatment #define ReadImageNumber 250 #endif // Pretreatment unsigned char* gHostImage[250] = { NULL }; unsigned char* gHostColorImage[250] = { NULL }; //-------------------------方位盒Model数据-----------------------------// typedef struct { short RecXmin; short RecYmin; short RecXmax; short RecYmax; }RecData;//方位盒数据结构 vector<RecData> gHostRecData;//CPU方位盒数据容器 int gRecNum;//方位盒数量(这个是拼图后和规整后的方位盒数量) int gSingleImgRecNum;//单张图方位盒数量 /*-------------------------数据缓冲数据定义-----------------------*/ struct CircleInfo//特征存储结构体(24字节) { short index; short length; short area; double xpos; double ypos; }; //实时刷新图像 unsigned char * OnlineRefreshIMG; //通信变量 int BufferBlockIndex[6] = { 0 };//缓冲区刷新的次数(更新了多少次600张图片) int Bufferlength;//每个缓冲区的长度(需要初始化) vector<int>gWorkingGpuId;//用来存能用设备的设备号 bool ExtractPointInitialSuccessFlag[3] = { false };//用于标记各个提点类是否初始化完成 bool ExtractPointSuccess = false;//实验结束标志位 //矩形盒更新标志位 unsigned char * gRecupImgData = NULL;//这个矩形盒数据更新时所对应的缓冲区(一个图片大小) bool DevUpdateRec[3] = { false };//当该标志位为true时,表示 CPU端矩形盒数据已经更新完成,GPU端需要拷贝CPU端矩形盒数据至GPU端来更新包围盒子 bool HostUpdateRec = false; //当该标志为true时表示主机端矩形盒子数据更新了 bool RecupdataInitialSuccessFlag = false; //相机缓冲区 unsigned char * gCameraDress=NULL; unsigned char * gCameraBuffer[6] = { NULL }; bool CameraBufferFull[6] = { false };//用于通信提点线程,相机内存数据准备就绪 //页锁内存缓冲区(这个用于提点) unsigned char * gHostBuffer[4] = { NULL }; bool PageLockBufferEmpty[4] = { true }; bool PageLockBufferWorking[4] = { false }; int PageLockBufferStartIndex[4]; //压缩缓冲区(用于压缩) unsigned char *gHostComressiongBuffer[4] = { NULL }; bool gComressionBufferEmpty[4] = { true }; bool gComressionBufferWorking[4] = { false }; int gComressionBufferStartIndex[4]; //--------------------------------------------------------开始---------------------------------------------// /*********************************************************************************************** Function: RGBtoYUV(核函数) Description: 只在彩图压缩中使用 
将.bmp图像的R、G、B数据转化为压缩所需要的亮度和色差数据,这些数据都在显存中 Calls: 无 Input: unsigned char* dataIn(显存原图像地址) int imgHeight(图像高度) int imgWidth(图像宽度) unsigned int nPitch(经过对齐后的数据宽度) Output: unsigned char* Y, unsigned char* Cb, unsigned char* Cr(得到的亮度和色差数据) ************************************************************************************************/ __global__ void RGBtoYUV(unsigned char* dataIn, unsigned char* Y, unsigned char* Cb, unsigned char* Cr, int imgHeight, int imgWidth, int nPitch, int old_Height, int old_Width) { int xIndex = threadIdx.x + blockIdx.x * blockDim.x; int yIndex = threadIdx.y + blockIdx.y * blockDim.y; if (xIndex < old_Width && yIndex < old_Height) { unsigned char blue = dataIn[yIndex * old_Width * 3 + xIndex * 3 + 2]; unsigned char green = dataIn[yIndex * old_Width * 3 + xIndex * 3 + 1]; unsigned char red = dataIn[yIndex * old_Width * 3 + xIndex * 3]; unsigned char y = 0.299 * red + 0.587 * green + 0.114 * blue; unsigned char cb = -0.1687 * red - 0.3313 * green + 0.5 * blue + 127; unsigned char cr = 0.5 * red - 0.4187 * green - 0.0813 * blue + 127; Y[yIndex * nPitch + xIndex] = y; Cb[yIndex * nPitch + xIndex] = cb; Cr[yIndex * nPitch + xIndex] = cr; } } /*--------------------------------压缩程序需要的结构体needmemory-------------------------------*/ struct needmemory { Npp16s *pDCT[3] = { 0,0,0 }; //GPU内DCT变换后数据储存变量 Npp32s DCTStep[3]; //记录字节对齐的DCT变换数据大小 NppiDCTState *pDCTState; Npp8u *pDImage[3] = { 0,0,0 }; //GPU内图像的YCbCr数据 Npp32s DImageStep[3]; //记录字节对齐的YCbCr数据大小 Npp8u *pDScan; //GPU内霍夫曼编码扫描数据 Npp32s nScanSize; //pDScan的初始化大小 Npp8u *pDJpegEncoderTemp; //GPU内霍夫曼编码的中间数据 size_t nTempSize; // pDJpegEncoderTemp的大小 Npp32s nScanLength; //霍夫曼编码后的pDScan大小 Npp8u *hpCodesDC[3]; //标准霍夫曼表的DC、AC值和编码 Npp8u *hpCodesAC[3]; Npp8u *hpTableDC[3]; Npp8u *hpTableAC[3]; }; /*--------------------------------压缩程序需要的结构体needdata-------------------------------*/ struct needdata { NppiSize oDstImageSize; //输出的jpg图片大小(长宽值) NppiSize aDstSize[3]; //实际压缩图片区域范围 Npp8u *pdQuantizationTables; //GPU中的标准量化表 NppiEncodeHuffmanSpec *apDHuffmanDCTable[3]; // GPU中的霍夫曼直流表 NppiEncodeHuffmanSpec *apDHuffmanACTable[3]; // GPU中的霍夫曼交流表 }; struct Pk { int Offest;//每个文件的偏移量 int FileLen;//文件长度 //int FileNameLen;//文件名长度 //char* FileName;//需要打包的文件名 int FileNumber; }; class Package { public: //Package(const char* Fname, int FileNum) :Fname(Fname), FileNum(FileNum) Package(const char* Fname) :Fname(Fname) { table_scale = 0; concordancesize = 0; head_cache = new char[20000]; head_bias = 0; } ~Package() { delete[] head_cache; delete[]concordance; } //void Form_one_head(int index, char* Filename, int FileLen); void Package_init(int Num) { FileNum = Num; concordance = new Pk[FileNum]; } void Form_one_head(int index, int one_picture_index, int FileLen); void UnPack(const char* name, const char* save_path); //解包 //void Form_total_head(); void Form_total_head(int one_picture_width, int one_picture_height, int picture_number, int picture_index); Pk* concordance; fstream file; int concordancesize; //索引表大小 int FileNum; //文件个数 const char* Fname; //打包完成后的文件名 char* head_cache; //头文件总大小 int head_bias; //头文件总偏移 int table_scale; }; //void Package::Form_one_head(int index, char* Filename, int FileLen) void Package::Form_one_head(int index, int one_picture_index, int FileLen) { if (index == 0) //得到每个文件的偏移位置 { concordance[index].Offest = 0; } else { concordance[index].Offest = concordance[index - 1].Offest + concordance[index - 1].FileLen; } //table_scale = table_scale + strlen(Filename) + 1 + 3 * sizeof(int); //算索引表大小 table_scale = table_scale + 3 * sizeof(int); 
//concordance[index].FileNameLen = strlen(Filename) + 1; //文件名大小 //concordance[index].FileName = new char[50]; //strcpy(concordance[index].FileName, Filename); //cout << concordance[index].FileName << endl; concordance[index].FileNumber = one_picture_index; concordance[index].FileLen = FileLen; } void Package::Form_total_head(int one_picture_width, int one_picture_height, int picture_number, int picture_index) { concordancesize = table_scale + 6 * sizeof(int); //得到索引表大小 memcpy(head_cache, (char*)&concordancesize, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&FileNum, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&one_picture_width, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&one_picture_height, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&picture_number, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&picture_index, sizeof(int)); head_bias += sizeof(int); //cout << FileNum << endl; for (int i = 0; i < FileNum; ++i) { memcpy(head_cache + head_bias, (char*)&concordance[i].Offest, sizeof(int)); head_bias += sizeof(int); memcpy(head_cache + head_bias, (char*)&concordance[i].FileLen, sizeof(int)); head_bias += sizeof(int); //memcpy(head_cache + head_bias, &concordance[i].FileNameLen, sizeof(int)); memcpy(head_cache + head_bias, &concordance[i].FileNumber, sizeof(int)); head_bias += sizeof(int); //memcpy(head_cache + head_bias, concordance[i].FileName, concordance[i].FileNameLen); //head_bias += concordance[i].FileNameLen; } } void Package::UnPack(const char *name, const char* save_path) //解包 { int one_picture_width, one_picture_height, picture_number, picture_index; file.open(name, ios::in | ios::binary); file.read((char*)&concordancesize, sizeof(int)); //读取索引表大小 file.read((char*)&FileNum, sizeof(int)); //读取文件个数 file.read((char*)&one_picture_width, sizeof(int)); file.read((char*)&one_picture_height, sizeof(int)); file.read((char*)&picture_number, sizeof(int)); file.read((char*)& picture_index, sizeof(int)); file.seekg(8 + 4 * 4, ios::beg); concordance = new Pk[FileNum]; for (int i = 0; i < FileNum; ++i) //读取索引表具体的内容 { file.read((char*)&concordance[i].Offest, sizeof(int)); //读取偏移量 file.read((char*)&concordance[i].FileLen, sizeof(int)); //读取文件大小 file.read((char*)&concordance[i].FileNumber, sizeof(int)); //file.read((char*)&concordance[i].FileNameLen, sizeof(int)); //读取文件名大小 //concordance[i].FileName = new char[concordance[i].FileNameLen]; //memset(concordance[i].FileName, 0, sizeof(char)*concordance[i].FileNameLen);//设置为零 //file.read(concordance[i].FileName, concordance[i].FileNameLen);//读取文件名 } fstream file1; for (int i = 0; i < FileNum; ++i) { char arr[1024] = { 0 }; //sprintf(arr, "%s", concordance[i].FileName); //另存在文件夹map中 sprintf_s(arr, "%s\\%d.jpg", save_path, concordance[i].FileNumber); file1.open(arr, ios::out | ios::binary); file.seekg(concordancesize + concordance[i].Offest, ios::beg); //打开文件 for (int j = 0; j < concordance[i].FileLen; ++j) //copy文件 { file1.put(file.get()); } file1.close(); Mat img = imread(arr, IMREAD_UNCHANGED); for (int j = 0; j < picture_number; j++) { char one_image_save_path[50]; sprintf_s(one_image_save_path, "%s\\%d.jpg", save_path, picture_index + i * picture_number + j); cv::Rect rect(0, j * one_picture_height / picture_number, one_picture_width, one_picture_height / picture_number); Mat image_cut = Mat(img, rect); Mat image_copy = image_cut.clone(); imwrite(one_image_save_path, image_copy); } 
//char one_image_save_path[50]; //sprintf_s(one_image_save_path, "%s\\%d.bin", save_path, picture_index); } file.close(); //for (int i = 0; i < FileNum; ++i)//释放内存 //{ //delete[]concordance[i].FileName; //} } unsigned char* gpHudata; //灰度图片压缩时使用,用来初始化固定的色差值 unsigned char* gpHvdata; //-------------------------------------标志点提取核函数----------------------------------------// /************************************************* 函数名称: ColorMakeBorder // 函数描述: 此函数在图像Width方向上对像素数目进行填充,将Width方向填充为128的整数倍; . 宽度填充计算公式为:int imgWidth = (width + 127) / 128 * 128; // 输入参数:const unsigned char *colorimg ;colorimg是24位彩色图像数据; . Parameter devpar;devpar是包含了图像信息的参数; // 输出参数:unsigned char *dst;dst是在Width方向上填充像素点后的图像数据,填充的点的像素值取值0; // 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . 该核函数倍调用时,线程配置为: block(128,1,1)、 Grid(ImgMakeborderWidth/128, ImgHeight,1); . GPU中一个线程对应处理一个像素点// *************************************************/ __global__ void ColorMakeBorder(const unsigned char * colorimg, unsigned char *dst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x*blockDim.x;//图像列索引 const int Id_x = blockIdx.y;//图像行索引 int b = 0; int g = 0; int r = 0; if (Id_y < devpar.ImgWidth) { b = colorimg[3 * Id_y + Id_x * devpar.ImgWidth *devpar.ImgChannelNum]; g = colorimg[3 * Id_y + 1 + Id_x * devpar.ImgWidth * devpar.ImgChannelNum]; r = colorimg[3 * Id_y + 2 + Id_x * devpar.ImgWidth * devpar.ImgChannelNum]; dst[Id_y + Id_x * devpar.ImgMakeborderWidth] = unsigned char((r * 30 + g * 59 + b * 11 + 50) / 100); } }; /************************************************* 函数名称: GrayMakeBorder // 函数描述: 此函数在图像Width方向上对像素数目进行填充,将Width方向填充为128的整数倍; . 宽度填充计算公式为:int imgWidth = (width + 127) / 128 * 128; // 输入参数:const unsigned char *src ;Src是灰度图像数据; . Parameter devpar;devpar是包含了图像信息的参数; // 输出参数:unsigned char *dst;dst是在Width方向上填充像素点后的图像数据,填充的点的像素值取值0; // 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . 该核函数倍调用时,线程配置为: block(128,1,1)、 Grid(ImgMakeborderWidth/128, ImgHeight,1); . GPU中一个线程对应处理一个像素点// *************************************************/ __global__ void GrayMakeBorder(const unsigned char *src, unsigned char *dst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x*blockDim.x;//图像列索引 const int Id_x = blockIdx.y;//图像行索引 if (Id_y < devpar.ImgWidth) { dst[Id_y + Id_x * devpar.ImgMakeborderWidth] = src[Id_y + Id_x * devpar.ImgWidth]; } } /************************************************* 函数名称: Binarization // 函数描述: 函数根据设定的图像阈值,对图像进行二值化;二值化阈值保存在输入参数 Parameter devpar中; . 当像素值大于阈值时,将该点像素值置为255;当像素值小于阈值时,将该点像素值置为0; // 输入参数:unsigned char *psrcgray 是灰度图像数据,实参是填充宽度后的灰度图; . Parameter devpar 是包含了图像信息参数; // 输出参数:unsigned char *pdst2val 是二值化结果的数据,实参对应二值图; . unsigned char *pdstcounter 是二值化结果的数据副本, 实参对应轮廓图 // 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . 该核函数倍调用时,线程配置为: block(128,1,1)、 Grid(ImgMakeborderWidth/128, ImgHeight,1); . GPU中一个线程对应处理一个像素点 ; // *************************************************/ __global__ void Binarization(unsigned char *psrcgray, unsigned char *pdst2val, unsigned char *pdstcounter, Parameter devpar) { const int Id = threadIdx.x + (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;//线程号索引 int temp = int(psrcgray[Id]);//寄存器保存像素,提高访存效率 if (Id < devpar.ImgMakeborderWidth * devpar.ImgHeight*devpar.PictureNum)//边界保护 { pdst2val[Id] = unsigned char(255 * int(temp>devpar.Threshold));//二值化 pdstcounter[Id] = unsigned char(255 * int(temp>devpar.Threshold)); } } /************************************************* 函数名称: Dilation // 函数描述: 函数对二值化图进行8邻域膨胀操作,即若某一个点像素值为0的点的八邻域内有非0像素点,则将该点置为255; // 输入参数:unsigned char *psrc 是二值化图数据,该参数作用是作为腐蚀操作的模板副本; . 
Parameter devpar 是包含了图像信息参数; // 输出参数:unsigned char *pdst 是腐蚀操作结果的数据,实际调用时,该参数输入二值化图数据,通过膨胀操作对其进行更新; // 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . 该核函数倍调用时,线程配置为: block(128,1,1)、 Grid(ImgMakeborderWidth/128, ImgHeight,1); . GPU中一个线程对应处理一个像素点 ; // *************************************************/ __global__ void Dilation(unsigned char *psrc, unsigned char *pdst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x *blockDim.x;//Id_y代表列索引 const int Id_x = blockIdx.y;//Id_x代表行信息 int temp;//临时变量:用于累加八邻域像素值 if (Id_y> 1 && Id_y < (devpar.ImgMakeborderWidth - 1) && Id_x>0 && Id_x < devpar.PictureNum*devpar.ImgHeight - 1) { if (psrc[Id_y + Id_x * devpar.ImgMakeborderWidth] == 0) { temp = int(psrc[Id_y - 1 + (Id_x - 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + (Id_x - 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + (Id_x - 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y - 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y - 1 + (Id_x + 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + (Id_x + 1)* devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + (Id_x + 1)* devpar.ImgMakeborderWidth]); pdst[Id_y + Id_x * devpar.ImgMakeborderWidth] = temp > 0 ? 255 : 0;//膨胀操作 } } } /************************************************* 函数名称: Erosion // 函数描述: 函数对膨胀操作后的图进行4邻域腐蚀操作,即若某一个点像素值为255的点的4邻域(十字架邻域)内有0像素点,则将该点置为0; // 输入参数:unsigned char *psrc 是膨胀操作后的图像数据; . Parameter devpar 是包含了图像信息参数; // 输出参数:unsigned char *pdst 是腐蚀操作结果的数据,即标志点轮廓图。 . 实际调用时,该参数输入膨胀操作后的图像数据,通过腐蚀操作对其进行更新;// 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . 该核函数倍调用时,线程配置为: block(128,1,1)、 Grid(ImgMakeborderWidth/128, ImgHeight,1); . GPU中一个线程对应处理一个像素点 ; // *************************************************/ __global__ void Erosion(unsigned char *psrc, unsigned char *pdst, Parameter devpar) { const int Id_y = threadIdx.x + blockIdx.x *blockDim.x;//Id_y代表行信息 const int Id_x = blockIdx.y;//Id_x代表列信息 int temp;//临时变量累加4邻域像素值 //利用4领域值掏空内部点,提取轮廓信息,现在的dst就是存储轮廓的信息 if (Id_y > 0 && Id_y < (devpar.ImgMakeborderWidth - 1) && Id_x>0 && Id_x <devpar.ImgHeight*devpar.PictureNum - 1) { if (psrc[Id_y + Id_x * devpar.ImgMakeborderWidth] != 0) { temp = int(psrc[Id_y + (Id_x - 1)*devpar.ImgMakeborderWidth]) + int(psrc[Id_y - 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y + 1 + Id_x * devpar.ImgMakeborderWidth]) + int(psrc[Id_y + (Id_x + 1)*devpar.ImgMakeborderWidth]);//用4领域腐蚀 pdst[Id_y + Id_x * devpar.ImgMakeborderWidth] = temp >= 1020 ? 0 : 255;//腐蚀操作 } } } /************************************************* 函数名称: GetCounter // 函数描述: 根据输入轮廓图,利用8邻域追踪法提取标志点的周长和包围盒; // . 输入参数:unsigned char *psrc 是轮廓图数据; . Parameter devpar 是包含了图像信息参数; // 输出参数:short *c_length 是提取的标志点周长,当提取失败时,将周长特征置为0; . x_min、y_min、x_max、y_max是标志点的包围盒数据,包围盒是一个与标志点相切的矩形,包围盒 . 数据包括矩形的左上角坐标(x_min,y_min)和右下角坐标(x_max,y_max); . 当特征提取失败时,将包围盒数据置0; 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . GPU中一个线程对应处理一个图像块,一个线程至多提取出一个标志点的特征信息。 . 该核函数倍调用时,线程配置为: block(128,1,1)、Grid(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1); . 其中ColThreadNum和RowThreadNum分别是对图像进行分块后的列块数和行块数,且对列方向图像块数目ColThreadNum . 进行了填充,填充为了128的整数倍; . 
图像块大小一般为PicBlockSize×PicBlockSize,其中PicBlockSize取值一般为8、16、32; *************************************************/ __global__ void GetCounter(unsigned char *src, short *c_length, short* x_min, short * y_min, short* x_max, short *y_max, Parameter devpar) { /*八零域方向数组,用于更新轮廓点,初始化方向为正右方(0号位),顺时针旋转45°(索引加1)*/ const int direction_y[8] = { 1,1,0,-1,-1,-1,0,1 }; const int direction_x[8] = { 0,1,1,1,0,-1,-1,-1 }; //short Picblocksize = devpar.PicBlockSize;//获取图像块大小 /*获取行列索引号*/ const int y = (blockIdx.x*blockDim.x + threadIdx.x) * devpar.PicBlockSize;//y代表列索引 const int x = blockIdx.y * devpar.PicBlockSize;//x代表行索引 const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x;//线程号 /*初始化输出结果值*/ c_length[Id] = 0; x_min[Id] = 0; x_max[Id] = 0; y_min[Id] = 0; y_max[Id] = 0; bool SuccessFlag = false;//用于标记提点是否成功,当为true时,表示当前线程块已经成功提取到一个标志点特征 /*初始化包围盒数据*/ short Rec_xmx = 0, Rec_xmm = 0; short Rec_ymx = 0, Rec_ymm = 0; if ((y / devpar.PicBlockSize) < (devpar.ImgWidth / devpar.PicBlockSize) && (x / devpar.PicBlockSize) < (devpar.ImgHeight*devpar.PictureNum / devpar.PicBlockSize))//边界判断 { for (int i = x; i < (x + devpar.PicBlockSize); i++) { for (int j = y; j < (y + devpar.PicBlockSize); j++) { if (255 == src[j + i * devpar.ImgMakeborderWidth]) { /*初始化包围盒数据*/ Rec_ymx = j; Rec_ymm = j; Rec_xmx = i; Rec_xmm = i; /*定义根节点*/ short root_x = i;//行索引 short root_y = j;//列索引 short counts;//用于8邻域循环计数 short curr_d = 0;//方向数组索引计数,取值0-7表示八零域的8各不用的方位 /*进行跟踪*/ for (short cLengthCount = 2; cLengthCount < devpar.LengthMax; cLengthCount++)// { /*定义根标记点*/ short boot_x = root_x; short boot_y = root_y; /*更新方位盒数据*/ Rec_xmx = Rec_xmx > root_x ? Rec_xmx : root_x; Rec_ymx = Rec_ymx > root_y ? Rec_ymx : root_y; Rec_xmm = Rec_xmm < root_x ? Rec_xmm : root_x; Rec_ymm = Rec_ymm < root_y ? Rec_ymm : root_y; /*搜索根节点的八邻域点*/ for (counts = 0; counts < 8; counts++) { /*防止索引出界*/ curr_d -= curr_d >= 8 ? 8 : 0; curr_d += curr_d < 0 ? 8 : 0; /*事实上,只需要判断7个领域内的信息(除了第一次之外),当count=6时刚好循环到上一个轮廓点*/ if (cLengthCount >2 && (counts == 6)) { curr_d++; continue; } /*获取邻域点boot*/ boot_x = root_x + direction_x[curr_d];//更新行索引 boot_y = root_y + direction_y[curr_d];//更新列索引 /*判断点是否越界,超过图像的索引区域*/ if (boot_x < 0 || boot_x >= devpar.ImgHeight*devpar.PictureNum || boot_y < 0 || boot_y >= devpar.ImgWidth) { curr_d++; continue; } /*如果存在边缘*/ if (255 == src[boot_y + boot_x * devpar.ImgMakeborderWidth]) { curr_d -= 2; //更新当前方向 root_x = boot_x;//更新根节点 root_y = boot_y; break; } curr_d++; } // end for /*边界条件判断*/ if (8 == counts || (root_x >= (x + devpar.PicBlockSize) && root_y >= (y + devpar.PicBlockSize))) { break; } /*正常结束*/ if (root_y == j && root_x == i) { x_min[Id] = Rec_xmm; x_max[Id] = Rec_xmx; y_min[Id] = Rec_ymm; y_max[Id] = Rec_ymx; c_length[Id] = cLengthCount; SuccessFlag = true; break; }//正常结束if }//外围for结束 }//判断前景点if结束 if (SuccessFlag) break; j = Rec_ymx > j ? Rec_ymx : j;//更新列方向搜索步长 }//第一个for结束 if (SuccessFlag) break; i = Rec_xmx > i ? 
Rec_xmx : i;//更新行方向搜索步长 }//第二个for 结束 } }//核函数结束 /*筛选方位盒*/ __global__ void SelectTrueBox(unsigned char *ImgCounter, short *clength, short* Recxmm, short * Recymm, short* Recxmx, short *Recymx, short*index, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x; index[Id] = 0; short temp1 = 0; short yMidPos = 0; short xMidPos = 0; short Rxmm = Recxmm[Id]; short Rymm = Recymm[Id]; short RecBoxHeight = Recxmx[Id] - Recxmm[Id]; short RecBoxWidth = Recymx[Id] - Recymm[Id]; if (clength[Id] > devpar.LengthMin) { if ((float(RecBoxHeight) / float(RecBoxWidth))<1.5&& float((RecBoxHeight) / float(RecBoxWidth)) >0.7)//方位盒长款比怎么确定 { if (Rxmm > 0 && Rymm > 0 && Recxmx[Id] < devpar.ImgHeight*devpar.PictureNum - 1 && Recymx[Id] < devpar.ImgWidth - 1) { yMidPos = Rymm + RecBoxWidth / 2;//中心坐标 xMidPos = Rxmm + RecBoxHeight / 2;//中心坐标 for (int i = -1; i < 2; i++)//看矩形盒子中心9领域是否有点 { if (xMidPos + 1 < devpar.ImgHeight*devpar.PictureNum&&yMidPos + 1 < devpar.ImgWidth) { temp1 += ImgCounter[yMidPos - 1 + (xMidPos + i)*devpar.ImgMakeborderWidth]; temp1 += ImgCounter[yMidPos + (xMidPos + i)*devpar.ImgMakeborderWidth]; temp1 += ImgCounter[yMidPos + 1 + (xMidPos + i)*devpar.ImgMakeborderWidth]; } } for (int i = 0; Rxmm + i <= Rxmm + RecBoxHeight - i; i++)//判断Height方向 { temp1 += ImgCounter[yMidPos + (Rxmm + i)*devpar.ImgMakeborderWidth] > 0 ? 1 : 0; temp1 += ImgCounter[yMidPos + (Rxmm + RecBoxHeight - i)*devpar.ImgMakeborderWidth] > 0 ? 1 : 0; } for (int i = 0; Rymm + i <= Rymm + RecBoxWidth - i; i++)//判断width方向 { temp1 += ImgCounter[Rymm + i + xMidPos * devpar.ImgMakeborderWidth] > 0 ? 1 : 0; temp1 += ImgCounter[Rymm + RecBoxWidth - i + xMidPos * devpar.ImgMakeborderWidth] > 0 ? 1 : 0; } index[Id] = temp1 > 4 ? 0 : 1; } } } } __global__ void SelectNonRepeatBox(short* Recxmm, short * Recymm, short*index, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x;//获取线程索引号 short temp = 0;//建立临时变量,用于表示当前块提取的特征是否应当删除 if (index[Id] != 0) { if ((Id > devpar.ColThreadNum) && (Id < devpar.ColThreadNum*(devpar.RowThreadNum - 1)))//边界判定 { if (Recxmm[Id] != 0)//判断当前块提取特征是否有效 { /*判断一个图像块获取的坐标是否和与它相邻的右图像块(列+1)、下图像块(行+1)和右上图像块(行-1,列+1)获取的坐标一致*/ temp += ((short(Recxmm[Id]) == short(Recxmm[Id + 1])) && (Recymm[Id] == Recymm[Id + 1])) ? 1 : 0;//右 temp += ((short(Recxmm[Id]) == short(Recxmm[Id + devpar.ColThreadNum])) && (short(Recymm[Id]) == short(Recymm[Id + devpar.ColThreadNum]))) ? 1 : 0;//下 temp += ((short(Recxmm[Id]) == short(Recxmm[Id - devpar.ColThreadNum + 1])) && (short(Recymm[Id]) == short(Recymm[Id - devpar.ColThreadNum + 1]))) ? 1 : 0;//右上 index[Id] = temp > 0 ? 0 : 1;//输出特征有效标志 } } } } __global__ void GetNonRepeatBox(short *Recxmm, short *Recymm, short*index, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x;//线程索引 const int y = blockIdx.x*blockDim.x + threadIdx.x;//列块数索引 const int x = blockIdx.y;//行块数索引 int Id2 = 0; if (index[Id] != 0) { for (int i = x - 4; i < x + 4; i++) for (int j = y - 4; j < y + 4; j++) if (j > 0 && j < devpar.ImgWidth / devpar.PicBlockSize&&i > 0 && i < devpar.ImgHeight*devpar.PictureNum / devpar.PicBlockSize) { Id2 = j + i * devpar.ColThreadNum; if (index[Id2] != 0) { if ((short(Recxmm[Id]) == short(Recxmm[Id2])) && (short(Recymm[Id]) == short(Recymm[Id2]))) { index[Id] = Id > Id2 ? 0 : 1; } } } } } /************************************************* 函数名称: GetInfo // 函数描述: 根据方位盒信息和输入灰度图像,提取标志点重心和面积特征 // . 输入参数:unsigned char* src_gray 是灰度图像; . 
short *length 是 提取出的周长特征,当length>LengthMin 时,表示提取出的方位盒信息有效; . x_min、y_min、x_max、y_max是标志点的包围盒数据; . Parameter devpar 是包含了图像信息参数; // 输出参数:short *xpos、short*ypos 是利用灰度重心法提取出的标志点重心坐标; . short *area 是提取出来的面积特征 . 当方位盒数据无效时,将short *xpos、short*ypos、short *area都置0; 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . GPU中一个线程对应处理一个图像块的方位盒数据; . 该核函数倍调用时,线程配置与GetCounter函数一致: block(128,1,1)、Grid(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1); *************************************************/ __global__ void GetInfo(unsigned char* src_gray, short *index, short* x_min, short * y_min, short* x_max, short *y_max, double *xpos, double*ypos, short *area, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x; short myArea = 0; double sum_gray = 0;//圆点区域的灰度值之和 double x_sum = 0;//x灰度值加权和 double y_sum = 0;//y灰度值加权和 short mThreshold = devpar.Threshold;//二值化阈值 xpos[Id] = 0; ypos[Id] = 0; int xRealIndex = 0; //保存方位盒边界 short ymm = y_min[Id]; short ymx = y_max[Id]; short jcount = (ymx - ymm + 3) / 4 * 4; unsigned char temp0, temp1, temp2, temp3;//用寄存器暂存图像数据,减小全局内存的访问,提高访存效率 if (index[Id] >0) { //循环优化,这种情况会多计算一些区域的值(需要处理一下) for (int i = x_min[Id]; i <= x_max[Id]; i++) for (int j = ymm; j <= ymm + jcount; j = j + 4) { xRealIndex = i%devpar.ImgHeight; //防止越界 temp0 = j > ymx ? 0 : 1; //qwt temp1 = j + 1 > ymx ? 0 : 1; temp2 = j + 2 > ymx ? 0 : 1; temp3 = j + 3 > ymx ? 0 : 1; //根据二值化阈值 temp0 *= src_gray[j *temp0 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[j *temp0 + i * devpar.ImgMakeborderWidth] : 0; temp1 *= src_gray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth] : 0; temp2 *= src_gray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth] : 0; temp3 *= src_gray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth] > mThreshold ? src_gray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth] : 0; myArea += temp0 > 0 ? 1 : 0;//面积累加 myArea += temp1 > 0 ? 1 : 0; myArea += temp2 > 0 ? 1 : 0; myArea += temp3 > 0 ? 1 : 0; sum_gray += temp0 + temp1 + temp2 + temp3; x_sum += xRealIndex* temp0 + xRealIndex * temp1 + xRealIndex * temp2 + xRealIndex * temp3; y_sum += j * temp0 + (j + 1)*temp1 + (j + 2)*temp2 + (j + 3)*temp3; } index[Id] = (myArea > devpar.AreaMin&&myArea < devpar.AreaMax) ? 1 : 0; area[Id] = myArea; xpos[Id] = x_sum / sum_gray; ypos[Id] = y_sum / sum_gray; } } /************************************************* 函数名称: GetRecInfo // 函数描述: 矩形模式的特征提取函数;根据预提取的包围盒数据、灰度图和轮廓图,提取标志点的特征信息 // 输入参数:RecData* mRec 预提取的方位盒数据 . unsigned char *psrcgray 灰度图数据 . unsigned char *psrccounter 轮廓图数据 . Parameter devpar 图像信息结构体 // 输出参数:short *length 周长特征 . short* area 面积特征 . short *xpos, short *ypos 重心坐标 返回值 : 无 // 其他说明: 函数为核函数,在主机端调用,设备端执行; . GPU中一个线程对应处理一个图像块的方位盒数据; . 核函数的线程配置为block(128,1,1) Grid(Gridsize, 1, 1);其中Gridsize= mRecCount / 128,mRecCount为预提取的包围盒数量, . 
在预提取包围盒时,对包围盒数量进行了填充,填充为了128的整数倍 *************************************************/ __global__ void GetRecInfo(RecData* mRec, unsigned char *psrcgray, unsigned char *psrccounter, short *length, short* area, double *xpos, double *ypos, Parameter devpar) { const int Id = threadIdx.x + blockIdx.x*blockDim.x;//获取线程号 int mThreshold = devpar.Threshold;//二值化阈值 short myArea = 0;//用于面积计数 int clengthCount = 0;//计算周长的临时变量 short clength = 0;//周长计数 double sum_gray = 0;//圆点区域的灰度值之和 double x_sum = 0;//x灰度值加权和 double y_sum = 0;//y灰度值加权和 int xRealIndex = 0; /*读取方位盒*/ short xmm = mRec[Id].RecXmin; short xmx = mRec[Id].RecXmax; short ymm = mRec[Id].RecYmin; short ymx = mRec[Id].RecYmax; short jcount = (ymx - ymm + 3) / 4 * 4;//列向循环次数规整 unsigned char temp0, temp1, temp2, temp3;//temp保存灰度图像数据临时变量(用寄存器储存图像数据,提高访问速度) unsigned char t0, t1, t2, t3;//t用于保存轮廓图像数据临时变量 /*输出特征初始化*/ area[Id] = 0; xpos[Id] = 0; ypos[Id] = 0; length[Id] = 0; for (int i = xmm; i <= xmx; i++) for (int j = ymm; j <= ymm + jcount; j = j + 4) { xRealIndex = i%devpar.ImgHeight; /*防止越界*/ temp0 = j > ymx ? 0 : 1; temp1 = j + 1> ymx ? 0 : 1; temp2 = j + 2> ymx ? 0 : 1; temp3 = j + 3> ymx ? 0 : 1; t0 = temp0;//qwt t1 = temp1; t2 = temp2; t3 = temp3; /*读取列向相邻4个像素点像素值*/ temp0 *= psrcgray[j *temp0 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[j *temp0 + i * devpar.ImgMakeborderWidth] : 0; temp1 *= psrcgray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[(j + 1)*temp1 + i * devpar.ImgMakeborderWidth] : 0; temp2 *= psrcgray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[(j + 2)*temp2 + i * devpar.ImgMakeborderWidth] : 0; temp3 *= psrcgray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth]>mThreshold ? psrcgray[(j + 3)*temp3 + i * devpar.ImgMakeborderWidth] : 0; t0 *= psrccounter[j *t0 + i * devpar.ImgMakeborderWidth]; t1 *= psrccounter[(j + 1)*t1 + i * devpar.ImgMakeborderWidth]; t2 *= psrccounter[(j + 2)*t2 + i * devpar.ImgMakeborderWidth]; t3 *= psrccounter[(j + 3)*t3 + i * devpar.ImgMakeborderWidth]; myArea += temp0 > 0 ? 1 : 0; //面积计算 myArea += temp1 > 0 ? 1 : 0; myArea += temp2 > 0 ? 1 : 0; myArea += temp3 > 0 ? 1 : 0; clengthCount += t0 + t1 + t2 + t3;//周长计算 sum_gray += temp0 + temp1 + temp2 + temp3;//灰度累加 x_sum += xRealIndex* temp0 + xRealIndex * temp1 + xRealIndex * temp2 + xRealIndex * temp3; y_sum += j * temp0 + (j + 1)*temp1 + (j + 2)*temp2 + (j + 3)*temp3;//y灰度加权累加 } clength = clengthCount / 255;//计算周长 /*输出特征*/ length[Id] = clength; area[Id] = myArea; xpos[Id] = x_sum / sum_gray; ypos[Id] = y_sum / sum_gray; } //-------------------------------------------------------结束----------------------------------------// //-------------------------------------灰度图像压缩核函数----------------------------------------// /** * 常量存储器中的值分解(输入范围从-4096到4095…(包括这两种)从系数值映射到值的代码中,以确定其位大小。 */ __device__ unsigned int GPUjpeg_huffman_value[8 * 1024]; /** * H * huffman编码表- 每一种编码表都有257个成员 (256 + 1 extra) * 依次包括以下四个huffman编码表: * - luminance (Y) AC * - luminance (Y) DC * - chroma (cb/cr) AC * - chroma (cb/cr) DC */ __device__ uint32_t gpujpeg_huffman_gpu_tab[(256 + 1) * 4]; dim3 gpujpeg_huffman_encoder_grid_size(int tblock_count) { dim3 size(tblock_count); while (size.x > 0xffff) { size.x = (size.x + 1) >> 1; size.y <<= 1; } return size; } /* 内部函数实现 */ static int ALIGN(int x, int y) { //取y的整数倍 // y must be a power of 2. 
return (x + y - 1) & ~(y - 1); } /*********************************************************************************************************** /***函数名称:write_bitstream /***函数功能:将bitstream写入到图像bit流d_JPEGdata中去 /***输 入:bit_location 每个mcu图像单元编码得到的bit流开始的位置 /***输 入:bit_length 每个mcu图像单元编码得到的bit流位长度 /***输 入:bit_code 每个mcu图像单元每个非零数字编码得到的huffman编码 /***输 出:d_JPEGdata 用于存储图像数据编码得到的最终bitstream /***返 回:无返回 ************************************************************************************************************/ __device__ void write_bitstream(unsigned int even_code, unsigned int odd_code, int length, int bit_location, int even_code_size, BYTE *d_JPEGdata) { //将一个线程的数据编码写入数据编码缓存空间 const int byte_restbits = (8 - (bit_location & MASK)); const int byte_location = bit_location >> SHIFT; int write_bytelocation = byte_location; uint64_t threadwrite_code = ((uint64_t)even_code << (24 + byte_restbits)) + ((uint64_t)odd_code << (24 + byte_restbits - even_code_size)); int right_shift = 56; if (byte_restbits != 8) { write_bytelocation++; length -= byte_restbits; right_shift -= 8; } for (int i = length; i > 0; i = i - 8) { d_JPEGdata[write_bytelocation] = (threadwrite_code >> right_shift) & 0XFF; right_shift -= 8; write_bytelocation++; } if (byte_restbits != 8) { d_JPEGdata[byte_location] = d_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; } } /** *初始化huffman编码的数据,形成常量数据编码表 */ __global__ static void GPUjpeg_huffman_encoder_value_init_kernel() { // fetch some value const int tid = threadIdx.x + blockIdx.x * blockDim.x; const int value = tid - 4096; // decompose it unsigned int value_code = value; int absolute = value; if (value < 0) { // valu eis now absolute value of input absolute = -absolute; // For a negative input, want temp2 = bitwise complement of abs(input) // This code assumes we are on a two's complement machine value_code--; } // 计算编码数据的bit位数 unsigned int value_nbits = 0; while (absolute) { value_nbits++; absolute >>= 1; } //将数据结果存于表中 (编码数据的值存在高位,左对齐;编码数据的bit位数存在低位右对齐) GPUjpeg_huffman_value[tid] = value_nbits | (value_code << (32 - value_nbits)); } __device__ static unsigned int gpuhuffman_encode_value(const int preceding_zero_count, const int coefficient, const int huffman_lut_offset) { // 读取编码数据的huffman编码 const unsigned int packed_value = GPUjpeg_huffman_value[4096 + coefficient]; // 将packed_value分解成编码和编码bit位长度 const int value_nbits = packed_value & 0xf; const unsigned int value_code = packed_value & ~0xf; // find prefix of the codeword and size of the prefix const int huffman_lut_idx = huffman_lut_offset + preceding_zero_count * 16 + value_nbits; const unsigned int packed_prefix = gpujpeg_huffman_gpu_tab[huffman_lut_idx]; const unsigned int prefix_nbits = packed_prefix & 31; // 返回编码数据的编码和它的编码长度 return (packed_prefix + value_nbits) | (value_code >> prefix_nbits); } __global__ static void gpujpeg_huffman_gpu_encoder_encode_block(BSI16 *d_ydst, int MCU_total, BYTE *d_JPEGdata, int *prefix_num, int offset, const int huffman_lut_offset) { //计算对应的图像block id号 const int block_idx = (blockIdx.y * gridDim.x << 2) + (blockIdx.x << 2) + threadIdx.y; if (block_idx >= MCU_total) return; __shared__ int Length_count[(THREAD_WARP + 1) * 4]; d_ydst += block_idx << 6; const int load_idx = threadIdx.x * 2; int in_even = d_ydst[load_idx]; const int in_odd = d_ydst[load_idx + 1]; //对直流分量进行差分编码 if (threadIdx.x == 0 && block_idx != 0) in_even = in_even - d_ydst[load_idx - 64]; if (threadIdx.x == 0 && block_idx == 0) in_even = in_even - 64; //计算当前编码数据前面0的个数 const unsigned int nonzero_mask = (1 << threadIdx.x) - 
1; const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC数据都看作是非零数据 const unsigned int nonzero_bitmap_1 = __ballot(in_odd); const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1; const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask); //计算当前线程偶编码数据编码前面0的个数 int zeros_before_even = 2 * (zero_pair_count + threadIdx.x - 32); if ((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) { zeros_before_even += 1; } // true if any nonzero pixel follows thread's odd pixel const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask; // 计算奇数位编码数据前面的编码 ,如果交流分量in_even是0,则in_odd前面的0的个数+1 // (the count is actually multiplied by 16) int zeros_before_odd = (in_even || !threadIdx.x) ? 0 : zeros_before_even + 1; // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited) // otherwise only trim extra bits from the counts of following zeros const int zero_count_mask = nonzero_follows ? 0xF : 0; zeros_before_even &= zero_count_mask; zeros_before_odd &= zero_count_mask; int even_lut_offset = huffman_lut_offset; if (0 == threadIdx.x) { // first thread uses DC part of the table for its even value even_lut_offset += 256 + 1; } // 一个block的结束标志 if (0 == ((threadIdx.x ^ 31) | in_odd)) { // 如果需要添加结束标志,则将zeros_before_odd的值改为16 zeros_before_odd = 16; } // each thread gets codeword for its two pixels unsigned int even_code = gpuhuffman_encode_value(zeros_before_even, in_even, even_lut_offset); unsigned int odd_code = gpuhuffman_encode_value(zeros_before_odd, in_odd, huffman_lut_offset); int *bl_ptr = Length_count + (THREAD_WARP + 1) * threadIdx.y; const unsigned int even_code_size = even_code & 31; const unsigned int odd_code_size = odd_code & 31; int bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; int code_nbits = bit_length; //计算每个BLOCK中非零编码的数据个数 unsigned int prefix_bitmap = __ballot(bit_length); int prefix_count = __popc(prefix_bitmap & nonzero_mask); if (bit_length) { bl_ptr[prefix_count] = bit_length; __syncthreads(); //进行前缀求和运算 for (int j = 0; j < prefix_count; j++) { code_nbits = code_nbits + bl_ptr[j]; } } if (threadIdx.x == 31) { prefix_num[block_idx * 3 + offset] = code_nbits; } //计算写入缓存区的具体字节位置,确定写入d_JPEGdata的位置 BYTE *Write_JPEGdata = d_JPEGdata + (block_idx << 6); const int bit_location = code_nbits - bit_length; const int byte_restbits = (8 - (bit_location & MASK)); const int byte_location = bit_location >> SHIFT; int write_bytelocation = byte_location; //将一个线程的数据编码写入数据编码缓存空间 int length = bit_length; uint64_t threadwrite_code = ((uint64_t)even_code << (24 + byte_restbits)) + ((uint64_t)odd_code << (24 + byte_restbits - even_code_size)); int right_shift = 56; if (byte_restbits != 8) { write_bytelocation++; length -= byte_restbits; right_shift -= 8; } for (int i = length; i > 0; i = i - 8) { Write_JPEGdata[write_bytelocation] = (threadwrite_code >> right_shift) & 0XFF; right_shift -= 8; write_bytelocation++; } if (byte_restbits != 8) { if (bit_length < byte_restbits && bit_length) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; __syncthreads(); if (bit_length >= byte_restbits) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; } } /*********************************************************************************************************** /***函数名称:CUDA_RGB2YUV_kernel /***函数功能:将位图的BMP数据GRB模式转换为YUV数据模式 /***输 入:d_bsrc 原始的位图数据 /***输 入:nPitch 字节对齐的RGB数据大小 /***输 入:Size 
字节对齐的YCrCb数据大小 /***输 出:Y\Cr\Cb 转换后的3个颜色分量 /***返 回:无返回 ************************************************************************************************************/ __global__ void CUDA_RGB2YUV_kernel(BYTE *d_bsrc, BYTE *Y, BYTE *Cr, BYTE *Cb, size_t nPitch, size_t StrideF) { int tid = (blockIdx.x << 3) + threadIdx.x; d_bsrc += ((blockIdx.y << 3) + threadIdx.y) * nPitch + (tid << 1) + tid; int OffsThreadInRow = ((blockIdx.y << 3) + threadIdx.y) * StrideF + tid; float r = d_bsrc[2]; float g = d_bsrc[1]; float b = d_bsrc[0]; Y[OffsThreadInRow] = (g * C_Yg + b * C_Yb + r * C_Yr); Cr[OffsThreadInRow] = (g * C_Ug + b * C_Ub + 128.f + r* C_Ur); Cb[OffsThreadInRow] = (g * C_Vg + b * C_Vb + 128.f + r* C_Vr); } /*********************************************************************************************************** /***函数名称:work_efficient_PrefixSum_kernel(int *X, int *BlockSum, int InputSize) /***函数功能:前缀求和计算辅助函数,主要是数据块被分成n个小块以后,求每个小块的前缀和 /***输 入:X 需要进行前缀求和的数据 /***输 出:BlockSum 前缀求和每个小块的总和 /***输 出:X 前缀求和的数据的最终结果 /***返 回:无返回 ************************************************************************************************************/ __global__ void work_efficient_PrefixSum_kernel(int *X, int *BlockSum) { // XY[2*BLOCK_SIZE] is in shared memory __shared__ int XY[512]; __shared__ int XY1[512]; int index; int tid = threadIdx.x << 1; int i = (blockIdx.x << 10) + tid + 1; XY[tid] = X[i]; XY[tid + 1] = X[i] + X[i + 1]; XY1[tid] = X[512 + i]; XY1[tid + 1] = X[512 + i] + X[i + 513]; __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 512) { XY[index] += XY[index - 2]; XY1[index] += XY1[index - 2]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 512) { XY[index] += XY[index - 4]; XY1[index] += XY1[index - 4]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 512) { XY[index] += XY[index - 8]; XY1[index] += XY1[index - 8]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 512) { XY[index] += XY[index - 16]; XY1[index] += XY1[index - 16]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 512) { XY[index] += XY[index - 32]; XY1[index] += XY1[index - 32]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 512) { XY[index] += XY[index - 64]; XY1[index] += XY1[index - 64]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 512) { XY[index] += XY[index - 128]; XY1[index] += XY1[index - 128]; } __syncthreads(); if (index < 512) { XY[511] += XY[255]; XY1[511] += XY1[255]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 384) { XY[index + 128] += XY[index]; XY1[index + 128] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 448) { XY[index + 64] += XY[index]; XY1[index + 64] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 480) { XY[index + 32] += XY[index]; XY1[index + 32] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 496) { XY[index + 16] += XY[index]; XY1[index + 16] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 504) { XY[index + 8] += XY[index]; XY1[index + 8] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 508) { XY[index + 4] += XY[index]; XY1[index + 4] += XY1[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 510) { XY[index + 2] += XY[index]; XY1[index + 2] += XY1[index]; } __syncthreads(); if (threadIdx.x == 0) { X[1 + i] = XY[tid + 1]; X[513 + i] = XY1[tid + 
1]; } else { X[i] = XY[tid] + XY[tid - 1]; X[1 + i] = XY[tid + 1]; X[512 + i] = XY1[tid] + XY1[tid - 1]; X[513 + i] = XY1[tid + 1]; BlockSum[(blockIdx.x << 1) + 1] = XY[511]; BlockSum[(blockIdx.x << 1) + 2] = XY1[511]; } } /*********************************************************************************************************** /***函数名称:work_efficient_BlockUp_kernel(int *dc_component) /***函数功能:前缀求和计算辅助函数,主要是数据块被分成n个小块以后,求每个小块的前缀和 /***输 入:BlockSum 需要进行前缀求和的数据 /***输 出:BlockSum 前缀求和的数据的最终结果 /***返 回:无返回 ************************************************************************************************************/ __global__ void work_efficient_BlockUp_kernel(int *BlockSum) { __shared__ int XY[512]; int index; int tid = threadIdx.x << 1; int i = (blockIdx.x << 9) + tid + 1; XY[tid] = BlockSum[i]; XY[tid + 1] = BlockSum[i] + BlockSum[i + 1]; __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 512) { XY[index] += XY[index - 2]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 512) { XY[index] += XY[index - 4]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 512) { XY[index] += XY[index - 8]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 512) { XY[index] += XY[index - 16]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 512) { XY[index] += XY[index - 32]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 512) { XY[index] += XY[index - 64]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 512) { XY[index] += XY[index - 128]; } __syncthreads(); if (index < 512) { XY[511] += XY[255]; } __syncthreads(); index = ((threadIdx.x + 1) << 8) - 1; if (index < 384) { XY[index + 128] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 7) - 1; if (index < 448) { XY[index + 64] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 6) - 1; if (index < 480) { XY[index + 32] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 5) - 1; if (index < 496) { XY[index + 16] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 4) - 1; if (index < 504) { XY[index + 8] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 3) - 1; if (index < 508) { XY[index + 4] += XY[index]; } __syncthreads(); index = ((threadIdx.x + 1) << 2) - 1; if (index < 510) { XY[index + 2] += XY[index]; } __syncthreads(); if (threadIdx.x == 0) { BlockSum[1 + i] = XY[tid + 1]; } else { BlockSum[i] = XY[tid] + XY[tid - 1]; BlockSum[1 + i] = XY[tid + 1]; } } __global__ void work_efficient_Adds_kernel(int *BlockSum, int *prefix_num) { int tid = (blockIdx.x << 9) + (threadIdx.x << 1) + 1; //blockIdx.x*blockDim.x + threadIdx.x prefix_num[tid] = BlockSum[blockIdx.x] + prefix_num[tid]; prefix_num[tid + 1] = BlockSum[blockIdx.x] + prefix_num[tid + 1]; } /*********************************************************************************************************** /***函数名称:CUDA_DCT8_kernel /***函数功能:对灰度原始图像数据进行DCT变换 /***输 入:X 需要进行前缀求和的数据 /***输 入:MCU_total 需要进行前缀求和的数据个数 /***输 出:X 前缀求和的数据的最终结果 /***返 回:无返回 ************************************************************************************************************/ __global__ void CUDA_DCT8_kernel(BSI16 *d_ydst, BYTE *d_bsrc, RIM Size, int *DEV_ZIGZAG, float *DEV_STD_QUANT_TAB_LUMIN) { __shared__ float block[512]; int OffsThreadInRow = (blockIdx.x << 6) + (threadIdx.z << 5) + (threadIdx.y << 3) + threadIdx.x; if (OffsThreadInRow >= Size.width) return; OffsThreadInRow = OffsThreadInRow - (blockIdx.x << 6); //32*16中列偏移 d_bsrc += 
((blockIdx.y << 3) + threadIdx.x) * Size.StrideF + (blockIdx.x << 6) + (threadIdx.z << 5) + (threadIdx.y << 3); float *bl_ptr = block + (threadIdx.z << 5) + (threadIdx.y << 3) + (threadIdx.x << 6); float Vect0 = d_bsrc[0]; float Vect1 = d_bsrc[1]; float Vect2 = d_bsrc[2]; float Vect3 = d_bsrc[3]; float Vect4 = d_bsrc[4]; float Vect5 = d_bsrc[5]; float Vect6 = d_bsrc[6]; float Vect7 = d_bsrc[7]; float X07P = Vect0 + Vect7; float X16P = Vect1 + Vect6; float X25P = Vect2 + Vect5; float X34P = Vect3 + Vect4; float X07M = Vect0 - Vect7; float X61M = Vect6 - Vect1; float X25M = Vect2 - Vect5; float X43M = Vect4 - Vect3; float X07P34PP = X07P + X34P; float X07P34PM = X07P - X34P; float X16P25PP = X16P + X25P; float X16P25PM = X16P - X25P; bl_ptr[0] = X07P34PP + X16P25PP; bl_ptr[2] = C_b * X07P34PM + C_e * X16P25PM; bl_ptr[4] = X07P34PP - X16P25PP; bl_ptr[6] = C_e * X07P34PM - C_b * X16P25PM; bl_ptr[1] = C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M; bl_ptr[3] = C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M; bl_ptr[5] = C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M; bl_ptr[7] = C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M; bl_ptr = block + OffsThreadInRow; Vect0 = bl_ptr[0]; Vect1 = bl_ptr[64]; Vect2 = bl_ptr[128]; Vect3 = bl_ptr[192]; Vect4 = bl_ptr[256]; Vect5 = bl_ptr[320]; Vect6 = bl_ptr[384]; Vect7 = bl_ptr[448]; X07P = Vect0 + Vect7; X16P = Vect1 + Vect6; X25P = Vect2 + Vect5; X34P = Vect3 + Vect4; X07M = Vect0 - Vect7; X61M = Vect6 - Vect1; X25M = Vect2 - Vect5; X43M = Vect4 - Vect3; X07P34PP = X07P + X34P; X07P34PM = X07P - X34P; X16P25PP = X16P + X25P; X16P25PM = X16P - X25P; d_ydst = d_ydst + blockIdx.y * (Size.width << 3) + (blockIdx.x << 9) + (threadIdx.z << 8) + (threadIdx.y << 6); DEV_STD_QUANT_TAB_LUMIN += threadIdx.x; DEV_ZIGZAG += threadIdx.x; d_ydst[DEV_ZIGZAG[0]] = (X07P34PP + X16P25PP)* DEV_STD_QUANT_TAB_LUMIN[0]; d_ydst[DEV_ZIGZAG[8]] = (C_a * X07M - C_c * X61M + C_d * X25M - C_f * X43M) * DEV_STD_QUANT_TAB_LUMIN[8]; d_ydst[DEV_ZIGZAG[16]] = (C_b * X07P34PM + C_e * X16P25PM) * DEV_STD_QUANT_TAB_LUMIN[16]; d_ydst[DEV_ZIGZAG[24]] = (C_c * X07M + C_f * X61M - C_a * X25M + C_d * X43M) * DEV_STD_QUANT_TAB_LUMIN[24]; d_ydst[DEV_ZIGZAG[32]] = (X07P34PP - X16P25PP) * DEV_STD_QUANT_TAB_LUMIN[32]; d_ydst[DEV_ZIGZAG[40]] = (C_d * X07M + C_a * X61M + C_f * X25M - C_c * X43M) * DEV_STD_QUANT_TAB_LUMIN[40]; d_ydst[DEV_ZIGZAG[48]] = (C_e * X07P34PM - C_b * X16P25PM) * DEV_STD_QUANT_TAB_LUMIN[48]; d_ydst[DEV_ZIGZAG[56]] = (C_f * X07M + C_d * X61M + C_c * X25M + C_a * X43M) * DEV_STD_QUANT_TAB_LUMIN[56]; } /*********************************************************************************************************** /***函数名称:Data_codelength_kernel /***函数功能:对扫描后数据进行编码,计算64个mcu bit流长度并进行scan扫描 /***int *dc_component 输入每个mcu的直流分量,并在kernel最后存储64个mcu bit流总长度 /***int *d_ydst 输入经过zigzag扫描后的数据 /***int *prefix_num用于在运算中存储每个mcu的bit流长度的前缀和 ************************************************************************************************************/ __global__ void Data_codelength_kernel(BSI16 *d_ydst, int MCU_total, BYTE *d_JPEGdata, int *prefix_num, int offset, const int huffman_lut_offset) { //计算对应的图像block id号 const int block_idx = (blockIdx.y * gridDim.x << 2) + (blockIdx.x << 2) + threadIdx.y; if (block_idx >= MCU_total) return; __shared__ int Length_count[(THREAD_WARP + 1) * 4]; d_ydst += block_idx << 6; const int load_idx = threadIdx.x * 2; int in_even = d_ydst[load_idx]; const int in_odd = d_ydst[load_idx + 1]; //对直流分量进行差分编码 if (threadIdx.x == 0 && block_idx != 0) in_even = in_even - 
d_ydst[load_idx - 64]; if (threadIdx.x == 0 && block_idx == 0) in_even = in_even - 85; //计算当前编码数据前面0的个数 const unsigned int nonzero_mask = (1 << threadIdx.x) - 1; const unsigned int nonzero_bitmap_0 = 1 | __ballot(in_even); // DC数据都看作是非零数据 const unsigned int nonzero_bitmap_1 = __ballot(in_odd); const unsigned int nonzero_bitmap_pairs = nonzero_bitmap_0 | nonzero_bitmap_1; const int zero_pair_count = __clz(nonzero_bitmap_pairs & nonzero_mask); //计算当前线程偶编码数据编码前面0的个数 int zeros_before_even = 2 * (zero_pair_count + threadIdx.x - 32); if ((0x80000000 >> zero_pair_count) > (nonzero_bitmap_1 & nonzero_mask)) { zeros_before_even += 1; } // true if any nonzero pixel follows thread's odd pixel const bool nonzero_follows = nonzero_bitmap_pairs & ~nonzero_mask; // 计算奇数位编码数据前面的编码 ,如果交流分量in_even是0,则in_odd前面的0的个数+1 // (the count is actually multiplied by 16) int zeros_before_odd = (in_even || !threadIdx.x) ? 0 : zeros_before_even + 1; // clear zero counts if no nonzero pixel follows (so that no 16-zero symbols will be emited) // otherwise only trim extra bits from the counts of following zeros const int zero_count_mask = nonzero_follows ? 0xF : 0; zeros_before_even &= zero_count_mask; zeros_before_odd &= zero_count_mask; int even_lut_offset = huffman_lut_offset; if (0 == threadIdx.x) { // first thread uses DC part of the table for its even value even_lut_offset += 256 + 1; } // 一个block的结束标志 if (0 == ((threadIdx.x ^ 31) | in_odd)) { // 如果需要添加结束标志,则将zeros_before_odd的值改为16 zeros_before_odd = 16; } // each thread gets codeword for its two pixels unsigned int even_code = gpuhuffman_encode_value(zeros_before_even, in_even, even_lut_offset); unsigned int odd_code = gpuhuffman_encode_value(zeros_before_odd, in_odd, huffman_lut_offset); int *bl_ptr = Length_count + (THREAD_WARP + 1) * threadIdx.y; const unsigned int even_code_size = even_code & 31; const unsigned int odd_code_size = odd_code & 31; int bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; int code_nbits = bit_length; //计算每个BLOCK中非零编码的数据个数 unsigned int prefix_bitmap = __ballot(bit_length); int prefix_count = __popc(prefix_bitmap & nonzero_mask); if (bit_length) { bl_ptr[prefix_count] = bit_length; __syncthreads(); //进行前缀求和运算 for (int j = 0; j < prefix_count; j++) { code_nbits = code_nbits + bl_ptr[j]; } } if (threadIdx.x == 31) { prefix_num[block_idx + 1] = code_nbits + 8; } //计算写入缓存区的具体字节位置,确定写入d_JPEGdata的位置 BYTE *Write_JPEGdata = d_JPEGdata + (block_idx << 6); const int bit_location = code_nbits - bit_length; const int byte_restbits = (8 - (bit_location & MASK)); const int byte_location = bit_location >> SHIFT; int write_bytelocation = byte_location; //将一个线程的数据编码写入数据编码缓存空间 int length = bit_length; uint64_t threadwrite_code = ((uint64_t)even_code << (24 + byte_restbits)) + ((uint64_t)odd_code << (24 + byte_restbits - even_code_size)); int right_shift = 56; if (byte_restbits != 8) { write_bytelocation++; length -= byte_restbits; right_shift -= 8; } for (int i = length; i > 0; i = i - 8) { Write_JPEGdata[write_bytelocation] = (threadwrite_code >> right_shift) & 0XFF; right_shift -= 8; write_bytelocation++; } if (byte_restbits != 8) { if (bit_length < byte_restbits && bit_length) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; __syncthreads(); if (bit_length >= byte_restbits) Write_JPEGdata[byte_location] = Write_JPEGdata[byte_location] | (threadwrite_code >> 56) & 0XFF; } } __global__ void CUDA_YCrCb_codelength_kernel(BSI16 *d_ydst, BYTE *d_JPEGdata, int 
*prefix_num, int MCU_total, int offset, int cycle) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x int bit_location = 0; if (tid >= MCU_total) return; int in_even, zeros_before = 0; d_ydst += tid << 6; //对直流分量和交流分量进行预处理 if (tid == 0) in_even = d_ydst[0] - 85; else in_even = d_ydst[0] - d_ydst[-64]; int in_odd = d_ydst[1]; d_JPEGdata = d_JPEGdata + (tid << 6); unsigned int even_code = gpuhuffman_encode_value(0, in_even, (256 + 1) * 3); unsigned int odd_code = gpuhuffman_encode_value(0, in_odd, (256 + 1) * 2); unsigned int even_code_size = even_code & 31; unsigned int odd_code_size = odd_code & 31; int bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; write_bitstream(even_code, odd_code, bit_length, bit_location, even_code_size, d_JPEGdata); bit_location += bit_length; for (int j = 2; j < cycle; j = j + 2) { in_even = d_ydst[j]; in_odd = d_ydst[j + 1]; if (!in_even) zeros_before++; odd_code = 0; even_code = 0; if (in_even) even_code = gpuhuffman_encode_value(zeros_before, in_even, (256 + 1) * 2); zeros_before = in_even ? 0 : zeros_before + 1; if (in_odd) odd_code = gpuhuffman_encode_value(zeros_before, in_odd, (256 + 1) * 2); if (in_even || in_odd) { even_code_size = even_code & 31; odd_code_size = odd_code & 31; bit_length = even_code_size + odd_code_size; even_code = even_code & ~31; odd_code = odd_code & ~31; write_bitstream(even_code, odd_code, bit_length, bit_location, even_code_size, d_JPEGdata); bit_location += bit_length; } } write_bitstream(0, 0, 2, bit_location, 0, d_JPEGdata); prefix_num[tid * 3 + offset] = bit_location + 2; } __global__ void adds_prefixsum(int *dc_component, int *prefix_num, int MCU_total) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x if (tid >= MCU_total) return; prefix_num[tid + 1] = dc_component[blockIdx.x] + prefix_num[tid + 1]; } __global__ void adds_prefixsum1(int *dc_component, int *prefix_num, int MCU_total) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x if (tid >= (MCU_total - 1) >> 7) return; prefix_num[tid + 1] = dc_component[blockIdx.x] + prefix_num[tid + 1]; } __global__ void data_shift_kernel(BYTE *d_JPEGdata, int *prefix_num, int MCU_total, int *d_datalen, int *dc_component, int* last_prefix_num) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x int byte_location = 0; if (tid >= MCU_total) return; //如果tid>MCU总数,则不执行 d_JPEGdata = d_JPEGdata + (tid << 6); //计算之前编码好的数据流首地址 BYTE *JPEG_Writedatalocation = d_JPEGdata + 63; //位移后的BYTE要写入的位置 BYTE byte_tmp; int length = prefix_num[tid + 1] - prefix_num[tid] - 8; //得到每个MCU编码数据bit流的所占的字节数 int right_shift = prefix_num[tid] & MASK; //得到前个MCU编码数据bit流在本MCU编码数据bit流中首字节所占的bit数 int left_shift = 8 - right_shift; //得到本MCU编码数据bit流首字节所占的bit数 byte_location = (length - 1) >> SHIFT; //得到本MCU编码数据bit流尾字节所在位置 int bit_rest = 8 - length + ((byte_location << SHIFT)); length = length + right_shift + 8; //得到本MCU编码数据bit流数据字节长度 length >>= SHIFT; if (right_shift >= bit_rest) { JPEG_Writedatalocation[0] = (d_JPEGdata[byte_location] << left_shift); JPEG_Writedatalocation--; } for (; byte_location > 0; byte_location--) { byte_tmp = (d_JPEGdata[byte_location] >> right_shift) | (d_JPEGdata[byte_location - 1] << left_shift); if (byte_tmp == 0xff) { length++; JPEG_Writedatalocation[0] = 0; JPEG_Writedatalocation[-1] = byte_tmp; JPEG_Writedatalocation -= 2; } else { JPEG_Writedatalocation[0] = byte_tmp; JPEG_Writedatalocation--; } } byte_tmp = 
d_JPEGdata[0] >> right_shift; if (byte_tmp == 0xff) { length++; JPEG_Writedatalocation[0] = 0; JPEG_Writedatalocation--; JPEG_Writedatalocation[0] = byte_tmp; } else { JPEG_Writedatalocation[0] = byte_tmp; } last_prefix_num[tid + 1] = length; } __global__ void Data_encodelater1_kernel(int *prefix_num, BYTE *d_JPEGdata, BYTE *last_AC, int MCU_total, int *d_datalen) { int tid = (blockIdx.x << 7) + threadIdx.x; //blockIdx.x*blockDim.x + threadIdx.x if (tid >= MCU_total) return; int length; if (tid == MCU_total - 1) d_datalen[0] = prefix_num[tid + 1]; length = prefix_num[tid + 1] - prefix_num[tid]; last_AC = last_AC + prefix_num[tid]; d_JPEGdata = d_JPEGdata + (tid << 6) + 64 - length; for (int i = 0; i < length; i++) { last_AC[i] = d_JPEGdata[i]; } } //-------------------------------------------------------结束----------------------------------------// /************************************************* 函数名称: RmwRead8BitBmpFile2Img // 函数描述: 函数将存储位置的.bmp格式图像读入内存中; // 输入参数:const char * filename :输入图像文件路径; . unsigned char* pImg :存放24位位图的指针; . unsigned char* Binarization :存放灰度图的指针; . int* width :读出图像列数; . int* width :读出图像行数;// 输出参数:unsigned char* pImg :若输入图像为灰度图,则指针指向NULL。 . unsigned char* Binarization :若输入图像为24位彩图,则指针指向NULL。;// 返回值 : bool -- 读入成功标志位// 其他说明: 函数仅用于调试阶段,实际工程中相机采样照片已经存放在内存区域中; . 该函数在调用前,需要先为图像指针分配图像大小的内存区域; . 内存区域大小(Byte) = width * height * ImgDeep; // *************************************************/ bool RmwRead8BitBmpFile2Img(const char * filename, unsigned char*pImg, unsigned char*Binarization, int *width, int *height) { FILE *binFile; BITMAPFILEHEADER fileHeader;//文件头 BITMAPINFOHEADER bmpHeader;//信息头 BOOL isRead = TRUE; int ImgDeep; int linenum, ex; // nenum:一行像素的字节总数,包括填充字节 //open file if ((binFile = fopen(filename, "rb")) == NULL) return NULL; //read struts if (fread((void *)&fileHeader, 1, sizeof(fileHeader), binFile) != sizeof(fileHeader)) isRead = FALSE; if (fread((void *)&bmpHeader, 1, sizeof(bmpHeader), binFile) != sizeof(bmpHeader)) isRead = FALSE; if (isRead == FALSE || fileHeader.bfOffBits<sizeof(fileHeader) + sizeof(bmpHeader)) { fclose(binFile); return NULL; } //read image info *width = bmpHeader.biWidth; *height = bmpHeader.biHeight; ImgDeep = bmpHeader.biBitCount / 8;//每个像素所占字节数目 linenum = (*width * ImgDeep + 3) / 4 * 4;//这里要改 ex = linenum - *width * ImgDeep; //每一行的填充字节 fseek(binFile, fileHeader.bfOffBits, SEEK_SET); //读取灰度图 if (ImgDeep == 1) { if (Binarization != NULL) for (int i = 0; i<*height; i++) { int r = fread(Binarization + (*height - i - 1)*(*width)*ImgDeep, sizeof(unsigned char), (*width)*ImgDeep, binFile); if (r != (*width)*ImgDeep) { delete Binarization; fclose(binFile); return NULL; } fseek(binFile, ex, SEEK_CUR); } fclose(binFile); return true; } //读取位图 else if (ImgDeep == 3) { //pImg = new uchar[(*width)*(*height)*ImgDeep]; if (pImg != NULL) { for (int i = 0; i < *height; i++) { int r = fread(pImg + (*height - i - 1)*(*width)*ImgDeep, sizeof(unsigned char), (*width)*ImgDeep, binFile);//** if (r != (*width)*ImgDeep)//** { fclose(binFile); return NULL; } fseek(binFile, ex, SEEK_CUR); } fclose(binFile); //bmp转灰度 if (Binarization != NULL) { for (int i = 0; i < *height; i++) for (int j = 0; j < *width; j++) { Binarization[j + i * (*width)] = pImg[j * ImgDeep + i * (*width) * ImgDeep] * 0.299 + pImg[j * ImgDeep + 1 + i * (*width) * ImgDeep] * 0.587 + pImg[j * ImgDeep + 2 + i * (*width) * ImgDeep] * 0.114; } } return true; } else// { unsigned char *tempImg = new uchar[(*width)*(*height)*ImgDeep]; if (tempImg != NULL) { for (int i = 0; i < *height; i++) { int 
r = fread(tempImg + (*height - i - 1)*(*width)*ImgDeep, sizeof(unsigned char), (*width)*ImgDeep, binFile);//** if (r != (*width)*ImgDeep)//** { delete[]tempImg; fclose(binFile); return NULL; } fseek(binFile, ex, SEEK_CUR); } fclose(binFile); //bmp转灰度 if (Binarization != NULL) { for (int i = 0; i < *height; i++) for (int j = 0; j < *width; j++) { Binarization[j + i * (*width)] = tempImg[j * ImgDeep + i * (*width) * ImgDeep] * 0.299 + tempImg[j * ImgDeep + 1 + i * (*width) * ImgDeep] * 0.587 + tempImg[j * ImgDeep + 2 + i * (*width) * ImgDeep] * 0.114; } } delete[]tempImg; return true; } } } else return false; } /************************************************* 函数名称: RmwWrite8bitImg2BmpFile // 函数描述: 函数将内存位置的.bmp格式图像写入到硬盘中; // 输入参数:unsigned char* pImg :存放灰度图的指针; . int* width :图像列数; . int* width :图像行数; . const char * filename :输出图像文件路径;// 输出参数:const char * filename :.bmp格式灰度图。;// 返回值 : Suc(bool型) -- 写出成功标志位 // 其他说明: 函数仅用于调试阶段,实际工程中相机采样照片已经存放在内存区域中; . 该函数在调用前,需要先为图像指针分配图像大小的内存区域; . 内存区域大小(Byte) = width * height * ImgDeep; // *************************************************/ bool RmwWrite8bitImg2BmpFile(unsigned char *pImg, int width, int height, const char * filename) { FILE * BinFile; BITMAPFILEHEADER FileHeader; BITMAPINFOHEADER BmpHeader; int i, extend; bool Suc = true; unsigned char p[4], *pCur; unsigned char* ex; extend = (width + 3) / 4 * 4 - width; // Open File if ((BinFile = fopen(filename, "w+b")) == NULL) { return false; } //参数填法见结构链接 FileHeader.bfType = ((WORD)('M' << 8) | 'B'); FileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + 256 * 4L;//2个头结构后加调色板 FileHeader.bfSize = FileHeader.bfOffBits + (width + extend)*height; FileHeader.bfReserved1 = 0; FileHeader.bfReserved2 = 0; if (fwrite((void *)&FileHeader, 1, sizeof(FileHeader), BinFile) != sizeof(FileHeader)) Suc = false; // Fill the ImgHeader BmpHeader.biSize = 40; BmpHeader.biWidth = width; BmpHeader.biHeight = height; BmpHeader.biPlanes = 1; BmpHeader.biBitCount = 8; BmpHeader.biCompression = 0; BmpHeader.biSizeImage = 0; BmpHeader.biXPelsPerMeter = 0; BmpHeader.biYPelsPerMeter = 0; BmpHeader.biClrUsed = 0; BmpHeader.biClrImportant = 0; if (fwrite((void *)&BmpHeader, 1, sizeof(BmpHeader), BinFile) != sizeof(BmpHeader)) Suc = false; // 写入调色板 for (i = 0, p[3] = 0; i<256; i++) { p[0] = p[1] = p[2] = i; // blue,green,red; //改255 - i则灰度反转 if (fwrite((void *)p, 1, 4, BinFile) != 4) { Suc = false; break; } } if (extend) { ex = new unsigned char[extend]; //填充数组大小为 0~3 memset(ex, 0, extend); } //write data for (pCur = pImg + (height - 1)*width; pCur >= pImg; pCur -= width) { if (fwrite((void *)pCur, 1, width, BinFile) != (unsigned int)width) Suc = false; // 真实的数据 if (extend) // 扩充的数据 这里填充0 if (fwrite((void *)ex, 1, extend, BinFile) != 1) Suc = false; } // return; fclose(BinFile); if (extend) delete[] ex; return Suc; } /************************************************* 函数名称: GetImgBoxHost // 函数描述: 预提取包围盒函数。在矩形模式时需要预先好提取包围盒。函数利用CPU版本的八邻域追踪法提取输入图像的包围盒。 . 提取出的包围盒数据保存在全局变量vector<RecData>gHostRecData中。 . 函数初始化了包围盒更新相关的全局变量// 输入参数:const char * filename -所要提取的位图(*.bmp)的绝对路径 // 输出参数:无 // 返回值 : 无 // 其他说明: 函数将提取的包围盒数据保存在全局变量vector<RecData>gHostRecData中,并且对容器 gHostRecData中元素数目进行了规整, . 
在容器末尾添加元素0,将容器数目填充为了128的整数倍// *************************************************/ void GetImgBoxHost(const char *path) { Parameter devpar; //初始化图像信息参数 devpar.ImgHeight = gStructVarible.ImgHeight; devpar.ImgWidth = gStructVarible.ImgWidth; devpar.Threshold = gStructVarible.Threshold; devpar.LengthMin = gStructVarible.LengthMin; devpar.LengthMax = gStructVarible.LengthMax; devpar.AreaMin = gStructVarible.AreaMin; devpar.AreaMax = gStructVarible.AreaMax; devpar.PictureNum = gStructVarible.PictureNum; devpar.RecPadding = gStructVarible.RecPadding; //方位数组申明 const cv::Point directions[8] = { { 0, 1 },{ 1,1 },{ 1, 0 },{ 1, -1 },{ 0, -1 },{ -1, -1 },{ -1, 0 },{ -1, 1 } }; //初始化CPU端方位盒数据 if (gHostRecData.size() != 0) gHostRecData.clear(); //图像空间分配 unsigned char *ImgHostdata = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum]; //qwt这里程序有BUG unsigned char *m_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];//二值化图 unsigned char *n_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];//膨胀图 unsigned char *c_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];//轮廓图 unsigned char *temp_ptr = new unsigned char[devpar.ImgWidth* devpar.ImgHeight*devpar.PictureNum];//临时变量图 //读取图片 int Picoffset = devpar.ImgHeight * devpar.ImgWidth; for (int j = 0; j < devpar.PictureNum; j++) { RmwRead8BitBmpFile2Img(path, NULL, ImgHostdata + j*Picoffset, &devpar.ImgWidth, &devpar.ImgHeight); } //二值化 for (int i = 0; i <devpar.ImgHeight*devpar.PictureNum; i++) { for (int j = 0; j < devpar.ImgWidth; j++) { m_ptr[j + i * devpar.ImgWidth] = ImgHostdata[j + i * devpar.ImgWidth] > devpar.Threshold ? 255 : 0; c_ptr[j + i * devpar.ImgWidth] = m_ptr[j + i * devpar.ImgWidth]; n_ptr[j + i * devpar.ImgWidth] = m_ptr[j + i * devpar.ImgWidth]; temp_ptr[j + i * devpar.ImgWidth] = m_ptr[j + i * devpar.ImgWidth]; } } //膨胀 for (int i = 1; i<devpar.ImgHeight*devpar.PictureNum - 1; i++) for (int j = 1; j <devpar.ImgWidth - 1; j++) { if (m_ptr[j + i * devpar.ImgWidth] == 0) { if (m_ptr[j - 1 + (i - 1)*devpar.ImgWidth] != 0 || m_ptr[j + (i - 1)*devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i - 1)*devpar.ImgWidth] != 0 || m_ptr[j - 1 + i * devpar.ImgWidth] != 0 || m_ptr[j + 1 + i * devpar.ImgWidth] != 0 || m_ptr[j - 1 + (i + 1)*devpar.ImgWidth] != 0 || m_ptr[j + (i + 1)*devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i + 1)*devpar.ImgWidth] != 0) { n_ptr[j + i * devpar.ImgWidth] = 255; c_ptr[j + i * devpar.ImgWidth] = 255; temp_ptr[j + i * devpar.ImgWidth] = 255; } } } //腐蚀 c_ptr是轮廓 for (int i = 1; i<devpar.ImgHeight*devpar.PictureNum - 1; i++) for (int j = 1; j < devpar.ImgWidth - 1; j++) { if (n_ptr[j + i * devpar.ImgWidth] != 0) { if (n_ptr[j + (i - 1)*devpar.ImgWidth] != 0 && n_ptr[j - 1 + i * devpar.ImgWidth] != 0 && n_ptr[j + 1 + i * devpar.ImgWidth] != 0 && n_ptr[j + (i + 1)*devpar.ImgWidth] != 0) { c_ptr[j + i * devpar.ImgWidth] = 0; temp_ptr[j + i * devpar.ImgWidth] = 0; } } } //方位盒 short xmax; short xmin; short ymax; short ymin; // 边缘跟踪 int i, j, counts = 0, curr_d = 0;//counts用于循环计数 curr_d是方向数组的索引ID short cLength; //提取方位盒子 for (i = 1; i <devpar.ImgHeight*devpar.PictureNum - 1; i++) for (j = 1; j <devpar.ImgWidth - 1; j++) { // 起始点及当前点 cv::Point b_pt = cv::Point(i, j); cv::Point c_pt = cv::Point(i, j); // 如果当前点为前景点 if (255 == c_ptr[j + i * devpar.ImgWidth]) { cLength = 1; xmin = xmax = i; ymin = ymax = j; bool tra_flag = false;//设置标志位 c_ptr[j + i * devpar.ImgWidth] = 0;// 用过的点直接给设置为0 // 进行跟踪 while (!tra_flag) { // 循环八次 for (counts = 0; counts < 8; counts++) { 
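// Descriptive note on the probe below: directions[] lists the 8 neighbours of the
// current root point b_pt, clockwise starting from "east" (same row, next column).
// Each pass of this loop tests one direction; when an edge pixel is found the
// bounding box is widened, the pixel is cleared in c_ptr so it is never traced
// twice, the root moves to that pixel, and curr_d is stepped back by 2 so the next
// search starts rotated back toward the outside of the contour (Moore-neighbour
// tracing). If all 8 probes fail (counts == 8), the contour is closed and the box
// collected so far is filtered by length and shape and pushed into gHostRecData.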
// 防止索引出界 if (curr_d >= 8) { curr_d -= 8; } if (curr_d < 0) { curr_d += 8; } // 跟踪的过程,是个连续的过程,需要不停的更新搜索的root点 c_pt = cv::Point(b_pt.x + directions[curr_d].x, b_pt.y + directions[curr_d].y); // 边界判断 if ((c_pt.x > 0) && (c_pt.x < devpar.ImgHeight*devpar.PictureNum - 1) && (c_pt.y > 0) && (c_pt.y < devpar.ImgWidth - 1)) { // 如果存在边缘 if (255 == c_ptr[c_pt.x*devpar.ImgWidth + c_pt.y]) { //更新包围盒 xmax = xmax > c_pt.x ? xmax : c_pt.x; ymax = ymax > c_pt.y ? ymax : c_pt.y; xmin = xmin < c_pt.x ? xmin : c_pt.x; ymin = ymin < c_pt.y ? ymin : c_pt.y; curr_d -= 2; //更新当前方向 c_ptr[c_pt.x*devpar.ImgWidth + c_pt.y] = 0; // 更新b_pt:跟踪的root点 b_pt.x = c_pt.x; b_pt.y = c_pt.y; cLength++; break; // 跳出for循环 } } curr_d++; } // end for // 跟踪的终止条件:如果8邻域都不存在边缘 if (8 == counts) { // 清零 curr_d = 0; tra_flag = true; if (cLength < devpar.LengthMax && (cLength > devpar.LengthMin)) { RecData tempRecData; int tempcount = 0; if (0.7<double(xmax - xmin) / double(ymax - ymin) < 1.5)//高/宽 { //轮廓图中心点9领域判断 for (int k = -1; k < 2; k++) { if ((xmax + xmax) / 2 < devpar.ImgHeight*devpar.PictureNum && (ymax + ymin) / 2 < devpar.ImgWidth) { tempcount += temp_ptr[(ymax + ymin) / 2 - 1 + ((xmax + xmin) / 2 + i)*devpar.ImgMakeborderWidth]; tempcount += temp_ptr[(ymax + ymin) / 2 + ((xmax + xmin) / 2 + i)*devpar.ImgMakeborderWidth]; tempcount += temp_ptr[(ymax + ymin) / 2 + 1 + ((xmax + xmin) / 2 + i)*devpar.ImgMakeborderWidth]; } } //轮廓横纵向-边判断 for (int k = xmin; k <= xmax; k++)//判断Height方向 { tempcount += temp_ptr[(ymax + ymin) / 2 + k*devpar.ImgWidth] > 0 ? 1 : 0; } for (int k = ymin; k <= ymax; k++)//判断width方向 { tempcount += temp_ptr[k + (xmax + xmin) / 2 * devpar.ImgWidth] > 0 ? 1 : 0; } if (tempcount <= 4) { if (xmin - devpar.RecPadding < 0) tempRecData.RecXmin = 0; else tempRecData.RecXmin = xmin - devpar.RecPadding; if (ymin - devpar.RecPadding < 0) tempRecData.RecYmin = 0; else tempRecData.RecYmin = ymin - devpar.RecPadding; if (xmax + devpar.RecPadding > devpar.ImgHeight*devpar.PictureNum - 1) tempRecData.RecXmax = devpar.ImgHeight*devpar.PictureNum - 1; else tempRecData.RecXmax = xmax + devpar.RecPadding; if (ymax + devpar.RecPadding > devpar.ImgWidth) tempRecData.RecYmax = devpar.ImgWidth - 1; else tempRecData.RecYmax = ymax + devpar.RecPadding; gHostRecData.push_back(tempRecData); } } } break; } } // end if } // end while } //规整方位盒数量,利用后续线程配置 gSingleImgRecNum = gHostRecData.size() / devpar.PictureNum;//这是单张图方位盒的实际数量 int rRecNum = (gHostRecData.size() + 127) / 128 * 128; gHostRecData.resize(rRecNum, RecData{ 0,0,0,0 }); gRecNum = rRecNum;//包围盒数量 //释放内存 delete[]ImgHostdata; delete[]m_ptr; delete[]n_ptr; delete[]c_ptr; delete[]temp_ptr; } //-----------------------------------------功能处理类---------------------------------------// //--------------------------------------------开始------------------------------------------// /*----------------------------------全图模式标志点提取处理类------------------------------*/ class SIM : public Runnable { public: HardwareInfo HardwarePar;//硬件参数 Parameter Devpar;//图像参数 ~SIM()//析构函数 { } void Run() { //设置GPU设备号 cudaSetDevice(HardwarePar.GpuId); //调试项 cudaError_t err, err1; clock_t start, end; clock_t startp, overp; clock_t time3; /*获取当前线程号*/ /***********/ int img_index; char DataFilename[100]; char strFilename[100]; const char* path = Devpar.DataReadPath; int OutPutInitialIndex = 0; //输出的Bin文件初始索引号 int BufferIndex = 0;//页锁缓冲区索引 long long Bufferoffset = 0;//缓冲区偏移量 bool DatafullFlag = false;//标志位:当为true的时候,表示该GPU对应的两个缓冲区中,至少有一个有有效数据。 /*----------------------参数计算------------------------------------------*/ 
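// Worked example of the parameter derivation below (illustrative numbers only, not
// taken from a real configuration): with ImgWidth = ImgHeight = 1000, PictureNum = 1,
// ImgBitDeep = 8 and PicBlockSize = 8 this gives
//   ImgChannelNum      = 8 / 8                        = 1    (grayscale input)
//   ImgMakeborderWidth = (1000 + 127) / 128 * 128     = 1024 (width padded to a multiple of 128)
//   RowThreadNum       = 1000 * 1 / 8                 = 125  (one thread row per block row)
//   ColThreadNum       = (1000 / 8 + 127) / 128 * 128 = 128  (block columns, rounded up to 128)
//   mGrid1 = (1024 / 128, 1000, 1) = (8, 1000, 1)  -- per-pixel kernels, 128 threads per block
//   mGrid2 = (128 / 128, 125, 1)   = (1, 125, 1)   -- per-candidate (contour box) kernels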
Devpar.ImgChannelNum = Devpar.ImgBitDeep / 8;//位深转换成通道数 Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128;//填充后的宽度计算 Devpar.RowThreadNum = Devpar.ImgHeight*Devpar.PictureNum / Devpar.PicBlockSize; Devpar.ColThreadNum = (Devpar.ImgWidth / Devpar.PicBlockSize + 127) / 128 * 128; dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, Devpar.ImgHeight*Devpar.PictureNum, 1); dim3 mGrid2(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1); /*----------------------内存申请------------------------------------------*/ //创建CUDA流 cudaStream_t *CStreams; CStreams = (cudaStream_t *)malloc(CUDAStreams * sizeof(cudaStream_t)); /**** 图像数据 ****/ unsigned char* DevPicColor[CUDAStreams]; unsigned char* DevPicGray[CUDAStreams];//设备内存 unsigned char* DevPadding[CUDAStreams];//填充边界后的图像内存 qwt7.26 unsigned char* Dev2Val[CUDAStreams];//二值化图 unsigned char* DevCounter[CUDAStreams];//轮廓图,在执行findcountores之后才生成 for (int i = 0; i < CUDAStreams; i++) { cudaStreamCreate(&(CStreams[i])); cudaMalloc((void**)&DevPicColor[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum * sizeof(unsigned char)); cudaMalloc((void**)&DevPicGray[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum * sizeof(unsigned char)); cudaMalloc((void**)&DevPadding[i], Devpar.ImgHeight * Devpar.ImgMakeborderWidth*Devpar.PictureNum * sizeof(unsigned char)); //qwt7.26 cudaMalloc((void**)&Dev2Val[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); cudaMalloc((void**)&DevCounter[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); } /*主机端*/ //输入 short *gpHostLength[CUDAStreams]; short *gpHostArea[CUDAStreams]; double *gpHostXpos[CUDAStreams]; double *gpHostYpos[CUDAStreams]; short *gpHostIndex[CUDAStreams]; /*设备端*/ short * gpDevRecXLeft[CUDAStreams]; short * gpDevRecYLeft[CUDAStreams]; short * gpDevRecXRight[CUDAStreams]; short * gpDevRecYRight[CUDAStreams]; //输出 short *gpDevLength[CUDAStreams]; short *gpDevArea[CUDAStreams]; double *gpDevXpos[CUDAStreams]; double *gpDevYpos[CUDAStreams]; short *gpDevIndex[CUDAStreams]; //申请的临时变量空间,包括有方位盒、输出特征的GPU端内存和GPU显存 for (int i = 0; i < CUDAStreams; i++) { cudaHostAlloc((void**)&gpHostLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), cudaHostAllocDefault);//输出周长 cudaHostAlloc((void**)&gpHostArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), cudaHostAllocDefault);//面积 cudaHostAlloc((void**)&gpHostXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), cudaHostAllocDefault);//重心坐标x cudaHostAlloc((void**)&gpHostYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), cudaHostAllocDefault);//重心坐标y cudaHostAlloc((void**)&gpHostIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), cudaHostAllocDefault);//特征索引号 cudaMalloc((void**)&gpDevRecXLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);//方位盒 xmin cudaMalloc((void**)&gpDevRecYLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// ymin cudaMalloc((void**)&gpDevRecXRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// xmax cudaMalloc((void**)&gpDevRecYRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// ymax cudaMalloc((void**)&gpDevLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);//设备端输出 周长 cudaMalloc((void**)&gpDevArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// 面积 cudaMalloc((void**)&gpDevXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double) * 2);// xpos 
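// (Each of these per-stream buffers holds ColThreadNum * RowThreadNum entries --
//  roughly one candidate slot per PicBlockSize x PicBlockSize tile over all
//  PictureNum images. The device-side buffers are allocated with an extra factor
//  of 2, presumably as headroom, while the pinned host buffers above are not.)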
cudaMalloc((void**)&gpDevYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double) * 2);// ypos err = cudaMalloc((void**)&gpDevIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short) * 2);// 索引号 } //标志点提取完整流程 while ((img_index + CUDAStreams) <= gHostPathImgNumber && gStructVarible.TerminateFlag == 0) { //若图像类型为灰度图-即单通道,则直接将数据拷贝到DevPicGray if (Devpar.ImgChannelNum == 1) { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i)* Devpar.ImgHeight * Devpar.ImgWidth; cudaMemcpyAsync(DevPicGray[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum, cudaMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { //执行灰度化,二值化核函数程序 GrayMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicGray[i], DevPadding[i], Devpar); } } else if (Devpar.ImgChannelNum == 3)//若图像类型为彩色图-即多通道,则直接将数据拷贝到DevPicColor { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i)*Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum; cudaMemcpyAsync(DevPicColor[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, cudaMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++)//转灰度+padding { ColorMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicColor[i], DevPadding[i], Devpar); } } for (int i = 0; i < CUDAStreams; i++) { //执行灰度化,二值化核函数程序 Binarization << <mGrid1, 128, 0, CStreams[i] >> > (DevPadding[i], Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //边界提取 Dilation << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(Dev2Val[i], DevCounter[i], sizeof(uchar)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, cudaMemcpyDeviceToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { Erosion << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //提取轮廓和边缘盒 GetCounter << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], Devpar);//提取轮廓的函数 } for (int i = 0; i < CUDAStreams; i++) { //筛选提取出的特征数组的非重复信息 SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //筛选提取出的特征数组的非重复信息 SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //筛选提取出的特征数组的非重复信息 GetNonRepeatBox << <mGrid2, 128, 0, CStreams[i] >> > (gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //提取面积和重心//提取特征信息核函数 GetInfo << <mGrid2, 128, 0, CStreams[i] >> > (DevPadding[i], gpDevIndex[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevXpos[i], gpDevYpos[i], gpDevArea[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostLength[i], gpDevLength[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostArea[i], gpDevArea[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, 
CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostXpos[i], gpDevXpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostYpos[i], gpDevYpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostIndex[i], gpDevIndex[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { err = cudaStreamSynchronize(CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { //筛选打印提取的特征 vector<CircleInfo>myInfo; for (int j = 0; j < Devpar.ColThreadNum * Devpar.RowThreadNum; j++) { if (gpHostIndex[i][j] != 0) { CircleInfo temp; temp.index = (short)j; temp.length = gpHostLength[i][j]; temp.area = gpHostArea[i][j]; temp.xpos = gpHostXpos[i][j]; temp.ypos = gpHostYpos[i][j]; myInfo.push_back(temp); } } SignPoint.PointNumbers = myInfo.size(); //输出标志点数据 if (myInfo.size() > 0) { FILE* fp; sprintf_s(DataFilename, "%s\\%d.bin", Devpar.DataReadPath, img_index + HardwarePar.DeviceID * HardwarePar.CUDAStreamNum + i + 1); //【3】将图片的路径名动态的写入到DataFilename这个地址的内存空间 fp = fopen(DataFilename, "wb"); fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp); fclose(fp); } } img_index += HardwarePar.DeviceCount * HardwarePar.CUDAStreamNum; } /**** 测试用代码 ****/ /**** 用于测试手动停止位置 ****/ if (gStructVarible.TerminateFlag == 1) { char buffer[20]; sprintf_s(buffer, "%s%d", "img_index = ", img_index); FILE* fp; sprintf_s(DataFilename, "%s\\%d.txt", Devpar.DataReadPath, 0); //【3】将图片的路径名动态的写入到DataFilename这个地址的内存空间 fp = fopen(DataFilename, "wb"); fwrite(buffer, sizeof(char) * 20, 1, fp); fclose(fp); } /**********************/ //释放内存 for (int i = 0; i < CUDAStreams; i++) { cudaFree(DevPicColor[i]); cudaFree(DevPicGray[i]); cudaFree(DevPadding[i]); cudaFree(Dev2Val[i]); cudaFree(DevCounter[i]); cudaFreeHost(gpHostLength[i]); cudaFreeHost(gpHostArea[i]); cudaFreeHost(gpHostXpos[i]); cudaFreeHost(gpHostYpos[i]); cudaFreeHost(gpHostIndex[i]); //设备端内存 cudaFree(gpDevRecXLeft[i]); cudaFree(gpDevRecYLeft[i]); cudaFree(gpDevRecXRight[i]); cudaFree(gpDevRecYRight[i]); cudaFree(gpDevLength[i]); cudaFree(gpDevArea[i]); cudaFree(gpDevXpos[i]); cudaFree(gpDevYpos[i]); cudaFree(gpDevIndex[i]); cudaStreamDestroy(CStreams[i]); } } }; class R : public Runnable { public: Parameter Devpar;//变量传参 HardwareInfo HardwarePar;//硬件参数 static int mRindex; ~R() { } void mydelay(double sec)//延时函数,用于图像数据缓冲区的更新 { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { //设置GPU设备号 cudaSetDevice(HardwarePar.GpuId); //调试项 /***********/ int img_index; char strFilename[100]; const char* path = Devpar.DataReadPath; int OutPutInitialIndex = 0; //输出的Bin文件初始索引号 int BufferIndex = 0;//页锁缓冲区索引 long long Bufferoffset = 0;//缓冲区偏移量 bool DatafullFlag = false;//标志位:当为true的时候,表示该GPU对应的两个缓冲区中,至少有一个有有效数据。 /*----------------------参数计算------------------------------------------*/ Devpar.ImgChannelNum = Devpar.ImgBitDeep / 8;//位深转换成通道数 Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128;//填充后的宽度计算 Devpar.RowThreadNum = Devpar.ImgHeight*Devpar.PictureNum / Devpar.PicBlockSize;//这里可能会有BUG-当高度不是PicBlock的整数倍时,可能出现问题 Devpar.ColThreadNum = (Devpar.ImgWidth / Devpar.PicBlockSize + 127) / 128 * 128; dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, 
Devpar.ImgHeight*Devpar.PictureNum, 1); dim3 mGrid2(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1); /*----------------------内存申请------------------------------------------*/ //创建CUDA流 cudaStream_t *CStreams; CStreams = (cudaStream_t *)malloc(CUDAStreams * sizeof(cudaStream_t)); /**** 图像数据 ****/ unsigned char* DevPicColor[CUDAStreams]; unsigned char* DevPicGray[CUDAStreams];//设备内存 unsigned char* DevPadding[CUDAStreams];//填充边界后的图像内存 qwt7.26 unsigned char* Dev2Val[CUDAStreams];//二值化图 unsigned char* DevCounter[CUDAStreams];//轮廓图,在执行findcountores之后才生成 for (int i = 0; i < CUDAStreams; i++) { cudaStreamCreate(&(CStreams[i])); cudaMalloc((void**)&DevPicColor[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum * sizeof(unsigned char)); cudaMalloc((void**)&DevPicGray[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum * sizeof(unsigned char)); cudaMalloc((void**)&DevPadding[i], Devpar.ImgHeight * Devpar.ImgMakeborderWidth*Devpar.PictureNum * sizeof(unsigned char)); cudaMalloc((void**)&Dev2Val[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); cudaMalloc((void**)&DevCounter[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); } /*主机端*/ //输入 short *gpHostLength[CUDAStreams]; short *gpHostArea[CUDAStreams]; double *gpHostXpos[CUDAStreams]; double *gpHostYpos[CUDAStreams]; short *gpHostIndex[CUDAStreams]; /*设备端*/ short * gpDevRecXLeft[CUDAStreams]; short * gpDevRecYLeft[CUDAStreams]; short * gpDevRecXRight[CUDAStreams]; short * gpDevRecYRight[CUDAStreams]; //输出 short *gpDevLength[CUDAStreams]; short *gpDevArea[CUDAStreams]; double *gpDevXpos[CUDAStreams]; double *gpDevYpos[CUDAStreams]; short *gpDevIndex[CUDAStreams]; //申请的临时变量空间,包括有方位盒、输出特征的GPU端内存和GPU显存 for (int i = 0; i < CUDAStreams; i++) { cudaHostAlloc((void**)&gpHostLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), cudaHostAllocDefault);//输出周长 cudaHostAlloc((void**)&gpHostArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), cudaHostAllocDefault);//面积 cudaHostAlloc((void**)&gpHostXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), cudaHostAllocDefault);//重心坐标x cudaHostAlloc((void**)&gpHostYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double), cudaHostAllocDefault);//重心坐标y cudaHostAlloc((void**)&gpHostIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short), cudaHostAllocDefault);//特征索引号 cudaMalloc((void**)&gpDevRecXLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));//方位盒 xmin cudaMalloc((void**)&gpDevRecYLeft[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymin cudaMalloc((void**)&gpDevRecXRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// xmax cudaMalloc((void**)&gpDevRecYRight[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymax cudaMalloc((void**)&gpDevLength[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));//设备端输出 周长 cudaMalloc((void**)&gpDevArea[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// 面积 cudaMalloc((void**)&gpDevXpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// xpos cudaMalloc((void**)&gpDevYpos[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// ypos cudaMalloc((void**)&gpDevIndex[i], Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// 索引号 } ExtractPointInitialSuccessFlag[HardwarePar.DeviceID] = true; while (!ExtractPointSuccess) { mydelay(0.01); vector<CircleInfo>myInfo; img_index = 0;//图像计数 Bufferoffset = 0;//页锁内存偏移 
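// Summary of the buffer hand-off implemented below: the thread spins over the
// (DeviceCount + 1) page-locked host buffers under gExtrackPointLock and claims the
// first one that is filled (PageLockBufferEmpty == false) and idle
// (PageLockBufferWorking == false) by setting its Working flag. It records the
// buffer's first image index (PageLockBufferStartIndex * Bufferlength), processes
// Bufferlength images from it, then clears Working and sets Empty = true so the
// buffer can be refilled; the binding spin and the outer loop exit when
// ExtractPointSuccess is set.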
//绑定缓冲区 while (true) { gExtrackPointLock.lock(); mRindex = mRindex % (HardwareParam.DeviceCount + 1); if (PageLockBufferEmpty[mRindex] == false && PageLockBufferWorking[mRindex] == false) { PageLockBufferWorking[mRindex] = true;//将页锁内存标志位置为工作状态--进行绑定 OutPutInitialIndex = PageLockBufferStartIndex[mRindex] * Bufferlength;//获取图像首索引 BufferIndex = mRindex; DatafullFlag = true; mRindex++; gExtrackPointLock.unlock(); break; } mRindex++; gExtrackPointLock.unlock(); if (ExtractPointSuccess) break; } //处理数据 while (DatafullFlag) { if (img_index >= Bufferlength) //qwt { gExtrackPointLock.lock(); PageLockBufferWorking[BufferIndex] = false;//处理结束--working标志位置为false gExtrackPointLock.unlock(); PageLockBufferEmpty[BufferIndex] = true; // DatafullFlag = false; break; } //若图像类型为灰度图-即单通道,则直接将数据拷贝到DevPicGray if (Devpar.ImgChannelNum == 1) { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)* Devpar.ImgHeight * Devpar.ImgWidth; cudaMemcpyAsync(DevPicGray[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum, cudaMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { //执行灰度化,二值化核函数程序 GrayMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicGray[i], DevPadding[i], Devpar); } } else if (Devpar.ImgChannelNum == 3)//若图像类型为彩色图-即多通道,则直接将数据拷贝到DevPicColor { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)*Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum; cudaMemcpyAsync(DevPicColor[i], gHostBuffer[BufferIndex] + +Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, cudaMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++)//转灰度+padding { ColorMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicColor[i], DevPadding[i], Devpar); } } for (int i = 0; i < CUDAStreams; i++) { //执行灰度化,二值化核函数程序 Binarization << <mGrid1, 128, 0, CStreams[i] >> > (DevPadding[i], Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //边界提取 Dilation << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(Dev2Val[i], DevCounter[i], sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, cudaMemcpyDeviceToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { Erosion << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //提取轮廓和边缘盒 GetCounter << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], Devpar);//提取轮廓的函数 } for (int i = 0; i < CUDAStreams; i++) { //筛选提取出的特征数组的非重复信息 SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //筛选提取出的特征数组的非重复信息 SelectTrueBox << <mGrid2, 128, 0, CStreams[i] >> > (DevCounter[i], gpDevLength[i], gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //筛选提取出的特征数组的非重复信息 GetNonRepeatBox << <mGrid2, 128, 0, CStreams[i] >> > (gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevIndex[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //提取面积和重心//提取特征信息核函数 GetInfo << <mGrid2, 128, 0, CStreams[i] >> > (DevPadding[i], gpDevIndex[i], 
gpDevRecXLeft[i], gpDevRecYLeft[i], gpDevRecXRight[i], gpDevRecYRight[i], gpDevXpos[i], gpDevYpos[i], gpDevArea[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostLength[i], gpDevLength[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostArea[i], gpDevArea[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostXpos[i], gpDevXpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostYpos[i], gpDevYpos[i], sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostIndex[i], gpDevIndex[i], sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaStreamSynchronize(CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { for (int k = 0; k < Devpar.PictureNum; k++) { int hostindex = 0; int headpos = myInfo.size(); CircleInfo headInfo; headInfo.index = OutPutInitialIndex + img_index + i;//对应文件索引 headInfo.xpos = 99999; headInfo.ypos = 99999;//xpos 和 ypos作为头标志位 headInfo.area = 0; //area为0也作为特征标志位 myInfo.push_back(headInfo); for (int j = k*Devpar.ColThreadNum * Devpar.RowThreadNum / Devpar.PictureNum; j < (k + 1)*Devpar.ColThreadNum * Devpar.RowThreadNum / Devpar.PictureNum; j++) { if (gpHostIndex[i][j] != 0) { hostindex++; CircleInfo temp; temp.index = (short)hostindex; temp.length = gpHostLength[i][j]; temp.area = gpHostArea[i][j]; temp.xpos = gpHostXpos[i][j]; temp.ypos = gpHostYpos[i][j]; myInfo.push_back(temp); } } myInfo[headpos].length = hostindex;//长度置位 } } img_index += HardwarePar.CUDAStreamNum*Devpar.PictureNum; } // 写磁盘 if (myInfo.size() > 0) { FILE* fp; sprintf_s(strFilename, "%s\\%d.bin", path, OutPutInitialIndex); //【3】将图片的路径名动态的写入到strFilename这个地址的内存空间 fp = fopen(strFilename, "wb"); fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp); fclose(fp); } } for (int i = 0; i < CUDAStreams; i++) { cudaFree(DevPicColor[i]); cudaFree(DevPicGray[i]); cudaFree(DevPadding[i]); cudaFree(Dev2Val[i]); cudaFree(DevCounter[i]); cudaFreeHost(gpHostLength[i]); cudaFreeHost(gpHostArea[i]); cudaFreeHost(gpHostXpos[i]); cudaFreeHost(gpHostYpos[i]); cudaFreeHost(gpHostIndex[i]); //设备端内存 cudaFree(gpDevRecXLeft[i]); cudaFree(gpDevRecYLeft[i]); cudaFree(gpDevRecXRight[i]); cudaFree(gpDevRecYRight[i]); cudaFree(gpDevLength[i]); cudaFree(gpDevArea[i]); cudaFree(gpDevXpos[i]); cudaFree(gpDevYpos[i]); cudaFree(gpDevIndex[i]); cudaStreamDestroy(CStreams[i]); } } }; int R::mRindex = 0;//静态变量初始化 /*----------------------------------矩形模式标志点提取处理类------------------------------*/ class RecR : public Runnable { public: HardwareInfo HardwarePar;//硬件参数 Parameter Devpar;//变量传参 static int mRecindex; public: ~RecR()//析构函数 { } void mydelay(double sec)//延时函数,用于图像数据缓冲区的更新 { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { //设置GPU设备号 cudaSetDevice(HardwarePar.GpuId); //变量申明 char DataFilename[100]; //定义一个字符数组保存----图片的读取路径 int img_index = 0;//输出图像 bin索引 int OutPutInitialIndex = 0; //输出的Bin文件初始索引号 int BufferIndex = 0;//页锁缓冲区索引 long long Bufferoffset = 0;//缓冲区偏移量 bool DatafullFlag = 
false;//标志位:当为true的时候,表示该GPU对应的两个缓冲区中,至少有一个有有效数据。 const char* path = Devpar.DataReadPath; /*----------------------参数计算------------------------------------------*/ Devpar.ImgChannelNum = Devpar.ImgBitDeep / 8;//位深转换成通道数 Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128;//填充后的宽度计算 int Gridsize = gRecNum / 128; if (Gridsize == 0)//qwt823 Gridsize = 1; /**** 核函数Grid ****/ dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, Devpar.ImgHeight*Devpar.PictureNum, 1); dim3 mGrid2(Gridsize, 1, 1); /*----------------------存储区空间申请------------------------------------------*/ //创建CUDA流 cudaStream_t *CStreams; CStreams = (cudaStream_t *)malloc(CUDAStreams * sizeof(cudaStream_t)); /*** 图像数据 ****/ unsigned char* DevPicColor[CUDAStreams]; unsigned char* DevPicGray[CUDAStreams];//设备内存 unsigned char* DevPadding[CUDAStreams];//填充边界后的图像内存 qwt7.26 unsigned char* Dev2Val[CUDAStreams];//二值化图 unsigned char* DevCounter[CUDAStreams];//轮廓图,在执行findcountores之后才生成 for (int i = 0; i < CUDAStreams; i++) { cudaStreamCreate(&(CStreams[i])); cudaMalloc((void**)&DevPicColor[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum * sizeof(unsigned char)); cudaMalloc((void**)&DevPicGray[i], Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum * sizeof(unsigned char)); cudaMalloc((void**)&DevPadding[i], Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum * sizeof(unsigned char)); //qwt7.26 cudaMalloc((void**)&Dev2Val[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); cudaMalloc((void**)&DevCounter[i], sizeof(unsigned char) * Devpar.ImgHeight * Devpar.ImgMakeborderWidth * Devpar.PictureNum); } /**** 主机端 ****/ //标志点信息输入 short *gpHostLength[CUDAStreams]; short *gpHostArea[CUDAStreams]; double *gpHostXpos[CUDAStreams]; double *gpHostYpos[CUDAStreams]; /**** 设备端 ****/ //标志点信息输出 short *gpDevLength[CUDAStreams]; short *gpDevArea[CUDAStreams]; double *gpDevXpos[CUDAStreams]; double *gpDevYpos[CUDAStreams]; RecData *gpRDevRecData[CUDAStreams];//qwt821 //拷贝方位盒数据 if (gRecNum > 0) { for (int i = 0; i < CUDAStreams; i++) { cudaMalloc((void**)&gpRDevRecData[i], gRecNum * sizeof(RecData) * 2);//这里这个2的作用是:方位盒可能在实验期间数目要变,可能会变多一点,防止变了之后内存越界 cudaMemcpy(gpRDevRecData[i], &gHostRecData[0], gRecNum * sizeof(RecData), cudaMemcpyHostToDevice); } } //申请的临时变量空间,包括有方位盒、输出特征的GPU端内存和GPU显存 for (int i = 0; i < CUDAStreams; i++) { cudaHostAlloc((void**)&gpHostLength[i], gRecNum * sizeof(short), cudaHostAllocDefault);//输出周长 cudaHostAlloc((void**)&gpHostArea[i], gRecNum * sizeof(short), cudaHostAllocDefault);//面积 cudaHostAlloc((void**)&gpHostXpos[i], gRecNum * sizeof(double), cudaHostAllocDefault);//重心坐标x cudaHostAlloc((void**)&gpHostYpos[i], gRecNum * sizeof(double), cudaHostAllocDefault);//重心坐标y cudaMalloc((void**)&gpDevLength[i], gRecNum * sizeof(short));//设备端输出 周长 cudaMalloc((void**)&gpDevArea[i], gRecNum * sizeof(short));// 面积 cudaMalloc((void**)&gpDevXpos[i], gRecNum * sizeof(double));// xpos cudaMalloc((void**)&gpDevYpos[i], gRecNum * sizeof(double));// ypos } ExtractPointInitialSuccessFlag[HardwarePar.DeviceID] = true; //标志点提取完整流程 while (!ExtractPointSuccess) { mydelay(0.01); vector<CircleInfo>myInfo; img_index = 0;//图像计数 Bufferoffset = 0;//页锁内存偏移 //绑定数据 while (true) { gExtrackPointLock.lock(); mRecindex = mRecindex % (HardwareParam.DeviceCount + 1); if (PageLockBufferEmpty[mRecindex] == false && PageLockBufferWorking[mRecindex] == false) { PageLockBufferWorking[mRecindex] = true;//将页锁内存标志位置为工作状态--进行绑定 OutPutInitialIndex = PageLockBufferStartIndex[mRecindex] * 
Bufferlength;//获取图像首索引 BufferIndex = mRecindex; DatafullFlag = true; mRecindex++; gExtrackPointLock.unlock(); break; } mRecindex++; gExtrackPointLock.unlock(); if (ExtractPointSuccess) break; } //提取特征 while (DatafullFlag) { if (img_index >= Bufferlength) //qwt { gExtrackPointLock.lock(); PageLockBufferWorking[BufferIndex] = false;//处理结束--working标志位置为false gExtrackPointLock.unlock(); PageLockBufferEmpty[BufferIndex] = true; // DatafullFlag = false; break; } if (Devpar.ImgChannelNum == 1) { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)* Devpar.ImgHeight * Devpar.ImgWidth; cudaMemcpyAsync(DevPicGray[i], gHostBuffer[BufferIndex] + Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.PictureNum, cudaMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { //执行灰度化,二值化核函数程序 GrayMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicGray[i], DevPadding[i], Devpar); } } else if (Devpar.ImgChannelNum == 3)//若图像类型为彩色图-即多通道,则直接将数据拷贝到DevPicColor { for (int i = 0; i < CUDAStreams; i++) { Bufferoffset = long long(img_index + i*Devpar.PictureNum)*Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum; cudaMemcpyAsync(DevPicColor[i], gHostBuffer[BufferIndex] + +Bufferoffset, sizeof(unsigned char)* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, cudaMemcpyHostToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++)//转灰度+padding { ColorMakeBorder << <mGrid1, 128, 0, CStreams[i] >> > (DevPicColor[i], DevPadding[i], Devpar); } } for (int i = 0; i < CUDAStreams; i++) { //执行灰度化,二值化核函数程序 Binarization << <mGrid1, 128, 0, CStreams[i] >> > (DevPadding[i], Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //边界提取 Dilation << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(Dev2Val[i], DevCounter[i], sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, cudaMemcpyDeviceToDevice, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { Erosion << <mGrid1, 128, 0, CStreams[i] >> > (Dev2Val[i], DevCounter[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { //不同流中的核函数用同一GPU数据时,是否会影响核函数的性能qwt GetRecInfo << <mGrid2, 128, 0, CStreams[i] >> > (gpRDevRecData[i], DevPadding[i], DevCounter[i], gpDevLength[i], gpDevArea[i], gpDevXpos[i], gpDevYpos[i], Devpar); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostLength[i], gpDevLength[i], sizeof(short)* gRecNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostArea[i], gpDevArea[i], sizeof(short)* gRecNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostXpos[i], gpDevXpos[i], sizeof(double)* gRecNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaMemcpyAsync(gpHostYpos[i], gpDevYpos[i], sizeof(double)* gRecNum, cudaMemcpyDeviceToHost, CStreams[i]); } for (int i = 0; i < CUDAStreams; i++) { cudaStreamSynchronize(CStreams[i]); } for (int j = 0, int i = 0; i < CUDAStreams; i++) { for (int k = 0; k < Devpar.PictureNum; k++) { int hostindex = 0; int headpos = myInfo.size(); CircleInfo headInfo; headInfo.index = OutPutInitialIndex + img_index + i;//对应文件索引 headInfo.xpos = 99999; headInfo.ypos = 99999;//xpos 和 ypos作为头标志位 headInfo.area = 0; //area为0也作为特征标志位 myInfo.push_back(headInfo); while (gpHostXpos[i][j] < (k + 1)*Devpar.ImgHeight&&j < gRecNum) { if (0 < 
gpHostXpos[i][j]) { hostindex++; CircleInfo temp; temp.index = hostindex; temp.length = gpHostLength[i][j]; temp.area = gpHostArea[i][j]; temp.xpos = gpHostXpos[i][j]; temp.ypos = gpHostYpos[i][j]; myInfo.push_back(temp); } j++; } myInfo[headpos].length = hostindex;//长度置位 } } img_index += HardwarePar.CUDAStreamNum*Devpar.PictureNum; } //写磁盘 if (myInfo.size() > 0) { FILE* fp; sprintf_s(DataFilename, "%s\\%d.bin", path, OutPutInitialIndex); //【3】将图片的路径名动态的写入到strFilename这个地址的内存空间 fp = fopen(DataFilename, "wb"); fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp); fclose(fp); } //更新包围盒 if (DevUpdateRec[HardwarePar.DeviceID] == true) { for (int i = 0; i < CUDAStreams; i++) { cudaMemcpy(gpRDevRecData[i], &gHostRecData[0], gRecNum * sizeof(RecData), cudaMemcpyHostToDevice); } DevUpdateRec[HardwarePar.DeviceID] = false; } } for (int i = 0; i < CUDAStreams; i++) { cudaFree(DevPicColor[i]); cudaFree(DevPicGray[i]); cudaFree(DevPadding[i]); cudaFree(Dev2Val[i]); cudaFree(DevCounter[i]); cudaFreeHost(gpHostLength[i]); cudaFreeHost(gpHostArea[i]); cudaFreeHost(gpHostXpos[i]); cudaFreeHost(gpHostYpos[i]); //设备端内存 cudaFree(gpDevLength[i]); cudaFree(gpDevArea[i]); cudaFree(gpDevXpos[i]); cudaFree(gpDevYpos[i]); cudaFree(gpRDevRecData[i]); cudaStreamDestroy(CStreams[i]); } } }; int RecR::mRecindex = 0; /*----------------------------------矩形盒更新类------------------------------------------*/ class RecUpData : public Runnable { public: Parameter Devpar;//变量传参 ~RecUpData() { } void Run() { char strFilename[250]; //初始化图像信息参数 Devpar.ImgHeight = gStructVarible.ImgHeight; Devpar.ImgWidth = gStructVarible.ImgWidth; Devpar.Threshold = gStructVarible.Threshold; Devpar.LengthMin = gStructVarible.LengthMin; Devpar.LengthMax = gStructVarible.LengthMax; Devpar.AreaMin = gStructVarible.AreaMin; Devpar.AreaMax = gStructVarible.AreaMax; Devpar.PictureNum = gStructVarible.PictureNum; Devpar.RecPadding = gStructVarible.RecPadding; //方位数组申明 const cv::Point directions[8] = { { 0, 1 },{ 1,1 },{ 1, 0 },{ 1, -1 },{ 0, -1 },{ -1, -1 },{ -1, 0 },{ -1, 1 } }; //图像空间分配 unsigned char *ImgHostdata = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum]; //qwt这里程序有BUG unsigned char *m_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];//二值化图 unsigned char *n_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];//膨胀图 unsigned char *c_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];//轮廓图 unsigned char *temp_ptr = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum];//临时变量图 RecupdataInitialSuccessFlag = true; while (ExtractPointSuccess == false)//这里应该加监听(使得提取包围盒可以结束而不是死循环)**************************qwt10.26 { if (HostUpdateRec)//如果缓冲区里面的数据更新了一次 ,则提取包围盒 { vector<RecData>myTempRec; memcpy(ImgHostdata, gRecupImgData, sizeof(unsigned char)*Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum);//这个是在内存区域拷贝图像 //二值化 for (int i = 0; i < Devpar.ImgHeight*Devpar.PictureNum; i++) { for (int j = 0; j < Devpar.ImgWidth; j++) { m_ptr[j + i * Devpar.ImgWidth] = ImgHostdata[j + i * Devpar.ImgWidth] > Devpar.Threshold ? 
255 : 0;
						c_ptr[j + i * Devpar.ImgWidth] = m_ptr[j + i * Devpar.ImgWidth];
						n_ptr[j + i * Devpar.ImgWidth] = m_ptr[j + i * Devpar.ImgWidth];
						temp_ptr[j + i * Devpar.ImgWidth] = m_ptr[j + i * Devpar.ImgWidth];
					}
				}
				// dilation
				for (int i = 1; i < Devpar.ImgHeight*Devpar.PictureNum - 1; i++)
					for (int j = 1; j < Devpar.ImgWidth - 1; j++)
					{
						if (m_ptr[j + i * Devpar.ImgWidth] == 0)
						{
							if (m_ptr[j - 1 + (i - 1)*Devpar.ImgWidth] != 0 || m_ptr[j + (i - 1)*Devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i - 1)*Devpar.ImgWidth] != 0 ||
								m_ptr[j - 1 + i * Devpar.ImgWidth] != 0 || m_ptr[j + 1 + i * Devpar.ImgWidth] != 0 ||
								m_ptr[j - 1 + (i + 1)*Devpar.ImgWidth] != 0 || m_ptr[j + (i + 1)*Devpar.ImgWidth] != 0 || m_ptr[j + 1 + (i + 1)*Devpar.ImgWidth] != 0)
							{
								n_ptr[j + i * Devpar.ImgWidth] = 255;
								c_ptr[j + i * Devpar.ImgWidth] = 255;
								temp_ptr[j + i * Devpar.ImgWidth] = 255;
							}
						}
					}
				// erosion; after this pass c_ptr holds the contour
				for (int i = 1; i < Devpar.ImgHeight*Devpar.PictureNum - 1; i++)
					for (int j = 1; j < Devpar.ImgWidth - 1; j++)
					{
						if (n_ptr[j + i * Devpar.ImgWidth] != 0)
						{
							if (n_ptr[j + (i - 1)*Devpar.ImgWidth] != 0 && n_ptr[j - 1 + i * Devpar.ImgWidth] != 0 && n_ptr[j + 1 + i * Devpar.ImgWidth] != 0 && n_ptr[j + (i + 1)*Devpar.ImgWidth] != 0)
							{
								c_ptr[j + i * Devpar.ImgWidth] = 0;
								temp_ptr[j + i * Devpar.ImgWidth] = 0;
							}
						}
					}
				// bounding box
				short xmax;
				short xmin;
				short ymax;
				short ymin;
				// edge tracking
				int i, j, counts = 0, curr_d = 0; // counts: loop counter; curr_d: index into the direction array
				short cLength;
				// extract the bounding boxes
				for (i = 1; i < Devpar.ImgHeight*Devpar.PictureNum - 1; i++)
					for (j = 1; j < Devpar.ImgWidth - 1; j++)
					{
						// start point and current point
						cv::Point b_pt = cv::Point(i, j);
						cv::Point c_pt = cv::Point(i, j);
						// if the current point is a foreground point
						if (255 == c_ptr[j + i * Devpar.ImgWidth])
						{
							cLength = 1;
							xmin = xmax = i;
							ymin = ymax = j;
							/* bool first_t = false;*/
							bool tra_flag = false; // tracking-finished flag
							c_ptr[j + i * Devpar.ImgWidth] = 0; // visited points are cleared to 0
							while (!tra_flag) // trace the contour
							{
								// try the eight neighbours
								for (counts = 0; counts < 8; counts++)
								{
									// keep the direction index in range
									if (curr_d >= 8)
									{
										curr_d -= 8;
									}
									if (curr_d < 0)
									{
										curr_d += 8;
									}
									// tracking is a continuous walk: keep moving the search root point
									c_pt = cv::Point(b_pt.x + directions[curr_d].x, b_pt.y + directions[curr_d].y);
									// boundary check
									if ((c_pt.x > 0) && (c_pt.x < Devpar.ImgHeight*Devpar.PictureNum - 1) && (c_pt.y > 0) && (c_pt.y < Devpar.ImgWidth - 1))
									{
										// an edge pixel exists in this direction
										if (255 == c_ptr[c_pt.x*Devpar.ImgWidth + c_pt.y])
										{
											// grow the bounding box
											xmax = xmax > c_pt.x ? xmax : c_pt.x;
											ymax = ymax > c_pt.y ? ymax : c_pt.y;
											xmin = xmin < c_pt.x ? xmin : c_pt.x;
											ymin = ymin < c_pt.y ? ymin : c_pt.y;
											curr_d -= 2; // update the current direction
											c_ptr[c_pt.x*Devpar.ImgWidth + c_pt.y] = 0;
											// move the tracking root point
											b_pt.x = c_pt.x;
											b_pt.y = c_pt.y;
											cLength++;
											break; // leave the for loop
										}
									}
									curr_d++;
								} // end for
								// termination: none of the 8 neighbours is an edge pixel
								if (8 == counts)
								{
									// reset
									curr_d = 0;
									tra_flag = true;
									// filter the bounding boxes
									if (cLength < Devpar.LengthMax && (cLength > Devpar.LengthMin))
									{
										RecData tempRecData;
										int tempcount = 0;
										double ratio = double(xmax - xmin) / double(ymax - ymin); // height / width
										if (0.7 < ratio && ratio < 1.5)
										{
											for (int k = xmin; k <= xmax; k++) // count contour crossings along the height direction
											{
												tempcount += temp_ptr[(ymax + ymin) / 2 + k*Devpar.ImgWidth] > 0 ? 1 : 0;
											}
											for (int k = ymin; k <= ymax; k++) // count contour crossings along the width direction
											{
												tempcount += temp_ptr[k + (xmax + xmin) / 2 * Devpar.ImgWidth] > 0 ?
1 : 0; } if (tempcount <= 4) { if (xmin - Devpar.RecPadding < 0) tempRecData.RecXmin = 0; else tempRecData.RecXmin = xmin - Devpar.RecPadding; if (ymin - Devpar.RecPadding < 0) tempRecData.RecYmin = 0; else tempRecData.RecYmin = ymin - Devpar.RecPadding; if (xmax + Devpar.RecPadding > Devpar.ImgHeight*Devpar.PictureNum - 1) tempRecData.RecXmax = Devpar.ImgHeight*Devpar.PictureNum - 1; else tempRecData.RecXmax = xmax + Devpar.RecPadding; if (ymax + Devpar.RecPadding > Devpar.ImgWidth) tempRecData.RecYmax = Devpar.ImgWidth - 1; else tempRecData.RecYmax = ymax + Devpar.RecPadding; myTempRec.push_back(tempRecData); } } } break; } } // end if } // end while } //规整方位盒数量,利用后续线程配置 gSingleImgRecNum = myTempRec.size() / Devpar.PictureNum;//单张图方位盒数量 int rRecNum = (myTempRec.size() + 127) / 128 * 128; myTempRec.resize(gRecNum, RecData{ 0,0,0,0 }); if (gRecNum != 0) { memcpy(&gHostRecData[0], &myTempRec[0], sizeof(RecData)*gRecNum); for (int m = 0; m < HardwareParam.DeviceCount; m++) { DevUpdateRec[m] = true; } } HostUpdateRec = false; } } //释放内存 delete[]ImgHostdata; delete[]m_ptr; delete[]n_ptr; delete[]c_ptr; delete[]temp_ptr; } }; /*----------------------------------实现彩图压缩功能的类----------------------------------*/ class TC : public Runnable { public: HardwareInfo param; //硬件参数 unsigned char* my_in; //显存中的原始位图数据 needmemory memory; //压缩程序所需显存 needdata staticdata; static int mTCindex; unsigned char* total_malloc; //每一包二进制文件占用内存 int pix_index; public: void mydelay(double sec)//延时函数,用于图像数据缓冲区的更新 { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } /************************************************************************************************* Function: void Initialize() Description: 用来初始化数据结构和分配显存空间的成员函数 Calls: cudaMalloc()、nppiDCTInitAlloc()、cudaMemcpyAsync()、cudaMallocPitch()、 nppiEncodeHuffmanSpecInitAlloc_JPEG(),它们都是cuda库中的函数 Input: 无 Output: 无 ***************************************************************************************************/ void Initialize() { //cudaMalloc((void**)&(this->my_in), imgHeight * imgWidth * sizeof(unsigned char) * 3); //为my_in分配显存空间 cudaMalloc((void**)&(this->my_in), compress_old_Height * compress_old_Width * sizeof(unsigned char) * 3); nppiDCTInitAlloc(&(this->memory).pDCTState); //为memory.pDCTState分配显存空间 cudaMalloc(&(this->staticdata).pdQuantizationTables, 64 * 4); //staticdata.pdQuantizationTables分配显存空间 float nScaleFactor; nScaleFactor = 1.0f; int nMCUBlocksH = 0; int nMCUBlocksV = 0; quantityassgnment(); for (int i = 0; i < oFrameHeader.nComponents; ++i) { nMCUBlocksV = max(nMCUBlocksV, oFrameHeader.aSamplingFactors[i] & 0x0f); nMCUBlocksH = max(nMCUBlocksH, oFrameHeader.aSamplingFactors[i] >> 4); } Npp8u aZigzag[] = { 0, 1, 5, 6, 14, 15, 27, 28, 2, 4, 7, 13, 16, 26, 29, 42, 3, 8, 12, 17, 25, 30, 41, 43, 9, 11, 18, 24, 31, 40, 44, 53, 10, 19, 23, 32, 39, 45, 52, 54, 20, 22, 33, 38, 46, 51, 55, 60, 21, 34, 37, 47, 50, 56, 59, 61, 35, 36, 48, 49, 57, 58, 62, 63 }; for (int i = 0; i < 4; ++i) { Npp8u temp[64]; for (int k = 0; k < 32; ++k) { temp[2 * k + 0] = aQuantizationTables[i].aTable[aZigzag[k + 0]]; temp[2 * k + 1] = aQuantizationTables[i].aTable[aZigzag[k + 32]]; } cudaMemcpyAsync((unsigned char *)(this->staticdata).pdQuantizationTables + i * 64, temp, 64, cudaMemcpyHostToDevice); } float frameWidth = floor((float)oFrameHeader.nWidth * (float)nScaleFactor); float frameHeight = floor((float)oFrameHeader.nHeight * (float)nScaleFactor); (this->staticdata).oDstImageSize.width 
= (int)max(1.0f, frameWidth); (this->staticdata).oDstImageSize.height = (int)max(1.0f, frameHeight); size_t newPitch[3]; NppiSize oBlocks; for (int i = 0; i < oFrameHeader.nComponents; ++i) //根据图像大小计算一些参数,之后在DCT变换和Huffman编码中要用到 { //NppiSize oBlocks; NppiSize oBlocksPerMCU = { oFrameHeader.aSamplingFactors[i] & 0x0f, oFrameHeader.aSamplingFactors[i] >> 4 }; oBlocks.width = (int)ceil(((this->staticdata).oDstImageSize.width + 7) / 8 * static_cast<float>(oBlocksPerMCU.width) / nMCUBlocksH); oBlocks.width = DivUp(oBlocks.width, oBlocksPerMCU.width) * oBlocksPerMCU.width; oBlocks.height = (int)ceil(((this->staticdata).oDstImageSize.height + 7) / 8 * static_cast<float>(oBlocksPerMCU.height) / nMCUBlocksV); oBlocks.height = DivUp(oBlocks.height, oBlocksPerMCU.height) * oBlocksPerMCU.height; (this->staticdata).aDstSize[i].width = oBlocks.width * 8; (this->staticdata).aDstSize[i].height = oBlocks.height * 8; } // Scale to target image size // Assume we only deal with 420 images. int aSampleFactor[3] = { 1, 2, 2 }; (this->memory).nScanSize = (this->staticdata).oDstImageSize.width * (this->staticdata).oDstImageSize.height * 2; (this->memory).nScanSize = (this->memory).nScanSize > (4 << 20) ? (this->memory).nScanSize : (4 << 20); cudaMalloc(&(this->memory).pDScan, (this->memory).nScanSize); //为memory.pDScan分配显存空间 nppiEncodeHuffmanGetSize((this->staticdata).aDstSize[0], 3, &(this->memory).nTempSize); cudaMalloc(&(this->memory).pDJpegEncoderTemp, (this->memory).nTempSize); //为memory.pDJpegEncoderTemp分配显存空间 for (int j = 0; j < 3; j++) { size_t nPitch1; cudaMallocPitch(&(this->memory).pDCT[j], &nPitch1, oBlocks.width * 64 * sizeof(Npp16s), oBlocks.height); //为memory.pDCT分配内存空间 (this->memory).DCTStep[j] = static_cast<Npp32s>(nPitch1); cudaMallocPitch(&(this->memory).pDImage[j], &nPitch1, (this->staticdata).aDstSize[j].width, (this->staticdata).aDstSize[j].height); //为memory.pDImage分配显存空间 (this->memory).DImageStep[j] = static_cast<Npp32s>(nPitch1); dataduiqi[j] = nPitch1; } for (int i = 0; i < 3; ++i) //初始化显存中的staticdata.apDHuffmanDCTable 和 staticdata.apDHuffmanACTable { nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanDCTables[(oScanHeader.aHuffmanTablesSelector[i] >> 4)].aCodes, nppiDCTable, &(this->staticdata).apDHuffmanDCTable[i]); nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanACTables[(oScanHeader.aHuffmanTablesSelector[i] & 0x0f)].aCodes, nppiACTable, &(this->staticdata).apDHuffmanACTable[i]); } for (int iComponent = 0; iComponent < 2; ++iComponent) { (this->memory).hpCodesDC[iComponent] = pHuffmanDCTables[iComponent].aCodes; (this->memory).hpCodesAC[iComponent] = pHuffmanACTables[iComponent].aCodes; (this->memory).hpTableDC[iComponent] = pHuffmanDCTables[iComponent].aTable; (this->memory).hpTableAC[iComponent] = pHuffmanACTables[iComponent].aTable; } } /************************************************************************************************* Function: void process() Description: 首先调用jpegNPP工程的nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW函数, 作用是对memory.pDImage中的照片YUV数据进行DCT变换和量化,并将结果保存在memory.pDCT中; 之后调用jpegNPP工程的nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R函数, 作用是对经过DCT变换后的图像数据memory.pDCT进行霍夫曼编码,编码后的数据在memory.pDScan中保存,等待写入磁盘。 Calls: nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW()、nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R(),它们都是cuda库中的函数 Input: 无 Output: 无 ***************************************************************************************************/ void process() { for (int i = 0; i < 3; ++i) //对YCbCr三个通道的图片数据进行DCT变换 { nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW((this->memory).pDImage[i], 
(this->memory).DImageStep[i], (this->memory).pDCT[i], (this->memory).DCTStep[i], (this->staticdata).pdQuantizationTables + oFrameHeader.aQuantizationTableSelector[i] * 64, (this->staticdata).aDstSize[i], (this->memory).pDCTState); } nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R((this->memory).pDCT, (this->memory).DCTStep, //进行霍夫曼编码操作 0, oScanHeader.nSs, oScanHeader.nSe, oScanHeader.nA >> 4, oScanHeader.nA & 0x0f, (this->memory).pDScan, &(this->memory).nScanLength, (this->memory).hpCodesDC, (this->memory).hpTableDC, (this->memory).hpCodesAC, (this->memory).hpTableAC, (this->staticdata).apDHuffmanDCTable, (this->staticdata).apDHuffmanACTable, (this->staticdata).aDstSize, (this->memory).pDJpegEncoderTemp); } /************************************************************************************************* Function: void writedisk() Description: 完成jpg图片写入磁盘的工作。 先把JFIF标签、aQuantizationTables标准量化表、oFrameHeader结构头、 标准霍夫曼编码表、oScanHeader扫描头写入文件作为jpg图片的文件头, 之后将memory.pDScan编码后的数据输入写入文件,组成完整的.jpg图片。 Calls: writeMarker()、writeJFIFTag()、writeQuantizationTable()、writeHuffmanTable() 这些函数定义在useful.h中 Input: 无 Output: 无 ***************************************************************************************************/ //void writedisk(char* OutputFile) void writedisk(int picture_num, Package* a, int bag_index) { unsigned char *pDstJpeg = new unsigned char[(this->memory).nScanSize]; //为每一张.jpg图片数据开辟缓冲区 unsigned char *pDstOutput = pDstJpeg; oFrameHeader.nWidth = (this->staticdata).oDstImageSize.width; oFrameHeader.nHeight = (this->staticdata).oDstImageSize.height; writeMarker(0x0D8, pDstOutput); writeJFIFTag(pDstOutput); writeQuantizationTable(aQuantizationTables[0], pDstOutput); //写入标准量化表 writeQuantizationTable(aQuantizationTables[1], pDstOutput); writeFrameHeader(oFrameHeader, pDstOutput); writeHuffmanTable(pHuffmanDCTables[0], pDstOutput); //写入霍夫曼编码表 writeHuffmanTable(pHuffmanACTables[0], pDstOutput); writeHuffmanTable(pHuffmanDCTables[1], pDstOutput); writeHuffmanTable(pHuffmanACTables[1], pDstOutput); writeScanHeader(oScanHeader, pDstOutput); cudaMemcpy(pDstOutput, (this->memory).pDScan, (this->memory).nScanLength, cudaMemcpyDeviceToHost); pDstOutput += (this->memory).nScanLength; writeMarker(0x0D9, pDstOutput); char szOutputFiler[100]; sprintf_s(szOutputFiler, "%s\\%d.jpg", gStructVarible.ImgSavePath, picture_num); memcpy(total_malloc + pix_index, pDstJpeg, static_cast<int>(pDstOutput - pDstJpeg)); //将这一整张.jpg图片数据拷贝到大的内存区total_malloc pix_index += static_cast<int>(pDstOutput - pDstJpeg); //a->Form_one_head(bag_index / gStructVarible.PictureNum, szOutputFiler, pDstOutput - pDstJpeg); a->Form_one_head(bag_index / gStructVarible.PictureNum, picture_num, pDstOutput - pDstJpeg); //完成一张.jpg图片对应的包头, bag_index / gStructVarible.PictureNum代表这是第几张图 //{ //Write result to file. 
//std::ofstream outputFile1(OutputFile, ios::out | ios::binary); //outputFile1.write(reinterpret_cast<const char *>(pDstJpeg), static_cast<int>(pDstOutput - pDstJpeg)); //} delete[] pDstJpeg; } /************************************************************************************************* Function: void memoryfree() Description: 程序结束后,释放之前分配好的显存空间 Calls: cudaFree()、nppiEncodeHuffmanSpecFree_JPEG()、nppiDCTFree() 它们都是cuda库中的函数 Input: 无 Output: 无 ***************************************************************************************************/ void memoryfree() //释放之前申请的内存和显存 { cudaFree(this->my_in); for (int i = 0; i < 3; ++i) { cudaFree((this->memory).pDCT[i]); cudaFree((this->memory).pDImage[i]); nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanDCTable[i]); nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanACTable[i]); } nppiDCTFree((this->memory).pDCTState); cudaFree((this->memory).pDJpegEncoderTemp); cudaFree((this->memory).pDScan); cudaFree((this->staticdata).pdQuantizationTables); } ~TC() {} /************************************************************************************************* Function: Run() Description: 是多线程类T运行的入口函数,整个压缩模块从这里开始运行 Calls: 依次调用了Initialize()、RGBtoYUV <<<blocks, threads >>>、process()、 writedisk(szOutputFile)和memoryfree() Input: 无 Output: 无 ***************************************************************************************************/ void Run() { char ImgoutputPath[255]; total_malloc = new unsigned char[100000000]; pix_index = 0; char szOutputFile[100]; clock_t start, end; int img_index; //图像索引 int mFlagIndex = 0; int OutPutInitialIndex = 0; //输出的Bin文件初始索引号 int Bufferoffset = 0; //缓冲区偏移量 bool DatafullFlag = false; //标志位:当为true的时候,表示该GPU对应的两个缓冲区中,至少有一个有有效数据。 cudaSetDevice((this->param).GpuId); this->Initialize(); cout << "T GPU :" << param.GpuId << " initial success!" 
<< endl; while (!ExtractPointSuccess) { mydelay(0.01); img_index = 0; //图像计数 Bufferoffset = 0; //获取数据 while (true) { gComressReadDataLock.lock(); mTCindex = mTCindex % (HardwareParam.DeviceCount + 1); if (gComressionBufferEmpty[mTCindex] == false && gComressionBufferWorking[mTCindex] == false) { //将页锁内存标志位置为工作状态--进行绑定 gComressionBufferWorking[mTCindex] = true; OutPutInitialIndex = gComressionBufferStartIndex[mTCindex] * Bufferlength;//获取图像首索引 mFlagIndex = mTCindex; DatafullFlag = true; mTCindex++; gComressReadDataLock.unlock(); break; } mTCindex++; gComressReadDataLock.unlock(); if (ExtractPointSuccess) break; } start = clock(); sprintf_s(ImgoutputPath, "%s\\%d.bin", gStructVarible.ImgSavePath, OutPutInitialIndex); cout << ImgoutputPath << endl; //Package data_bag(ImgoutputPath, Bufferlength / gStructVarible.PictureNum); Package data_bag(ImgoutputPath); data_bag.Package_init(Bufferlength / gStructVarible.PictureNum); //压缩pImg图片 while (DatafullFlag) { if (img_index >= Bufferlength) { end = clock(); gComressReadDataLock.lock(); gComressionBufferWorking[mFlagIndex] = false; gComressReadDataLock.unlock(); gComressionBufferEmpty[mFlagIndex] = true; DatafullFlag = false; compress_write_lock.lock(); data_bag.file.open(data_bag.Fname, ios::out | ios::binary); //50张.jpg完成后,打开一个二进制文件 //data_bag.Form_total_head(); //完成所有50张图片的包头信息 data_bag.Form_total_head(compress_imgWidth, compress_imgHeight, gStructVarible.PictureNum, OutPutInitialIndex); data_bag.file.write(data_bag.head_cache, data_bag.head_bias); //写入所有包头 data_bag.file.write(reinterpret_cast<const char *>(total_malloc), static_cast<int>(pix_index)); //写入所有数据 data_bag.file.close(); //data_bag.UnPack(data_bag.Fname); compress_write_lock.unlock(); memset(total_malloc, 0, 100000000); //缓冲区清空 pix_index = 0; //缓冲区索引归零 break; } //sprintf_s(szOutputFile, "%s\\%d.jpg", gStructVarible.ImgSavePath, OutPutInitialIndex + img_index); int picture_index = OutPutInitialIndex + img_index; Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum * 3; cudaMemcpy(this->my_in, gHostComressiongBuffer[mFlagIndex] + Bufferoffset, compress_old_Width * compress_old_Height * sizeof(unsigned char) * 3, cudaMemcpyHostToDevice); RGBtoYUV << <blocks, threads >> > (this->my_in, (this->memory).pDImage[0], (this->memory).pDImage[2], (this->memory).pDImage[1], compress_imgHeight, compress_imgWidth, dataduiqi[0], compress_old_Height, compress_old_Width); this->process(); this->writedisk(picture_index, &data_bag, img_index); //img_index++; img_index = img_index + gStructVarible.PictureNum; //picture_index是一次实验总的图片标号 } } delete[] total_malloc; this->memoryfree(); } }; int TC::mTCindex = 0; //----------------------------------实现灰度图压缩功能的类------------------------------------------// //核函数自编版 class T : public Runnable //由于该类的实现和前者TC类及其相似,所以不再进行注释 { public: HardwareInfo param; //硬件参数 gpuneedmemory memory[GRAYCompressStreams]; static int mTindex; needconstdata staticdata; //压缩过程中用到的常量数据 RIM ImageSize; //记录图像大小,和每个图像分量对齐后的对齐宽度 size_t Org_Pitch; //记录原始图像数据对齐后的宽度 int h_MCUtotal; //单个图像分量8*8像素块总量 cudaStream_t stream[GRAYCompressStreams]; //申明CUDA流 int stridef; cpuneedmemory cpumemory[GRAYCompressStreams]; //存放压缩图像过程中CPU上的原始图像位图数据和最终编码图像数据 unsigned char* total_malloc; //打包数据在内存中的缓存空间 int pix_index; public: void mydelay(double sec) //延时函数,用于图像数据缓冲区的更新 { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } 
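	// ------------------------------------------------------------------
	// Illustrative sketch only (not called anywhere in the pipeline):
	// Initialize() and process() below size their buffers and kernel grids
	// from the number of 8x8 MCUs in the padded luminance plane. This
	// helper reproduces that arithmetic for a single image. The function
	// name is local to this sketch and is an assumption, not part of the
	// original code; it assumes ALIGN(x, 8) rounds x up to a multiple of 8.
	static int SketchGrayMcuCount(int imgWidth, int imgHeight)
	{
		const int alignedW = (imgWidth + 7) / 8 * 8;   // same rounding ALIGN(width, 8) is expected to perform
		const int alignedH = (imgHeight + 7) / 8 * 8;  // pad height to the 8x8 MCU grid
		return alignedW * alignedH / 64;               // one MCU per 8x8 pixel block
	}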
//************************************************************************************************* //*Function: void Initialize() //*Description: 用来初始化数据结构和分配显存空间的成员函数 //*Calls: cudaMalloc()、cudaMemcpyAsync()、cudaMallocPitch()它们都是cuda库中的函数 //*Input: ImageSize 记录图像数据大小,用于分配显存与内存 //*Output: 无 //*************************************************************************************************** void Initialize() { size_t nPitch; this->stridef = ALIGN(compress_old_Width, 4); (this->ImageSize).width = ALIGN(compress_old_Width, 8); (this->ImageSize).height = ALIGN(compress_old_Height, 8); int h_MCUtotal = (this->ImageSize).height*(this->ImageSize).width / 64; int ARRAY_SIZE = ALIGN(h_MCUtotal + 1025, 1024); int ARRAY_SIZE1 = ALIGN(h_MCUtotal / 1024 + 1025, 1024); //为最后编码的图像数据确定内存大小 (this->staticdata).nScanSize = (this->ImageSize).width * (this->ImageSize).height * 2; (this->staticdata).nScanSize = (this->staticdata).nScanSize > (10 << 20) ? (this->staticdata).nScanSize : (10 << 20); for (int i = 0; i < GRAYCompressStreams; i++) { //为每一个流分配显存与内存 cudaMallocPitch((void **)&(this->memory[i].d_bsrc), &(this->ImageSize.StrideF), (this->ImageSize).width * sizeof(BYTE), (this->ImageSize).height); //为my_in分配显存空间 cudaMallocPitch((void **)&(this->memory[i].d_ydst), &nPitch, (this->ImageSize).width * (this->ImageSize).height * sizeof(BSI16), 1); cudaMallocPitch((void **)&(this->memory[i].d_JPEGdata), &nPitch, (this->ImageSize).width * sizeof(BYTE)*(this->ImageSize).height, 1); cudaMalloc((void **)&(this->memory[i].last_JPEGdata), (10 << 20)); cudaMalloc((void **)&(this->memory[i].prefix_num), ARRAY_SIZE * sizeof(int)); cudaMalloc((void **)&(this->memory[i].last_prefix_num), ARRAY_SIZE * sizeof(int)); cudaMalloc((void **)&(this->memory[i].dc_component), ARRAY_SIZE * sizeof(int)); cudaMalloc((void **)&(this->memory[i].d_blocksum), 768 * sizeof(int)); cudaMalloc((void **)&(this->memory[i].d_datalen), sizeof(int)); //创建CUDA流 cudaStreamCreate(&(this->stream[i])); //分配CPU内存 //cudaHostAlloc((BYTE**)&(this->cpumemory[i]).pDstJpeg, (this->staticdata).nScanSize, cudaHostAllocDefault); //最终编码数据 (this->cpumemory[i]).pDstJpeg = new unsigned char[(this->staticdata).nScanSize]; this->cpumemory[i].pDstOutput = this->cpumemory[i].pDstJpeg; } //-------------------------为灰度图像压缩配置常量数据-------------------- cudaMalloc(&(this->staticdata).DEV_STD_QUANT_TAB_LUMIN, 64 * sizeof(float)); cudaMalloc(&(this->staticdata).DEV_ZIGZAG, 64 * sizeof(int)); { //--------------------配置亮度量化表-------------------------------- float temp[64]; for (int i = 0; i<64; i++) { temp[i] = 1.0f / (float)STD_QUANT_TAB_LUMIN[i] * C_norm * C_norm; } cudaMemcpyAsync((this->staticdata).DEV_STD_QUANT_TAB_LUMIN, temp, 64 * sizeof(float), cudaMemcpyHostToDevice); } cudaMemcpyAsync((this->staticdata).DEV_ZIGZAG, aZIGZAG, 64 * sizeof(float), cudaMemcpyHostToDevice); { //----------------初始化huffman表 GPUjpeg_huffman_encoder_value_init_kernel << <32, 256 >> >(); // 8192 threads total // 创建GPU版本的Huffman表 ( CC >= 2.0) uint32_t gpujpeg_huffman_cpu_lut[(256 + 1) * 4]; memset(gpujpeg_huffman_cpu_lut, 0, (256 + 1) * 4 * sizeof(uint32_t)); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 0, STD_HUFTAB_LUMIN_AC, true); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 1, STD_HUFTAB_LUMIN_DC, false); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 2, STD_HUFTAB_CHROM_AC, true); Newhuffman_table_init(gpujpeg_huffman_cpu_lut + 257 * 3, STD_HUFTAB_CHROM_DC, false); cudaMemcpyToSymbol(gpujpeg_huffman_gpu_tab, gpujpeg_huffman_cpu_lut, (256 + 1) * 4 * 
sizeof(*gpujpeg_huffman_gpu_tab), 0, cudaMemcpyHostToDevice ); } } //************************************************************************************************** //**Function: void process() //**Description: 用来压缩图像的函数 //**Input: Size 记录图像数据大小,用于分配显存与内存 //**Output: 无 //*************************************************************************************************** void process() { const int ARRAY_SIZE = ImageSize.width * ImageSize.height; const int h_MCUtotal = ARRAY_SIZE / 64; //图像数据总的8*8MCU单元 const int Code_blocks = (h_MCUtotal + CODE_THREADS - 1) / CODE_THREADS; int Blocksums; int prexsum_blocks = 1; int prexsum_threads = (h_MCUtotal - 1) / CODE_THREADS; //prefix_sum前缀求和线程分配 int preSum_Blocks = (h_MCUtotal + 1023) / 1024; //DCT线程分配 dim3 DCT_blocks((ImageSize.width + 63) / DCT_BLOCK_WIDTH, ImageSize.height / DCT_BLOCK_HEIGHT); dim3 DCT_threads(8, 32 / 8, 2); dim3 Encode_thread(THREAD_WARP, 4); dim3 Encode_Blocks(gpujpeg_huffman_encoder_grid_size((h_MCUtotal + 3) / 4)); for (int i = 0; i < GRAYCompressStreams; i++) { CUDA_DCT8_kernel << <DCT_blocks, DCT_threads, 0, this->stream[i] >> >(this->memory[i].d_ydst, this->memory[i].d_bsrc, ImageSize, this->staticdata.DEV_ZIGZAG, this->staticdata.DEV_STD_QUANT_TAB_LUMIN); } for (int i = 0; i < GRAYCompressStreams; i++) { Data_codelength_kernel << <Encode_Blocks, Encode_thread, 0, this->stream[i] >> > (this->memory[i].d_ydst, h_MCUtotal, this->memory[i].d_JPEGdata, this->memory[i].prefix_num, 1, 0); //计算每个mcu比特流的具体位置,前缀求和算法 work_efficient_PrefixSum_kernel << <preSum_Blocks, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].prefix_num, this->memory[i].dc_component); if (h_MCUtotal <= PRESUM_THREADS * 512) { work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].prefix_num); } else { work_efficient_PrefixSum_kernel << < ((h_MCUtotal - 1) / 512 + 1023) / 1024, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].d_blocksum); work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].d_blocksum); work_efficient_Adds_kernel << <((h_MCUtotal + 511) / 512 + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].d_blocksum, this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].prefix_num); } //对图像数据进行编码处理 data_shift_kernel << <Code_blocks, CODE_THREADS, 0, this->stream[i] >> >(this->memory[i].d_JPEGdata, this->memory[i].prefix_num, h_MCUtotal, this->memory[i].d_datalen, this->memory[i].dc_component, this->memory[i].last_prefix_num); //计算每个MCU BYTE流的具体位置,前缀求和算法 work_efficient_PrefixSum_kernel << <preSum_Blocks, PRESUM_THREADS, 0, this->stream[i] >> > (this->memory[i].last_prefix_num, this->memory[i].dc_component); if (h_MCUtotal <= PRESUM_THREADS * 512) { work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].last_prefix_num); } else { work_efficient_PrefixSum_kernel << < ((h_MCUtotal - 1) / 512 + 1023) / 1024, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].d_blocksum); work_efficient_BlockUp_kernel << <1, PRESUM_THREADS, 
0, this->stream[i] >> >(this->memory[i].d_blocksum); work_efficient_Adds_kernel << <((h_MCUtotal + 511) / 512 + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].d_blocksum, this->memory[i].dc_component); work_efficient_Adds_kernel << <(h_MCUtotal + 511) / 512, PRESUM_THREADS, 0, this->stream[i] >> >(this->memory[i].dc_component, this->memory[i].last_prefix_num); } cudaMemsetAsync(this->memory[i].last_JPEGdata, 0, (10 << 20), this->stream[i]); Data_encodelater1_kernel << <Code_blocks, CODE_THREADS, 0, this->stream[i] >> >(this->memory[i].last_prefix_num, this->memory[i].d_JPEGdata, this->memory[i].last_JPEGdata, h_MCUtotal, this->memory[i].d_datalen); //得到图像数据编码长度 cudaMemcpyAsync(&this->cpumemory[i].dst_JPEGdatalength, (this->memory[i]).d_datalen, sizeof(int), cudaMemcpyDeviceToHost, this->stream[i]); } } void writedisk(int picture_num, Package* a, int bag_index) { for (int i = 0; i < GRAYCompressStreams; i++) { this->cpumemory[i].pDstOutput = this->cpumemory[i].pDstJpegDataStart; //编码后图像数据传输 cudaMemcpyAsync(this->cpumemory[i].pDstOutput, this->memory[i].last_JPEGdata, this->cpumemory[i].dst_JPEGdatalength, cudaMemcpyDeviceToHost, this->stream[i]); //-------------等待Stream流执行完成 cudaStreamSynchronize(this->stream[i]); this->cpumemory[i].pDstOutput += this->cpumemory[i].dst_JPEGdatalength; writeMarker(0x0D9, this->cpumemory[i].pDstOutput); //char szOutputFiler[100]; //sprintf_s(szOutputFiler, "%s\\%d.jpg", gStructVarible.ImgSavePath, picture_num); memcpy(total_malloc + pix_index, this->cpumemory[i].pDstJpeg, static_cast<int>(this->cpumemory[i].pDstOutput - this->cpumemory[i].pDstJpeg)); pix_index += static_cast<int>(this->cpumemory[i].pDstOutput - this->cpumemory[i].pDstJpeg); a->Form_one_head(bag_index / gStructVarible.PictureNum, picture_num, this->cpumemory[i].pDstOutput - this->cpumemory[i].pDstJpeg); picture_num = picture_num + gStructVarible.PictureNum; bag_index = bag_index + gStructVarible.PictureNum; } } void WriteJpgheader() { for (int i = 0; i < GRAYCompressStreams; i++) { writeMarker(0x0D8, this->cpumemory[i].pDstOutput); writeMarker(0x0DB, this->cpumemory[i].pDstOutput); writeWords(67, this->cpumemory[i].pDstOutput); writeChar(0, this->cpumemory[i].pDstOutput); for (int j = 0; j < 64; j++) { writeChar(STD_QUANT_TAB_LUMIN[ZIGZAG[j]], this->cpumemory[i].pDstOutput); } writeMarker(0x0DB, this->cpumemory[i].pDstOutput); writeWords(67, this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); for (int j = 0; j < 64; j++) { writeChar(STD_QUANT_TAB_CHROM[ZIGZAG[j]], this->cpumemory[i].pDstOutput); } writeMarker(0x0C0, this->cpumemory[i].pDstOutput); unsigned short len = 2 + 1 + 2 + 2 + 1 + 3 * 3; //3是颜色分量数 writeWords(len, this->cpumemory[i].pDstOutput); writeChar(8, this->cpumemory[i].pDstOutput); writeWords(compress_old_Height, this->cpumemory[i].pDstOutput); writeWords(compress_old_Width, this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(0, this->cpumemory[i].pDstOutput); writeChar(2, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); //********************************************************************************************* // output DHT AC 0xC4 霍夫曼(Huffman)表 
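			// Layout note: each DHT segment emitted below follows the standard JPEG form --
			// the 0xC4 marker, a 2-byte segment length, one byte holding (table class << 4) | table id
			// (class 1 = AC, class 0 = DC), 16 bytes of per-code-length symbol counts, and then the
			// symbol values themselves. That is why len is 2 + 1 + 16 + 162 for the AC tables,
			// 2 + 1 + 16 + 12 for the DC tables, and why len - 3 bytes are copied from each table.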
writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 162; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(0 + 0x10, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_LUMIN_AC, len - 3); this->cpumemory[i].pDstOutput += len - 3; writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 162; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(1 + 0x10, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_CHROM_AC, len - 3); this->cpumemory[i].pDstOutput += len - 3; // output DHT DC 0xC4 霍夫曼(Huffman)表 writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 12; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(0 + 0x00, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_LUMIN_DC, len - 3); this->cpumemory[i].pDstOutput += len - 3; writeMarker(0x0C4, this->cpumemory[i].pDstOutput); len = 2 + 1 + 16 + 12; writeWords(len, this->cpumemory[i].pDstOutput); writeChar(1 + 0x00, this->cpumemory[i].pDstOutput); memcpy(this->cpumemory[i].pDstOutput, STD_HUFTAB_CHROM_DC, len - 3); this->cpumemory[i].pDstOutput += len - 3; // output SOS 0xDA  扫描线开始 len = 2 + 1 + 2 * 3 + 3; writeMarker(0x0DA, this->cpumemory[i].pDstOutput); writeWords(len, this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar(1, this->cpumemory[i].pDstOutput); writeChar((0 << 0) | (0 << 4), this->cpumemory[i].pDstOutput); writeChar(2, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(3, this->cpumemory[i].pDstOutput); writeChar((1 << 0) | (1 << 4), this->cpumemory[i].pDstOutput); writeChar(0x00, this->cpumemory[i].pDstOutput); writeChar(0x3f, this->cpumemory[i].pDstOutput); writeChar(0x00, this->cpumemory[i].pDstOutput); this->cpumemory[i].pDstJpegDataStart = this->cpumemory[i].pDstOutput; } } //释放申请的显存与内存 void memoryfree() { for (int i = 0; i < GRAYCompressStreams; i++) { //释放显存 cudaFree(this->memory[i].d_bsrc); cudaFree(this->memory[i].d_ydst); cudaFree(this->memory[i].d_JPEGdata); cudaFree(this->memory[i].last_JPEGdata); cudaFree(this->memory[i].prefix_num); cudaFree(this->memory[i].last_prefix_num); cudaFree(this->memory[i].dc_component); cudaFree(this->memory[i].d_blocksum); cudaFree(this->memory[i].d_datalen); //cudaFree(this->cpumemory[i].pDstJpeg); delete[] this->cpumemory[i].pDstJpeg; } cudaFree(this->staticdata.DEV_STD_QUANT_TAB_LUMIN); cudaFree(this->staticdata.DEV_ZIGZAG); } ~T() {} void Run() { char ImgoutputPath[255]; total_malloc = new unsigned char[100000000]; pix_index = 0; clock_t start, end, end2; int img_index;//图像索引 int cudaStreams_imgindex = 0; //每个流的图像索引 int mFlagIndex = 0; int OutPutInitialIndex = 0; //输出的Bin文件初始索引号 int Bufferoffset = 0; //缓冲区偏移量 bool DatafullFlag = false;//标志位:当为true的时候,表示该GPU对应的两个缓冲区中,至少有一个有有效数据。 //测试读入图片是否成功------------------------------------------------------------------------------------- cv::Mat img1(5120, 5120, CV_8UC1); cudaSetDevice((this->param).GpuId); this->Initialize(); cout << "T GPU :" << param.GpuId << " initial success!" 
<< endl; WriteJpgheader(); while (!ExtractPointSuccess) { mydelay(0.01); img_index = 0;//图像计数 Bufferoffset = 0; //绑定数据 while (true)//这里需要改,锁不能用和提点一样 { gComressReadDataLock.lock(); mTindex = mTindex % (HardwareParam.DeviceCount + 1); if (gComressionBufferEmpty[mTindex] == false && gComressionBufferWorking[mTindex] == false) { //将页锁内存标志位置为工作状态--进行绑定 gComressionBufferWorking[mTindex] = true; OutPutInitialIndex = gComressionBufferStartIndex[mTindex] * Bufferlength;//获取图像首索引 mFlagIndex = mTindex; DatafullFlag = true; mTindex++; gComressReadDataLock.unlock(); break; } mTindex++; gComressReadDataLock.unlock(); if (ExtractPointSuccess) break; } start = clock(); sprintf_s(ImgoutputPath, "%s\\%d.bin", gStructVarible.ImgSavePath, OutPutInitialIndex); Package data_bag(ImgoutputPath); data_bag.Package_init(Bufferlength / gStructVarible.PictureNum); //压缩pImg图片 while (DatafullFlag) { if (img_index >= Bufferlength) { end = clock(); gComressReadDataLock.lock(); gComressionBufferWorking[mFlagIndex] = false; gComressReadDataLock.unlock(); gComressionBufferEmpty[mFlagIndex] = true; DatafullFlag = false; //写磁盘 compress_write_lock.lock(); data_bag.file.open(data_bag.Fname, ios::out | ios::binary); //data_bag.Form_total_head(); data_bag.Form_total_head(compress_imgWidth, compress_imgHeight, gStructVarible.PictureNum, OutPutInitialIndex); //cout << OutPutInitialIndex << endl; data_bag.file.write(data_bag.head_cache, data_bag.head_bias); data_bag.file.write(reinterpret_cast<const char *>(total_malloc), static_cast<int>(pix_index)); data_bag.file.close(); //data_bag.UnPack(data_bag.Fname); compress_write_lock.unlock(); memset(total_malloc, 0, 100000000); pix_index = 0; end2 = clock(); //cout << "T GPU :" << param.GpuId << " Index" << OutPutInitialIndex << " 处理:" << double(end - start) / CLOCKS_PER_SEC <<" 总时间:"<< double(end2 - start) / CLOCKS_PER_SEC<< endl; break; } int picture_index = OutPutInitialIndex + img_index; //Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum; Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * img_index; //将图像数据传输到GPU for (int i = 0; i < GRAYCompressStreams; i++) { cudaMemcpy2DAsync(this->memory[i].d_bsrc, ImageSize.StrideF, gHostComressiongBuffer[mFlagIndex] + Bufferoffset, ImageSize.width * sizeof(unsigned char), ImageSize.width * sizeof(unsigned char), ImageSize.height, cudaMemcpyHostToDevice, this->stream[i]); Bufferoffset += gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum; } this->process(); this->writedisk(picture_index, &data_bag, img_index); img_index = img_index + gStructVarible.PictureNum * GRAYCompressStreams; } } delete[] total_malloc; this->memoryfree(); for (int i = 0; i < GRAYCompressStreams; i++) //-------------销毁CUDA流 cudaStreamDestroy(this->stream[i]); } }; /*----------------------------------实现灰度图压缩功能的类--------------------------------*/ //npp库调用版 //class T : public Runnable //由于该类的实现和前者TC类及其相似,所以不再进行注释 //{ //public: // HardwareInfo param; //硬件参数 // needmemory memory; // needdata staticdata; // static int mTindex; // static int test_number; // unsigned char* total_malloc; // int pix_index; // // //public: // void mydelay(double sec)//延时函数,用于图像数据缓冲区的更新 // { // clock_t start_time, cur_time; // start_time = clock(); // do // { // cur_time = clock(); // } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); // } // void Initialize() // { // //cudaMalloc((void**)&(this->my_in), imgHeight * imgWidth * sizeof(unsigned char) * 3); // nppiDCTInitAlloc(&(this->memory).pDCTState); 
// cudaMalloc(&(this->staticdata).pdQuantizationTables, 64 * 4); // // float nScaleFactor; // nScaleFactor = 1.0f; // int nMCUBlocksH = 0; // int nMCUBlocksV = 0; // quantityassgnment(); // // for (int i = 0; i < oFrameHeader.nComponents; ++i) // { // nMCUBlocksV = max(nMCUBlocksV, oFrameHeader.aSamplingFactors[i] & 0x0f); // nMCUBlocksH = max(nMCUBlocksH, oFrameHeader.aSamplingFactors[i] >> 4); // } // // // Npp8u aZigzag[] = { // 0, 1, 5, 6, 14, 15, 27, 28, // 2, 4, 7, 13, 16, 26, 29, 42, // 3, 8, 12, 17, 25, 30, 41, 43, // 9, 11, 18, 24, 31, 40, 44, 53, // 10, 19, 23, 32, 39, 45, 52, 54, // 20, 22, 33, 38, 46, 51, 55, 60, // 21, 34, 37, 47, 50, 56, 59, 61, // 35, 36, 48, 49, 57, 58, 62, 63 // }; // // for (int i = 0; i < 4; ++i) // { // Npp8u temp[64]; // // for (int k = 0; k < 32; ++k) // { // temp[2 * k + 0] = aQuantizationTables[i].aTable[aZigzag[k + 0]]; // temp[2 * k + 1] = aQuantizationTables[i].aTable[aZigzag[k + 32]]; // } // // cudaMemcpyAsync((unsigned char *)(this->staticdata).pdQuantizationTables + i * 64, temp, 64, cudaMemcpyHostToDevice); // // } // // float frameWidth = floor((float)oFrameHeader.nWidth * (float)nScaleFactor); // float frameHeight = floor((float)oFrameHeader.nHeight * (float)nScaleFactor); // // (this->staticdata).oDstImageSize.width = (int)max(1.0f, frameWidth); // (this->staticdata).oDstImageSize.height = (int)max(1.0f, frameHeight); // // size_t newPitch[3]; // NppiSize oBlocks; // // // for (int i = 0; i < oFrameHeader.nComponents; ++i) // { // //NppiSize oBlocks; // NppiSize oBlocksPerMCU = { oFrameHeader.aSamplingFactors[i] & 0x0f, oFrameHeader.aSamplingFactors[i] >> 4 }; // // oBlocks.width = (int)ceil(((this->staticdata).oDstImageSize.width + 7) / 8 * // static_cast<float>(oBlocksPerMCU.width) / nMCUBlocksH); // oBlocks.width = DivUp(oBlocks.width, oBlocksPerMCU.width) * oBlocksPerMCU.width; // // oBlocks.height = (int)ceil(((this->staticdata).oDstImageSize.height + 7) / 8 * // static_cast<float>(oBlocksPerMCU.height) / nMCUBlocksV); // oBlocks.height = DivUp(oBlocks.height, oBlocksPerMCU.height) * oBlocksPerMCU.height; // // (this->staticdata).aDstSize[i].width = oBlocks.width * 8; // (this->staticdata).aDstSize[i].height = oBlocks.height * 8; // } // // // // Scale to target image size // // Assume we only deal with 420 images. // int aSampleFactor[3] = { 1, 2, 2 }; // // (this->memory).nScanSize = (this->staticdata).oDstImageSize.width * (this->staticdata).oDstImageSize.height * 2; // (this->memory).nScanSize = (this->memory).nScanSize > (4 << 20) ? 
(this->memory).nScanSize : (4 << 20); // cudaMalloc(&(this->memory).pDScan, (this->memory).nScanSize); // nppiEncodeHuffmanGetSize((this->staticdata).aDstSize[0], 3, &(this->memory).nTempSize); // cudaMalloc(&(this->memory).pDJpegEncoderTemp, (this->memory).nTempSize); // // // for (int j = 0; j < 3; j++) { // size_t nPitch1; // cudaMallocPitch(&(this->memory).pDCT[j], &nPitch1, oBlocks.width * 64 * sizeof(Npp16s), oBlocks.height); // (this->memory).DCTStep[j] = static_cast<Npp32s>(nPitch1); // //NPP_CHECK_CUDA(cudaMallocPitch(&myImage1[j], &nPitch1, aSrcSize[j].width, aSrcSize[j].height)); 原来 // cudaMallocPitch(&(this->memory).pDImage[j], &nPitch1, (this->staticdata).aDstSize[j].width, (this->staticdata).aDstSize[j].height); // (this->memory).DImageStep[j] = static_cast<Npp32s>(nPitch1); // dataduiqi[j] = nPitch1; // // } // for (int i = 0; i < 3; ++i) // { // nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanDCTables[(oScanHeader.aHuffmanTablesSelector[i] >> 4)].aCodes, nppiDCTable, &(this->staticdata).apDHuffmanDCTable[i]); // nppiEncodeHuffmanSpecInitAlloc_JPEG(pHuffmanACTables[(oScanHeader.aHuffmanTablesSelector[i] & 0x0f)].aCodes, nppiACTable, &(this->staticdata).apDHuffmanACTable[i]); // } // // for (int iComponent = 0; iComponent < 2; ++iComponent) // { // (this->memory).hpCodesDC[iComponent] = pHuffmanDCTables[iComponent].aCodes; // (this->memory).hpCodesAC[iComponent] = pHuffmanACTables[iComponent].aCodes; // (this->memory).hpTableDC[iComponent] = pHuffmanDCTables[iComponent].aTable; // (this->memory).hpTableAC[iComponent] = pHuffmanACTables[iComponent].aTable; // } // } // void process() // { // compress_process_lock.lock(); // nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW((this->memory).pDImage[0], (this->memory).DImageStep[0], // (this->memory).pDCT[0], (this->memory).DCTStep[0], // (this->staticdata).pdQuantizationTables + oFrameHeader.aQuantizationTableSelector[0] * 64, // (this->staticdata).aDstSize[0], // (this->memory).pDCTState); // compress_process_lock.unlock(); // // // nppiEncodeOptimizeHuffmanScan_JPEG_8u16s_P3R((this->memory).pDCT, (this->memory).DCTStep, // 0, oScanHeader.nSs, oScanHeader.nSe, oScanHeader.nA >> 4, oScanHeader.nA & 0x0f, // (this->memory).pDScan, &(this->memory).nScanLength, // (this->memory).hpCodesDC, (this->memory).hpTableDC, (this->memory).hpCodesAC, (this->memory).hpTableAC, // (this->staticdata).apDHuffmanDCTable, // (this->staticdata).apDHuffmanACTable, // (this->staticdata).aDstSize, // (this->memory).pDJpegEncoderTemp); // // // } // // //void writedisk(int picture_num, Package* a, int bag_index, unsigned char*total_malloc, int& pix_index) // void writedisk(int picture_num, Package* a, int bag_index) // { // unsigned char *pDstJpeg = new unsigned char[(this->memory).nScanSize]; // unsigned char *pDstOutput = pDstJpeg; // // oFrameHeader.nWidth = (this->staticdata).oDstImageSize.width; // oFrameHeader.nHeight = (this->staticdata).oDstImageSize.height; // // writeMarker(0x0D8, pDstOutput); // writeJFIFTag(pDstOutput); // writeQuantizationTable(aQuantizationTables[0], pDstOutput); // writeQuantizationTable(aQuantizationTables[1], pDstOutput); // writeFrameHeader(oFrameHeader, pDstOutput); // writeHuffmanTable(pHuffmanDCTables[0], pDstOutput); // writeHuffmanTable(pHuffmanACTables[0], pDstOutput); // writeHuffmanTable(pHuffmanDCTables[1], pDstOutput); // writeHuffmanTable(pHuffmanACTables[1], pDstOutput); // writeScanHeader(oScanHeader, pDstOutput); // // cudaMemcpy(pDstOutput, (this->memory).pDScan, (this->memory).nScanLength, cudaMemcpyDeviceToHost); // 
pDstOutput += (this->memory).nScanLength; // // writeMarker(0x0D9, pDstOutput); // // char szOutputFiler[100]; // sprintf_s(szOutputFiler, "%s\\%d.jpg", gStructVarible.ImgSavePath, picture_num); // memcpy(total_malloc + pix_index, pDstJpeg, static_cast<int>(pDstOutput - pDstJpeg)); // pix_index += static_cast<int>(pDstOutput - pDstJpeg); // a->Form_one_head(bag_index / gStructVarible.PictureNum, szOutputFiler, pDstOutput - pDstJpeg); // // delete[] pDstJpeg; // // //Write result to file. // //std::ofstream outputFile1(OutputFile, ios::out | ios::binary); // //outputFile1.write(reinterpret_cast<const char *>(pDstJpeg), static_cast<int>(pDstOutput - pDstJpeg)); // //delete[] pDstJpeg; // } // void memoryfree() // { // //cudaFree(this->my_in); // for (int i = 0; i < 3; ++i) // { // cudaFree((this->memory).pDCT[i]); // cudaFree((this->memory).pDImage[i]); // nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanDCTable[i]); // nppiEncodeHuffmanSpecFree_JPEG((this->staticdata).apDHuffmanACTable[i]); // } // nppiDCTFree((this->memory).pDCTState); // cudaFree((this->memory).pDJpegEncoderTemp); // cudaFree((this->memory).pDScan); // cudaFree((this->staticdata).pdQuantizationTables); // } // ~T() {} // void Run() // { // char ImgoutputPath[255]; // total_malloc = new unsigned char[100000000]; // pix_index = 0; // clock_t start, end, end2; // int img_index;//图像索引 // int mFlagIndex = 0; // int OutPutInitialIndex = 0; //输出的Bin文件初始索引号 // int Bufferoffset = 0;//缓冲区偏移量 // bool DatafullFlag = false;//标志位:当为true的时候,表示该GPU对应的两个缓冲区中,至少有一个有有效数据。 // //测试读入图片是否成功------------------------------------------------------------------------------------------------------------ // cv::Mat img1(5120, 5120, CV_8UC1); // cudaSetDevice((this->param).GpuId); // this->Initialize(); // cudaMemcpy2D((this->memory).pDImage[1], dataduiqi[1], gpHudata, compress_imgWidth * sizeof(unsigned char), compress_imgWidth * sizeof(unsigned char), compress_imgHeight, cudaMemcpyHostToDevice); // cudaMemcpy2D((this->memory).pDImage[2], dataduiqi[2], gpHvdata, compress_imgWidth * sizeof(unsigned char), compress_imgWidth * sizeof(unsigned char), compress_imgHeight, cudaMemcpyHostToDevice); // // for (int i = 1; i < 3; ++i) // { // nppiDCTQuantFwd8x8LS_JPEG_8u16s_C1R_NEW((this->memory).pDImage[i], (this->memory).DImageStep[i], // (this->memory).pDCT[i], (this->memory).DCTStep[i], // (this->staticdata).pdQuantizationTables + oFrameHeader.aQuantizationTableSelector[i] * 64, // (this->staticdata).aDstSize[i], // (this->memory).pDCTState); // } // cout << "T GPU :" << param.GpuId << " initial success!" 
<< endl; // // while (!ExtractPointSuccess) // { // mydelay(0.01); // img_index = 0;//图像计数 // Bufferoffset = 0; // //绑定数据 // while (true)//这里需要改,锁不能用和提点一样 // { // gComressReadDataLock.lock(); // mTindex = mTindex % (HardwareParam.DeviceCount + 1); // if (gComressionBufferEmpty[mTindex] == false && gComressionBufferWorking[mTindex] == false) // { // //将页锁内存标志位置为工作状态--进行绑定 // gComressionBufferWorking[mTindex] = true; // OutPutInitialIndex = gComressionBufferStartIndex[mTindex] * Bufferlength;//获取图像首索引 // mFlagIndex = mTindex; // DatafullFlag = true; // mTindex++; // gComressReadDataLock.unlock(); // break; // } // mTindex++; // gComressReadDataLock.unlock(); // if (ExtractPointSuccess) // break; // } // start = clock(); // sprintf_s(ImgoutputPath, "%s\\%d.bin", gStructVarible.ImgSavePath, OutPutInitialIndex); // //Package data_bag(ImgoutputPath, Bufferlength); // Package data_bag(ImgoutputPath, Bufferlength / gStructVarible.PictureNum); // //压缩pImg图片 // while (DatafullFlag) // { // if (img_index >= Bufferlength) // { // end = clock(); // gComressReadDataLock.lock(); // gComressionBufferWorking[mFlagIndex] = false; // gComressReadDataLock.unlock(); // gComressionBufferEmpty[mFlagIndex] = true; // DatafullFlag = false; // // //写磁盘 // compress_write_lock.lock(); // data_bag.file.open(data_bag.Fname, ios::out | ios::binary); // //data_bag.Form_total_head(); // data_bag.Form_total_head(compress_imgWidth, compress_imgHeight, gStructVarible.PictureNum, OutPutInitialIndex); // //cout << OutPutInitialIndex << endl; // data_bag.file.write(data_bag.head_cache, data_bag.head_bias); // data_bag.file.write(reinterpret_cast<const char *>(total_malloc), static_cast<int>(pix_index)); // data_bag.file.close(); // //data_bag.UnPack(data_bag.Fname); // compress_write_lock.unlock(); // memset(total_malloc, 0, 100000000); // pix_index = 0; // end2 = clock(); // cout << "T GPU :" << param.GpuId << " Index" << OutPutInitialIndex << " 处理:" << double(end - start) / CLOCKS_PER_SEC << " 总时间:" << double(end2 - start) / CLOCKS_PER_SEC << endl; // break; // } // int picture_index = OutPutInitialIndex + img_index; // Bufferoffset = gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.PictureNum; // // cudaMemcpy2D((this->memory).pDImage[0], dataduiqi[0], gHostComressiongBuffer[mFlagIndex] + Bufferoffset, compress_old_Width * sizeof(unsigned char), compress_old_Width * sizeof(unsigned char), compress_old_Height, cudaMemcpyHostToDevice); // this->process(); // this->writedisk(picture_index, &data_bag, img_index); // //img_index++; // img_index = img_index + gStructVarible.PictureNum; // } // } // delete[] total_malloc; // this->memoryfree(); // } //}; int T::mTindex = 0; /*----------------------------------数据更新类--------------------------------------------*/ class ReadImg : public Runnable { public: bool ExtractPointWorkingFlag = false;//表示 提点在工作 bool CompressionWorkingFlag = false;//表示 压缩在工作 Parameter Devpar;//变量传参 ~ReadImg() { } void mydelay(double sec)//延时函数,用于图像数据缓冲区的更新 { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { Devpar.ImgHeight = gStructVarible.ImgHeight; Devpar.ImgWidth = gStructVarible.ImgWidth; Devpar.PictureNum = gStructVarible.PictureNum; Devpar.ImgChannelNum = gStructVarible.ImgChannelNum; int mPageLockBufferIndex = 0; int mCompressionBufferindex = 0; bool ExtractCopySuccess; bool ComressionCopySuccess; //初始化标志位 for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { //页锁 
PageLockBufferEmpty[i] = true; PageLockBufferWorking[i] = false; PageLockBufferStartIndex[i] = 0; //压缩 gComressionBufferEmpty[i] = true; gComressionBufferWorking[i] = false; gComressionBufferStartIndex[i] = 0; } //cout << "ReadImg initial success!" << endl; while (!ExtractPointSuccess) // 实验结束的标志位 { mydelay(0.01); for (int i = 0; i <HardwareParam.DeviceCount * 2; i++)//这个用于遍历相机对应buffer的标志位 { ExtractCopySuccess = false; ComressionCopySuccess = false; if (CameraBufferFull[i]) //相机对应的内存是否有可用数据--当为true时,则表示相机对应内存的i号Buffer有可用数据 { //拷贝方位盒数据至方位盒缓冲区 if (gStructVarible.RecModelFlag == true && HostUpdateRec == false) { memcpy(gRecupImgData, gCameraBuffer[i], sizeof(unsigned char)*Devpar.ImgChannelNum*Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum);//这个是在内存区域拷贝图像 HostUpdateRec = true; } //由相机内存拷贝数据到页锁内存 if (ExtractPointWorkingFlag) { while (1) //遍历页锁Buffer,判断页锁内存缓冲区是否有空余Buffer块 { mPageLockBufferIndex = mPageLockBufferIndex % (HardwareParam.DeviceCount + 1); if (PageLockBufferEmpty[mPageLockBufferIndex])//若某一页锁为空 { memcpy(gHostBuffer[mPageLockBufferIndex], gCameraBuffer[i], sizeof(unsigned char)*Devpar.ImgHeight*Devpar.ImgWidth *Devpar.ImgChannelNum* Bufferlength);//拷贝数据到页锁 ExtractCopySuccess = true; PageLockBufferEmpty[mPageLockBufferIndex] = false;// 拷贝了数据之后将 标志位置为false; PageLockBufferStartIndex[mPageLockBufferIndex] = BufferBlockIndex[i];//索引拷贝 mPageLockBufferIndex++; break; } mPageLockBufferIndex++; if (ExtractPointSuccess) break; } } else ExtractCopySuccess = true; // 由相机内存拷贝到压缩缓冲区 if (CompressionWorkingFlag) { while (1) //遍历页锁Buffer,判断页锁内存缓冲区是否有空余Buffer块 { mCompressionBufferindex = mCompressionBufferindex % (HardwareParam.DeviceCount + 1); if (gComressionBufferEmpty[mCompressionBufferindex])//若某一页锁为空 { memcpy(gHostComressiongBuffer[mCompressionBufferindex], gCameraBuffer[i], sizeof(unsigned char)*Devpar.ImgHeight*Devpar.ImgWidth *Devpar.ImgChannelNum* Bufferlength);//拷贝数据到页锁 ComressionCopySuccess = true; gComressionBufferEmpty[mCompressionBufferindex] = false;// 拷贝了数据之后将 标志位置为false; gComressionBufferStartIndex[mCompressionBufferindex] = BufferBlockIndex[i];//索引拷贝 mCompressionBufferindex++; break; } mCompressionBufferindex++; if (ExtractPointSuccess) break; } } else ComressionCopySuccess = true; //相机内存对应标志位置false if (ExtractCopySuccess&&ComressionCopySuccess) CameraBufferFull[i] = false; } } } } }; /*----------------------------------模拟数据产生类----------------------------------------*/ class DataRefresh : public Runnable { public: Parameter Devpar;//变量传参 ~DataRefresh() { } void mydelay(double sec)//延时函数,用于图像数据缓冲区的更新 { clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < sec); } void Run() { //初始化参数 Devpar.ImgHeight = gStructVarible.ImgHeight; Devpar.ImgWidth = gStructVarible.ImgWidth; Devpar.PictureNum = gStructVarible.PictureNum; Devpar.ImgChannelNum = gStructVarible.ImgChannelNum; clock_t start, end; char path[250]; //读图像 unsigned char *Img1 = new unsigned char[Devpar.ImgWidth* Devpar.ImgHeight*Devpar.ImgChannelNum]; if (Devpar.ImgChannelNum == 1) { RmwRead8BitBmpFile2Img(gStructVarible.ImgReadPath, NULL, Img1, &Devpar.ImgWidth, &Devpar.ImgHeight); } else { RmwRead8BitBmpFile2Img(gStructVarible.ImgReadPath, Img1, NULL, &Devpar.ImgWidth, &Devpar.ImgHeight); } for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { for (long long j = 0; j < Bufferlength; j++) { memcpy(gCameraBuffer[i] + j* Devpar.ImgHeight * Devpar.ImgWidth*Devpar.ImgChannelNum, Img1, Devpar.ImgWidth* Devpar.ImgHeight*Devpar.ImgChannelNum * 
sizeof(unsigned char)); } } //初始化索引计数 for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { BufferBlockIndex[i] = i - HardwareParam.DeviceCount * 2; CameraBufferFull[i] = false; } //cout << "DataRefresh initial success!" << endl;/*调试项*/ mydelay(2); //cout << " start!" << endl; //模拟相机更新数据 start = clock(); for (int q = 0; q <3; q++) //5 { for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { BufferBlockIndex[i] += HardwareParam.DeviceCount * 2; //若提点速度过慢,则这里会打印; if (CameraBufferFull[i]) { //cout << "speed is too slow!" << endl; SimulationSuccessFlaf = true; ExtractPointSuccess = true; break; } CameraBufferFull[i] = true; mydelay(Timedatarefresh); } if (ExtractPointSuccess) break; } end = clock(); mydelay(3); ExtractPointSuccess = true; //cout << "实验:50张图片更新时间=" << Timedatarefresh << " over" << endl; delete[] Img1; } }; //--------------------------------------------结束------------------------------------------// /*对外接口函数*/ //调用动态库第一步 //对硬件设备初始化,自适应分配 /************************************************* 函数名称: GetDiskSpaceInfo // 函数描述: 函数用于获取输入路径所在位置磁盘剩余容量(GB); // 输入参数:LPCWSTR pszDrive :路径所在位置盘符, . 比如"D:\1.bmp"则为"D:\";// 输出参数:空;// 返回值 : RemainingSpace(int型) -- 剩余磁盘容量(GB)// 其他说明: 函数只用在硬件初始化时调用一次; . 函数还根据输入的驱动器,获取磁盘 . 总容量空闲空间、簇数量等磁盘信息 . 等,未引出接口// *************************************************/ int GetDiskSpaceInfo(LPCWSTR pszDrive) { DWORD64 qwFreeBytesToCaller, qwTotalBytes, qwFreeBytes; DWORD dwSectPerClust, dwBytesPerSect, dwFreeClusters, dwTotalClusters; BOOL bResult; //使用GetDiskFreeSpaceEx获取磁盘信息并打印结果 bResult = GetDiskFreeSpaceEx(pszDrive, (PULARGE_INTEGER)&qwFreeBytesToCaller, (PULARGE_INTEGER)&qwTotalBytes, (PULARGE_INTEGER)&qwFreeBytes); //使用GetDiskFreeSpace获取磁盘信息并打印结果 bResult = GetDiskFreeSpace(pszDrive, &dwSectPerClust, &dwBytesPerSect, &dwFreeClusters, &dwTotalClusters); int RemainingSpace; if (bResult) { RemainingSpace = int((DWORD64)dwFreeClusters* (DWORD64)dwSectPerClust*(DWORD64)dwBytesPerSect >> 30); } return RemainingSpace; } /************************************************* 函数名称: HardwareInit // 函数描述: 硬件初始化; // 输入参数:null// 输出参数:HardwareInfo *HardwareProp : 硬件配置信息;// 返回值 : (int型) -- 初始化成功或失败标志// 其他说明: 函数在软件启动初始化时调用 . 
对系统使用硬件资源配置;// *************************************************/ IMGSIMULATION_API int HardwareInit(HardwareInfo *HardwareProp) { if (gWorkingGpuId.size() != 0) gWorkingGpuId.clear(); cudaGetDeviceCount(&gDeviceCount); //公共信息放在结构体HardwareParam中 HardwareParam.DeviceCount = 0;//GPU设备数清零 HardwareParam.DiskRemainingSpace = GetDiskSpaceInfo(L"C:/pic");//C盘剩余空间 if (HardwareParam.DiskRemainingSpace < DiskRemainingSpaceThreshold)//%%%暂时定为100G%%% { return 1;//磁盘存储空间不足 } for (int i = 0; i<gDeviceCount-1; i++) { cudaDeviceProp DevProp; cudaGetDeviceProperties(&DevProp, i); HardwareProp->major = DevProp.major; HardwareProp->minor = DevProp.minor; if (DevProp.major > 5)//计算能力大于5时 { gWorkingGpuId.push_back(i); } } HardwareParam.DeviceCount = gWorkingGpuId.size();//GPU设备数目 HardwareProp->DeviceCount = HardwareParam.DeviceCount; if (HardwareParam.DeviceCount > 5 || HardwareParam.DeviceCount < 1) { return 2;//最多可同时支持5块GPU } HardwareParam.ExPointThreads = HardwareParam.DeviceCount;//提点线程数 HardwareProp->ExPointThreads = HardwareParam.DeviceCount; HardwareParam.CompThreads = HardwareParam.DeviceCount;//压缩线程数 HardwareProp->CompThreads = HardwareParam.DeviceCount; return 0; } //-------------------------------------------------------结束----------------------------------------// /************************************************* 函数名称: Image_Pretreatment // 函数描述: 函数用于图像预处理; // 输入参数:const char *path :图像文件夹路径, . const char *exten : 图像格式,比如".bmp"; . int ChooseMode : 预处理选项--1 图像读入内存 . --2 内存释放// 输出参数:gHostImage[i]批量存放图像数据的数组;// 返回值 : gHostPathImgNumber(int型) -- 路径下图像数量// 其他说明: 函数在调试时使用,用于图像的批处理;// *************************************************/ IMGSIMULATION_API int Image_Pretreatment(const char *path, const char *exten, int ChooseMode) { cv::Directory dir; string filepath(path); string fileexten(exten); vector<string> filenames = dir.GetListFiles(filepath, fileexten, false); if (filenames.size() == NULL) { return 0; } else { gHostPathImgNumber = filenames.size(); } switch (ChooseMode) { case 1: { //图像预处理,从硬盘批量读入内存 #ifdef Pretreatment char strFilename[100]; int mWidth; int mHeight; for (int i = 0; i < gHostPathImgNumber; i++) { sprintf_s(strFilename, "%s\\%d.bmp", path, i + 1); //将图片的路径名动态的写入到strFilename这个地址的内存空间 checkCudaErrors(cudaHostAlloc((void**)&gHostImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth * sizeof(unsigned char), cudaHostAllocDefault)); if (gStructVarible.ImgBitDeep == 24) { gHostColorImage[i] = new unsigned char[gStructVarible.ImgHeight * gStructVarible.ImgWidth * 3]; } RmwRead8BitBmpFile2Img(strFilename, gHostColorImage[i], gHostImage[i], &mWidth, &mHeight); } #endif // Pretreatment break; } case 2: { //批量内存释放 #ifdef Pretreatment for (int i = 0; i < gHostPathImgNumber; i++) { cudaFreeHost(gHostImage[i]); if (gStructVarible.ImgBitDeep == 24) { cudaFreeHost(gHostColorImage[i]); } } #endif // Pretreatment break; } default: break; } return gHostPathImgNumber; } /************************************************* 函数名称: SimulationImageTest // 函数描述: 测试原图仿真测试; // 输入参数:const char *path :测试图像路径;// 输出参数:Infomation *Info : 测试实验数据;// 返回值 : bool -- 实验成功标志位// 其他说明: 函数包含对整幅测试图在不同实验模式下 . 实验性能的测试; . 
测试包括:单提点测试、单压缩测试、提点压缩测试// *************************************************/ IMGSIMULATION_API bool SimulationImageTest(const char *path, Infomation *Info) { cudaError_t err; int mWidth, mHeight; gHostPathImgNumber = 5;//测试图片复制数量 Info->ImgProcessingNumbers = gHostPathImgNumber; /**** 图片导入 ****/ for (int i = 0; i < gHostPathImgNumber; i++)//为图片申请锁页内存 { err = cudaHostAlloc((void**)&gHostImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth *gStructVarible.PictureNum * sizeof(unsigned char), cudaHostAllocDefault); if (gStructVarible.ImgBitDeep == 24) { err = cudaHostAlloc((void**)&gHostColorImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth *gStructVarible.PictureNum * 3 * sizeof(unsigned char), cudaHostAllocDefault); } } int Picoffset = gStructVarible.ImgHeight * gStructVarible.ImgWidth;//单张灰度图片地址偏移量 int PicoffsetColor = gStructVarible.ImgHeight * gStructVarible.ImgWidth * 3;//单张图片地址偏移量 for (int i = 0; i < gHostPathImgNumber; i++)//读取图片 { for (int j = 0; j < gStructVarible.PictureNum; j++) { RmwRead8BitBmpFile2Img(path, gHostColorImage[i] + j * PicoffsetColor, gHostImage[i] + j * Picoffset, &mWidth, &mHeight); } } if (gStructVarible.RecModelFlag == 1) GetImgBoxHost(path);//提取包围盒 Info->DeviceCount = HardwareParam.DeviceCount; Info->CPUThreadCount = ExtractPointThreads; clock_t start, finish; float Difftime;//时间差 float ImageSize;//图像尺寸 int ImgChannel;//图像通道 int ThreadID; /**** 单提点测试 ****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); //提点线程数为GPU设备数 pExecutor->Init(1, HardwareParam.ExPointThreads, 1); SIM *ExtractPoint = new SIM[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; //RecS recs; if (gStructVarible.RecModelFlag == 0)//全图模式 { start = clock(); //计时开始 ThreadID = 0x01;//线程号 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate();//终止线程 delete pExecutor;//删除线程池 finish = clock();//计时结束 Difftime = (float)(finish - start) / CLOCKS_PER_SEC;//得到两次记录之间的时间差 Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * 
gStructVarible.PictureNum / Difftime; } else //矩形模式 { start = clock(); //计时开始 ThreadID = 0x01;//线程号 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.DeviceCount; RecExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //RecExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** 提取标志点过程 ****/ pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate(); delete pExecutor; finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } ///**** 单压缩测试****/ //CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); //pExecutor1->Init(1, HardwareParam.CompThreads, 1); //T *Compression_grey = new T[HardwareParam.CompThreads]; //TC *Compression = new TC[HardwareParam.CompThreads]; //start = clock(); //计时开始 //ThreadID = 0x01;//线程号重置 //for (int i = 0; i < HardwareParam.ExPointThreads; i++) //{ // /**** 参数传入 ****/ // Compression_grey[i].param.DeviceID = i; // Compression_grey[i].param.GpuId = gWorkingGpuId[i]; // Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; // Compression[i].param.DeviceID = i; // Compression[i].param.GpuId = gWorkingGpuId[i]; // Compression[i].param.CompThreads = HardwareParam.CompThreads; // if (gStructVarible.ImgBitDeep == 8) // { // pExecutor1->Execute(&Compression_grey[i], ThreadID); // ThreadID = ThreadID << 1; // } // else if (gStructVarible.ImgBitDeep == 24) // { // pExecutor1->Execute(&Compression[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //pExecutor1->Terminate(); //delete pExecutor1; //finish = clock();//计时结束 // //得到两次记录之间的时间差 //Difftime = (float)(finish - start) / CLOCKS_PER_SEC; //Info->CompressionTimes = Difftime; //ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 //ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; //Info->CompressionSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; ///**** 提点与压缩同步测试****/ //CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); //pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads, 1); //start = clock(); //计时开始 //ThreadID = 0x01;//线程号重置 //if (gStructVarible.RecModelFlag == 0)//全图模式 //{ // for (int i = 0; i < HardwareParam.ExPointThreads; i++) // { // pExecutor2->Execute(&ExtractPoint[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //else //{ // for (int i = 0; i < HardwareParam.ExPointThreads; i++) // { // pExecutor2->Execute(&RecExtractPoint[i], ThreadID); // 
ThreadID = ThreadID << 1; // } //} //if (gStructVarible.ImgBitDeep == 8) //{ // for (int i = 0; i < HardwareParam.CompThreads; i++) // { // pExecutor2->Execute(&Compression_grey[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //else if (gStructVarible.ImgBitDeep == 24) //{ // for (int i = 0; i < HardwareParam.CompThreads; i++) // { // pExecutor2->Execute(&Compression[i], ThreadID); // ThreadID = ThreadID << 1; // } //} //pExecutor2->Terminate(); //delete pExecutor2; //finish = clock();//计时结束 // //得到两次记录之间的时间差 //Difftime = (float)(finish - start) / CLOCKS_PER_SEC; //Info->SynchronizeTimes = Difftime; //ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 //ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; //Info->SynchronizeSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; //释放内存 for (int i = 0; i < gHostPathImgNumber; i++) { err = cudaFreeHost(gHostImage[i]); if (gStructVarible.ImgBitDeep == 24) { err = cudaFreeHost(gHostColorImage[i]); } if (err != cudaSuccess) { return false; } } return true; } IMGSIMULATION_API void SimulationTestReport(const char *path, Infomation *Info) { //测提点加压缩 Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / (1024 * 1024);//单张图片大小 double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s时缓冲区刷新时间。 SimulationSuccessFlaf = false; while (!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.05; OnlineImageRecExperiment(3 , Info); //每次实验之后,延时两秒 clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } /****************************************仿真实验相关********************************************/ //qwe 仿真总函数 IMGSIMULATION_API bool SimulationExperient(int ChooseMode) { clock_t start, finish; Infomation *Info; float Difftime;//时间差 float ImageSize;//图像尺寸 int ImgChannel;//图像通道 int ThreadID; //cout << "设备数目:" << HardwareParam.DeviceCount << endl; switch (ChooseMode) { case 1://单提点 { /**** 单提点测试****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); //int ThreadsNum; //if (gStructVarible.RecModelFlag == true)//qwt // ThreadsNum = HardwareParam.ExPointThreads + 3; //else // ThreadsNum = HardwareParam.ExPointThreads + 2; pExecutor->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = false; readimg.ExtractPointWorkingFlag = true; if (gStructVarible.RecModelFlag == false)//全图模式 { ThreadID = 0x01;//线程号 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); 
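					// Each worker receives its own copy of the global settings (paths via sprintf_s,
					// numeric fields by assignment) plus a device slot, and is dispatched with a
					// one-hot thread id; sketch for three GPUs:
					//     worker 0 -> 0x01, worker 1 -> 0x02, worker 2 -> 0x04   (ThreadID <<= 1 after each Execute)
					// The helper threads (ReadImg, DataRefresh and, in rectangle mode, RecUpData) take the following bits.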
sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate();//终止线程 } else //矩形模式 { GetImgBoxHost(gStructVarible.ImgReadPath); ThreadID = 0x01;//线程号 /**** 提取标志点过程 ****/ for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.DeviceCount; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; RecExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate(); cout << "实验结束" << endl; delete pExecutor; } break; } case 2://单压缩 { CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); pExecutor1->Init(1, 10, 1); T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; DataRefresh datarefresh; ReadImg readimg; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = false; ThreadID = 0x01;//线程号重置 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgChannelNum == 1) { 
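				// Compression workers are selected by channel count (ImgChannelNum is derived
				// elsewhere as ImgBitDeep / 8); sketch of the mapping used here:
				//     1 channel  (8-bit  bmp) -> Compression_grey[i]  (T)
				//     3 channels (24-bit bmp) -> Compression[i]       (TC)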
pExecutor1->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgChannelNum == 3) { pExecutor1->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor1->Execute(&datarefresh, ThreadID); ThreadID = ThreadID << 1; pExecutor1->Execute(&readimg, ThreadID); pExecutor1->Terminate(); delete pExecutor1; break; } case 3://提点&压缩 { CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); pExecutor2->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = true; ThreadID = 0x01;//线程号 //提点线程 if (gStructVarible.RecModelFlag == false)//全图模式 { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor2->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } else //矩形模式 { GetImgBoxHost(gStructVarible.ImgReadPath); for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.DeviceCount; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; RecExtractPoint[i].Devpar.ImgChannelNum = 
gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor2->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; } } //压缩线程 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.GpuId = gWorkingGpuId[i]; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgChannelNum == 1) { pExecutor2->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgChannelNum == 3) { pExecutor2->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } //数据生成+读图线程 pExecutor2->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&datarefresh, ThreadID); pExecutor2->Terminate(); delete pExecutor2; delete[] ExtractPoint; delete[] RecExtractPoint; delete[] Compression_grey; delete[] Compression; break; } default: return 1; } return 0; } //qwe 提点和压缩同步 IMGSIMULATION_API void SimulationTestSynchronize(const char *path, Infomation *Info) { //测提点加压缩 //Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / (1024 * 1024);//单张图片大小 double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s时缓冲区刷新时间。 SimulationSuccessFlaf = false; while (!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.05; SimulationExperient(3); //每次实验之后,延时两秒 clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } //qwe 单提点 IMGSIMULATION_API void SimulationTestExtractPoint(const char *path, Infomation *Info) { //测提点加压缩 //Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / (1024 * 1024);//单张图片大小 double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s时缓冲区刷新时间。 SimulationSuccessFlaf = false; while (!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.1; SimulationExperient(1); //每次实验之后,延时两秒 clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } //qwe 单压缩 IMGSIMULATION_API void SimulationTestComression(const char *path, Infomation *Info) { //测提点加压缩 //Bufferlength = 50; Memory_application(); Timedatarefresh = 1; double SiglePicSize = double(gStructVarible.ImgHeight*gStructVarible.ImgWidth) / 
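	// SiglePicSize is one frame in MB; minTimeRefresh below is the time a 2 GB/s camera link
	// needs to deliver one buffer of Bufferlength frames, i.e. the fastest refresh interval
	// worth testing.  Illustrative numbers, assuming the default 5120 * 5120 single-channel
	// frames and Bufferlength = 50:
	//     SiglePicSize   = 25 MB
	//     minTimeRefresh = 50 * 25 / 2048  which is about 0.61 s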
(1024 * 1024);//单张图片大小 double minTimeRefresh = Bufferlength*SiglePicSize / (2 * 1024);//2G/s时缓冲区刷新时间。 SimulationSuccessFlaf = false; while (!SimulationSuccessFlaf) { if (Timedatarefresh > minTimeRefresh) { Timedatarefresh = Timedatarefresh - 0.05; continue; } for (int i = 0; i < 3; i++) ExtractPointInitialSuccessFlag[i] = false; ExtractPointSuccess = false; Timedatarefresh = Timedatarefresh - 0.1; SimulationExperient(2); //每次实验之后,延时两秒 clock_t start_time, cur_time; start_time = clock(); do { cur_time = clock(); } while (double(cur_time - start_time) / CLOCKS_PER_SEC < 2); } Memory_release(); Timedatarefresh = Timedatarefresh + 0.05; if (Timedatarefresh > minTimeRefresh) Timedatarefresh = minTimeRefresh; Info->SynchronizeTimes = Timedatarefresh; Info->SynchronizeSpeed = SiglePicSize*Bufferlength / Timedatarefresh; } /*---------------------------------------------------------------------------------------*/ /************************************************* 函数名称: OnlineImageExperiment // 函数描述: 在线实验模块--全图模式; // 输入参数:const char *Imgpath :在线实验图像路径; . ChooseMode :1 单提点 . 2 单压缩 . 3 提点&压缩// 输出参数:Infomation *Info : 在线实验数据;// 返回值 : bool -- 实验成功标志位// 其他说明: 函数选择性的进行三种模式的在线实验 . ,具体模式通过界面设置参数选择// *************************************************/ IMGSIMULATION_API bool OnlineImageExperiment(int ChooseMode, const char *Imgpath, Infomation *Info) { cudaError_t err; int mWidth, mHeight; clock_t start, finish; float Difftime;//时间差 float ImageSize;//图像尺寸 int ImgChannel;//图像通道 int ThreadID; switch (ChooseMode) { case 1://单提点 { /**** 单提点测试****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); pExecutor->Init(1, HardwareParam.ExPointThreads, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; if (gStructVarible.RecModelFlag == 0)//全图模式 { start = clock(); //计时开始 ThreadID = 0x01;//线程号 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; ExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //ExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** 提取标志点过程 ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate();//终止线程 delete pExecutor;//删除线程池 finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } else //矩形模式 { start = clock(); //计时开始 ThreadID = 0x01;//线程号 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; 
RecExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //RecExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** 提取标志点过程 ****/ pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Terminate(); delete pExecutor; finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } break; } case 2://单压缩 { CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); pExecutor1->Init(1, HardwareParam.CompThreads, 1); T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; start = clock(); //计时开始 ThreadID = 0x01;//线程号重置 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgBitDeep == 8) { pExecutor1->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgBitDeep == 24) { pExecutor1->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor1->Terminate(); delete pExecutor1; finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->CompressionTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->CompressionSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; break; } case 3://提点&压缩 { CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; ThreadID = 0x01;//线程号 start = clock(); //计时开始 if (gStructVarible.RecModelFlag == 0)//全图模式 { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; ExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //ExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; 
ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** 提取标志点过程 ****/ pExecutor2->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } else //矩形模式 { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; RecExtractPoint[i].HardwarePar.CUDAStreamNum = 5; //RecExtractPoint[i].Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** 提取标志点过程 ****/ pExecutor2->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgBitDeep == 8) { pExecutor2->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgBitDeep == 24) { pExecutor2->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor2->Terminate(); delete pExecutor2; finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->SynchronizeTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->SynchronizeSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; break; } default: return 1; } return 0; } /************************************************* 函数名称: OnlineImageExperiment // 函数描述: 在线实验模块--矩形模式; // 输入参数:const char *Imgpath :在线实验图像路径; . ChooseMode :1 单提点 . 2 单压缩 . 3 提点&压缩// 输出参数:Infomation *Info : 在线实验数据;// 返回值 : bool -- 实验成功标志位// 其他说明: 函数选择性的进行三种模式的在线实验 . 
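.           A minimal caller-side sketch (assumed usage):
.               Infomation info;
.               OnlineImageRecExperiment(3, &info);   // 1 = extraction only, 2 = compression only, 3 = both
.           The worker and helper threads are spawned from a thread pool and the call
.           only returns after pExecutor->Terminate().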
,具体模式通过界面设置参数选择// *************************************************/ IMGSIMULATION_API bool OnlineImageRecExperiment(int ChooseMode, Infomation *Info) { clock_t start, finish; int mWidth, mHeight; float Difftime;//时间差 float ImageSize;//图像尺寸 int ImgChannel;//图像通道 int ThreadID; switch (ChooseMode) { case 1://单提点 { /**** 单提点测试****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); int ThreadsNum; if (gStructVarible.RecModelFlag == 1)//qwt ThreadsNum = HardwareParam.ExPointThreads + 3; else ThreadsNum = HardwareParam.ExPointThreads + 2; pExecutor->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = false; readimg.ExtractPointWorkingFlag = true; if (gStructVarible.RecModelFlag == 0)//全图模式 { start = clock(); //计时开始 ThreadID = 0x01;//线程号 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; //sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); //sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); //sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", "C:\\pic\\img_read"); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", "C:\\pic\\img_write"); sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", "C:\\pic\\img_data"); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate();//终止线程 delete pExecutor;//删除线程池 finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } else //矩形模式 { ThreadID = 0x01;//线程号 GetImgBoxHost(gStructVarible.ImgReadPath); /**** 提取标志点过程 ****/ for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.DeviceCount; sprintf_s(RecExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); 
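				// Rectangle mode: GetImgBoxHost() was called above to build the initial bounding
				// boxes on the host before any RecR worker starts, and a RecUpData helper thread
				// is queued below so the boxes can be refreshed while the experiment runs
				// (the RecPadding setting presumably adds a margin around each box; that detail
				// is not visible in this file).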
sprintf_s(RecExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(RecExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; pExecutor->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } pExecutor->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor->Execute(&datarefresh, ThreadID); pExecutor->Terminate(); //cout << "实验结束" << endl; delete pExecutor; } break; } case 2://单压缩 { CThreadPoolExecutor * pExecutor1 = new CThreadPoolExecutor(); pExecutor1->Init(1, HardwareParam.CompThreads + 2, 1); T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = false; ThreadID = 0x01;//线程号重置 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.GpuId = gWorkingGpuId[i]; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgBitDeep == 8) { pExecutor1->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgBitDeep == 24) { pExecutor1->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } pExecutor1->Execute(&datarefresh, ThreadID); ThreadID = ThreadID << 1; pExecutor1->Execute(&readimg, ThreadID); pExecutor1->Terminate(); delete pExecutor1; break; } case 3://提点&压缩 { CThreadPoolExecutor * pExecutor2 = new CThreadPoolExecutor(); //if (gStructVarible.RecModelFlag == 1)//qwt // pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads+2, 1); //else // pExecutor2->Init(1, HardwareParam.ExPointThreads + HardwareParam.CompThreads+1, 1); pExecutor2->Init(1, 10, 1); R *ExtractPoint = new R[HardwareParam.ExPointThreads]; RecR *RecExtractPoint = new RecR[HardwareParam.ExPointThreads]; T *Compression_grey = new T[HardwareParam.CompThreads]; TC *Compression = new TC[HardwareParam.CompThreads]; RecUpData recupdate; ReadImg readimg; DataRefresh datarefresh; readimg.CompressionWorkingFlag = true; readimg.ExtractPointWorkingFlag = true; ThreadID = 0x01;//线程号 if (gStructVarible.RecModelFlag == 0)//全图模式 { for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ ExtractPoint[i].HardwarePar.DeviceID = i; ExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; ExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; ExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(ExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(ExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); 
sprintf_s(ExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); ExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; ExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; ExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; ExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor2->Execute(&ExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; } } else //矩形模式 { GetImgBoxHost(gStructVarible.ImgReadPath); for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ RecExtractPoint[i].HardwarePar.DeviceID = i; RecExtractPoint[i].HardwarePar.GpuId = gWorkingGpuId[i]; RecExtractPoint[i].HardwarePar.CUDAStreamNum = CUDAStreams; RecExtractPoint[i].HardwarePar.DeviceCount = HardwareParam.ExPointThreads; sprintf_s(RecExtractPoint[i].Devpar.ImgReadPath, "%s", gStructVarible.ImgReadPath); sprintf_s(RecExtractPoint[i].Devpar.ImgSavePath, "%s", gStructVarible.ImgSavePath); sprintf_s(RecExtractPoint[i].Devpar.DataReadPath, "%s", gStructVarible.DataReadPath); RecExtractPoint[i].Devpar.ImgBitDeep = gStructVarible.ImgBitDeep; RecExtractPoint[i].Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint[i].Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint[i].Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint[i].Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint[i].Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint[i].Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint[i].Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint[i].Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint[i].Devpar.PicBlockSize = gStructVarible.PicBlockSize; RecExtractPoint[i].Devpar.ImgChannelNum = gStructVarible.ImgBitDeep / 8; /**** 提取标志点过程 ****/ pExecutor2->Execute(&RecExtractPoint[i], ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&recupdate, ThreadID); ThreadID = ThreadID << 1; } } //压缩线程 for (int i = 0; i < HardwareParam.ExPointThreads; i++) { /**** 参数传入 ****/ Compression_grey[i].param.DeviceID = i; Compression_grey[i].param.GpuId = gWorkingGpuId[i]; Compression_grey[i].param.CompThreads = HardwareParam.CompThreads; Compression[i].param.DeviceID = i; Compression[i].param.GpuId = gWorkingGpuId[i]; Compression[i].param.CompThreads = HardwareParam.CompThreads; if (gStructVarible.ImgChannelNum == 1) { pExecutor2->Execute(&Compression_grey[i], ThreadID); ThreadID = ThreadID << 1; } else if (gStructVarible.ImgChannelNum == 3) { pExecutor2->Execute(&Compression[i], ThreadID); ThreadID = ThreadID << 1; } } //数据生成+读图线程 pExecutor2->Execute(&readimg, ThreadID); ThreadID = ThreadID << 1; pExecutor2->Execute(&datarefresh, ThreadID); pExecutor2->Terminate(); delete pExecutor2; break; } default: return 1; } return 0; } /************************************************* 函数名称: OnlineImageRefresh // 函数描述: 在线实验读取缓冲区图像; // 输入参数:空// 输出参数:空// 返回值 : 空// 其他说明: // *************************************************/ IMGSIMULATION_API int OnlineImageRefresh(unsigned char *pImg) { if (gCameraBuffer[0] == NULL) return 1; //pImg指针内存在界面端申请,大小为单张图像大小 memcpy(pImg, 
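		// pImg must be allocated by the caller and hold one full frame
		// (ImgWidth * ImgHeight * ImgChannelNum bytes).  Assumed caller-side sketch:
		//     unsigned char *preview = new unsigned char[W * H * C];
		//     if (OnlineImageRefresh(preview) == 0) { /* display the frame */ }
		//     delete[] preview;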
gCameraBuffer[0], gStructVarible.ImgWidth * gStructVarible.ImgHeight * gStructVarible.ImgChannelNum * sizeof(unsigned char)); return 0; } /************************************************* 函数名称: OfflineImageExperiment // 函数描述: 离线实验模块; // 输入参数:const char *Imgpath :离线实验图像路径; 输出参数:Infomation *Info : 离线实验数据;// 返回值 : bool -- 实验成功标志位// 其他说明: 离线实验只是对单张图像的重算过程, . 没有图像压缩的步骤;// *************************************************/ IMGSIMULATION_API bool OfflineImageExperiment(const char *Imgpath, Infomation *Info) { cudaError_t err; int mWidth, mHeight; char strFilename[100]; clock_t start, finish; float Difftime;//时间差 float ImageSize;//图像尺寸 int ImgChannel;//图像通道 for (int i = 0; i<5; i++) { sprintf_s(strFilename, "%s", Imgpath); //将图片的路径名动态的写入到strFilename这个地址的内存空间 cudaHostAlloc((void**)&gHostImage[i], gStructVarible.ImgHeight * gStructVarible.ImgWidth * sizeof(unsigned char), cudaHostAllocDefault); if (gStructVarible.ImgBitDeep == 24) { gHostColorImage[i] = new unsigned char[gStructVarible.ImgHeight * gStructVarible.ImgWidth * 3]; } RmwRead8BitBmpFile2Img(strFilename, gHostColorImage[i], gHostImage[i], &mWidth, &mHeight); } gHostPathImgNumber = 5;//最低处理张数 /**** 单提点测试****/ CThreadPoolExecutor * pExecutor = new CThreadPoolExecutor(); pExecutor->Init(1, 1, 1); R ExtractPoint; RecR RecExtractPoint; if (gStructVarible.RecModelFlag == 0)//全图模式 { start = clock(); //计时开始 /**** 参数传入 ****/ ExtractPoint.HardwarePar.DeviceID = 0; //ExtractPoint.Devpar.DataReadPath = "C:\\pic\\img_data"; ExtractPoint.Devpar.ImgHeight = gStructVarible.ImgHeight; ExtractPoint.Devpar.ImgWidth = gStructVarible.ImgWidth; ExtractPoint.Devpar.Threshold = gStructVarible.Threshold; ExtractPoint.Devpar.LengthMin = gStructVarible.LengthMin; ExtractPoint.Devpar.LengthMax = gStructVarible.LengthMax; ExtractPoint.Devpar.AreaMin = gStructVarible.AreaMin; ExtractPoint.Devpar.AreaMax = gStructVarible.AreaMax; ExtractPoint.Devpar.PictureNum = gStructVarible.PictureNum; ExtractPoint.Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** 提取标志点过程 ****/ pExecutor->Execute(&ExtractPoint, 0x01); pExecutor->Terminate();//终止线程 delete pExecutor;//删除线程池 finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } else //矩形模式 { start = clock(); //计时开始 /**** 参数传入 ****/ RecExtractPoint.HardwarePar.DeviceID = 0; //RecExtractPoint.Devpar.DataReadPath = "C:\\pic\\img_data"; RecExtractPoint.Devpar.ImgHeight = gStructVarible.ImgHeight; RecExtractPoint.Devpar.ImgWidth = gStructVarible.ImgWidth; RecExtractPoint.Devpar.Threshold = gStructVarible.Threshold; RecExtractPoint.Devpar.LengthMin = gStructVarible.LengthMin; RecExtractPoint.Devpar.LengthMax = gStructVarible.LengthMax; RecExtractPoint.Devpar.AreaMin = gStructVarible.AreaMin; RecExtractPoint.Devpar.AreaMax = gStructVarible.AreaMax; RecExtractPoint.Devpar.PictureNum = gStructVarible.PictureNum; RecExtractPoint.Devpar.PicBlockSize = gStructVarible.PicBlockSize; /**** 提取标志点过程 ****/ pExecutor->Execute(&RecExtractPoint, 0x01); pExecutor->Terminate(); delete pExecutor; finish = clock();//计时结束 //得到两次记录之间的时间差 Difftime = (float)(finish - start) / CLOCKS_PER_SEC; Info->PointNumbers = SignPoint.PointNumbers; Info->ExtractPointTimes = Difftime; ImgChannel = 
gStructVarible.ImgBitDeep / 8;//图像通道数 ImageSize = gStructVarible.ImgHeight * gStructVarible.ImgWidth * ImgChannel / 1024 / 1024; Info->ExtractPointSpeed = ImageSize * gHostPathImgNumber * gStructVarible.PictureNum / Difftime; } for (int i = 0; i<5; i++) { err = cudaFreeHost(gHostImage[i]); if (gStructVarible.ImgBitDeep == 24) { err = cudaFreeHost(gHostColorImage[i]); } } return 0; } /************************************************* 函数名称: SinglePictureExtractPoint // 函数描述: 离线模式提点函数; // 输入参数:const char *Imgpath 离线时需要进行提点操作的图像路径 const char *outputPath 提取出的特征文件输出的路径 // 输出参数:无 // 返回值 : 无 // 其他说明: 函数输出的特征文件绝对路径名称为 outputPath\\OffLine.bin // . *************************************************/ IMGSIMULATION_API bool SinglePictureExtractPoint(const char *Imgpath, const char*outputPath) { char strfilename[255]; Parameter Devpar; Devpar.ImgHeight = gStructVarible.ImgHeight; Devpar.ImgWidth = gStructVarible.ImgWidth; Devpar.Threshold = gStructVarible.Threshold; Devpar.LengthMin = gStructVarible.LengthMin; Devpar.LengthMax = gStructVarible.LengthMax; Devpar.AreaMin = gStructVarible.AreaMin; Devpar.AreaMax = gStructVarible.AreaMax; Devpar.PictureNum = 1; Devpar.PicBlockSize = gStructVarible.PicBlockSize; Devpar.ImgChannelNum = gStructVarible.ImgChannelNum; Devpar.ImgMakeborderWidth = (Devpar.ImgWidth + 127) / 128 * 128; Devpar.ColThreadNum = (Devpar.ImgMakeborderWidth / Devpar.PicBlockSize + 127) / 128 * 128; Devpar.RowThreadNum = Devpar.ImgHeight*Devpar.PictureNum / Devpar.PicBlockSize; // 线程配置定义 dim3 mGrid1(Devpar.ImgMakeborderWidth / 128, Devpar.ImgHeight*Devpar.PictureNum, 1); dim3 mGrid2(Devpar.ColThreadNum / 128, Devpar.RowThreadNum, 1); //读取图片 unsigned char *tHostImage; cudaHostAlloc((void**)&tHostImage, Devpar.ImgHeight * Devpar.ImgWidth *Devpar.ImgChannelNum *Devpar.PictureNum * sizeof(unsigned char), cudaHostAllocDefault); if (Devpar.ImgChannelNum == 1) { RmwRead8BitBmpFile2Img(Imgpath, NULL, tHostImage, &Devpar.ImgWidth, &Devpar.ImgHeight); } else { RmwRead8BitBmpFile2Img(Imgpath, tHostImage, NULL, &Devpar.ImgWidth, &Devpar.ImgHeight); } //------------------------------------------------------------------------------------------------------------------------------- unsigned char * tDevColorImage; unsigned char * tDevGrayImage; unsigned char * tDevpad; unsigned char * tDev2val; unsigned char * tDevcounter; cudaMalloc((void**)&tDevColorImage, sizeof(unsigned char)* Devpar.ImgWidth* Devpar.ImgHeight*Devpar.ImgChannelNum*Devpar.PictureNum); cudaMalloc((void**)&tDevGrayImage, sizeof(unsigned char)* Devpar.ImgWidth* Devpar.ImgHeight*Devpar.PictureNum); cudaMalloc((void**)&tDevpad, sizeof(unsigned char)* Devpar.ImgMakeborderWidth* Devpar.ImgHeight*Devpar.PictureNum); cudaMalloc((void**)&tDev2val, sizeof(unsigned char)* Devpar.ImgMakeborderWidth* Devpar.ImgHeight*Devpar.PictureNum); cudaMalloc((void**)&tDevcounter, sizeof(unsigned char)* Devpar.ImgMakeborderWidth* Devpar.ImgHeight*Devpar.PictureNum); //设备端显存申请 short * tDevRecXLeft; short * tDevRecYLeft; short * tDevRecXRight; short * tDevRecYRight; short * tDevLength; short * tDevArea; double *tDevXpos; double *tDevYpos; short *tDevIndex; cudaMalloc((void**)&tDevRecXLeft, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));//方位盒 xmin cudaMalloc((void**)&tDevRecYLeft, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymin cudaMalloc((void**)&tDevRecXRight, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// xmax cudaMalloc((void**)&tDevRecYRight, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// ymax 
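	// One output slot is reserved per PicBlockSize * PicBlockSize tile: ColThreadNum is the
	// column count rounded up to a multiple of 128 and RowThreadNum = ImgHeight * PictureNum / PicBlockSize.
	// Illustrative numbers, assuming the default 5120 * 5120 frame and PicBlockSize = 16:
	//     ColThreadNum = round_up(5120 / 16, 128) = round_up(320, 128) = 384
	//     RowThreadNum = 5120 * 1 / 16 = 320
	//     => 384 * 320 = 122880 candidate slots per image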
cudaMalloc((void**)&tDevLength, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));//设备端输出 周长 cudaMalloc((void**)&tDevArea, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));// 面积 cudaMalloc((void**)&tDevXpos, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// xpos cudaMalloc((void**)&tDevYpos, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(double));// ypos cudaMalloc((void**)&tDevIndex, Devpar.ColThreadNum*Devpar.RowThreadNum * sizeof(short));//提取特征有效标志 //输出空间申请 short * tHostRecXLeft = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostRecYLeft = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostRecXRight = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostRecYRight = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostLength = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostArea = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; double * tHostXpos = new double[Devpar.ColThreadNum*Devpar.RowThreadNum]; double * tHostYpos = new double[Devpar.ColThreadNum*Devpar.RowThreadNum]; short * tHostIndex = new short[Devpar.ColThreadNum*Devpar.RowThreadNum]; //核函数执行 if (Devpar.ImgChannelNum == 1) { cudaMemcpy(tDevGrayImage, tHostImage, sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, cudaMemcpyHostToDevice); //执行灰度化,二值化核函数程序 GrayMakeBorder << <mGrid1, 128 >> > (tDevGrayImage, tDevpad, Devpar); } else { cudaMemcpy(tDevColorImage, tHostImage, sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgWidth*Devpar.ImgChannelNum*Devpar.PictureNum, cudaMemcpyHostToDevice); ColorMakeBorder << <mGrid1, 128 >> > (tDevColorImage, tDevpad, Devpar); } //执行灰度化,二值化核函数程序 Binarization << <mGrid1, 128 >> > (tDevpad, tDev2val, tDevcounter, Devpar); //边界提取 Dilation << <mGrid1, 128 >> > (tDev2val, tDevcounter, Devpar); cudaMemcpy(tDev2val, tDevcounter, sizeof(unsigned char)* Devpar.ImgHeight *Devpar.ImgMakeborderWidth*Devpar.PictureNum, cudaMemcpyDeviceToDevice); Erosion << <mGrid1, 128 >> > (tDev2val, tDevcounter, Devpar); //提取周长和包围盒 GetCounter << <mGrid2, 128 >> > (tDevcounter, tDevLength, tDevRecXLeft, tDevRecYLeft, tDevRecXRight, tDevRecYRight, Devpar);//提取轮廓的函数 SelectTrueBox << <mGrid2, 128 >> >(tDevcounter, tDevLength, tDevRecXLeft, tDevRecYLeft, tDevRecXRight, tDevRecYRight, tDevIndex, Devpar); SelectNonRepeatBox << <mGrid2, 128 >> > (tDevRecXLeft, tDevRecYLeft, tDevIndex, Devpar); GetNonRepeatBox << <mGrid2, 128 >> >(tDevRecXLeft, tDevRecYLeft, tDevIndex, Devpar); GetInfo << <mGrid2, 128 >> > (tDevpad, tDevIndex, tDevRecXLeft, tDevRecYLeft, tDevRecXRight, tDevRecYRight, tDevXpos, tDevYpos, tDevArea, Devpar); //拷贝输出结果 cudaMemcpy(tHostLength, tDevLength, sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost); cudaMemcpy(tHostArea, tDevArea, sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost); cudaMemcpy(tHostXpos, tDevXpos, sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost); cudaMemcpy(tHostYpos, tDevYpos, sizeof(double)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost); cudaMemcpy(tHostIndex, tDevIndex, sizeof(short)* Devpar.ColThreadNum * Devpar.RowThreadNum, cudaMemcpyDeviceToHost); vector<CircleInfo>myInfo; int mtempindex = 0; for (int j = 0; j <Devpar.ColThreadNum * Devpar.RowThreadNum; j++) { if (tHostIndex[j] != 0) { CircleInfo temp; mtempindex++; temp.index = (short)mtempindex; temp.length = tHostLength[j]; temp.area = tHostArea[j]; temp.xpos = tHostXpos[j]; 
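			// Every valid slot (tHostIndex[j] != 0) becomes one CircleInfo record: a running
			// index plus length, area and the sub-pixel centre (xpos, ypos).  The records are
			// written back to back into <outputPath>\OffLine.bin below, which is the layout
			// DrawPointFlag() later reads back (file size / sizeof(CircleInfo) records).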
temp.ypos = tHostYpos[j];
			myInfo.push_back(temp);
		}
	}
	if (myInfo.size() > 0)
	{
		FILE* fp;
		sprintf(strfilename, "%s\\OffLine.bin", outputPath);	// build the output file name under outputPath
		fp = fopen(strfilename, "wb");
		fwrite(&myInfo[0], sizeof(CircleInfo)*myInfo.size(), 1, fp);
		fclose(fp);
	}
	// release device and host memory
	cudaFreeHost(tHostImage);
	cudaFree(tDevRecXLeft);
	cudaFree(tDevRecYLeft);
	cudaFree(tDevRecXRight);
	cudaFree(tDevRecYRight);
	cudaFree(tDevLength);
	cudaFree(tDevArea);
	cudaFree(tDevXpos);
	cudaFree(tDevYpos);
	cudaFree(tDevIndex);
	cudaFree(tDevColorImage);
	cudaFree(tDevGrayImage);
	cudaFree(tDevpad);
	cudaFree(tDev2val);
	cudaFree(tDevcounter);
	delete[] tHostRecXLeft;
	delete[] tHostRecYLeft;
	delete[] tHostRecXRight;
	delete[] tHostRecYRight;
	delete[] tHostLength;
	delete[] tHostArea;
	delete[] tHostXpos;
	delete[] tHostYpos;
	delete[] tHostIndex;
	return 0;
}
/*************************************************
Function name: DrawPointFlag //
Description:   redraw the extracted marker points; //
Input:  const char *pathBin : feature file of the marker points;
.       const char *pathImg : path of the image to read;//
Output: const char *pathWrite : path of the marked image to write;//
Return: void//
Notes:  the function marks every point stored in the feature file back onto
.       its original image position and writes out the marked image;
.       each point is drawn as a red cross;//
*************************************************/
IMGSIMULATION_API void DrawPointFlag(const char *pathBin, const char *pathImg, const char *pathWrite)
{
	// read the feature file
	FILE *fr;
	fr = fopen(pathBin, "rb");
	// get the file size
	fseek(fr, 0, SEEK_END);		// move the file pointer to the end of the file
	long lSize = ftell(fr);		// data length in bytes
	rewind(fr);					// move the file pointer back to the beginning of the file
	// allocate the output buffer
	int FlagSize = lSize / sizeof(CircleInfo);
	CircleInfo *RInfo = (CircleInfo*)malloc(sizeof(CircleInfo)*FlagSize);
	// read the feature records
	fread(RInfo, sizeof(CircleInfo), FlagSize, fr);
	fclose(fr);
	// draw a cross at every marker point
	Mat Img = imread(pathImg, IMREAD_COLOR);
	cv::Vec3b pflag(0, 0, 255);
	for (int i = 0; i < FlagSize; i++)
	{
		CircleInfo myinfo = RInfo[i];
		Img.at<Vec3b>(myinfo.xpos, myinfo.ypos) = pflag;
		if (myinfo.xpos - 3 >= 0)
		{
			Img.at<Vec3b>(myinfo.xpos - 1, myinfo.ypos) = pflag;
			Img.at<Vec3b>(myinfo.xpos - 2, myinfo.ypos) = pflag;
			Img.at<Vec3b>(myinfo.xpos - 3, myinfo.ypos) = pflag;
		}
		if (myinfo.xpos + 3 < gStructVarible.ImgHeight)
		{
			Img.at<Vec3b>(myinfo.xpos + 1, myinfo.ypos) = pflag;
			Img.at<Vec3b>(myinfo.xpos + 2, myinfo.ypos) = pflag;
			Img.at<Vec3b>(myinfo.xpos + 3, myinfo.ypos) = pflag;
		}
		if (myinfo.ypos - 3 >= 0)
		{
			Img.at<Vec3b>(myinfo.xpos, myinfo.ypos - 1) = pflag;
			Img.at<Vec3b>(myinfo.xpos, myinfo.ypos - 2) = pflag;
			Img.at<Vec3b>(myinfo.xpos, myinfo.ypos - 3) = pflag;
		}
		if (myinfo.ypos + 3 < gStructVarible.ImgWidth)
		{
			Img.at<Vec3b>(myinfo.xpos, myinfo.ypos + 1) = pflag;
			Img.at<Vec3b>(myinfo.xpos, myinfo.ypos + 2) = pflag;
			Img.at<Vec3b>(myinfo.xpos, myinfo.ypos + 3) = pflag;
		}
	}
	imwrite(pathWrite, Img);
	free(RInfo);
}
/*************************************************
Function name: Memory_application //
Description:   allocate the global buffers; //
Input:  none//
Output: none//
Return: void//
Notes:  allocates all required global buffers according to the configured image size;//
*************************************************/
IMGSIMULATION_API void Memory_application()
{
	compress_old_Width = gStructVarible.ImgWidth;
	compress_old_Height = gStructVarible.ImgHeight * gStructVarible.PictureNum;
	//imgWidth = gStructVarible.ImgWidth;	// image width and height come from the front-end settings
	//imgHeight = gStructVarible.ImgHeight * gStructVarible.PictureNum;
	compress_imgWidth = (compress_old_Width + 7) / 8 * 8;
	compress_imgHeight = (compress_old_Height + 7) / 8 * 8;
	// compression-related setup starts here
	compressratio = gStructVarible.CompressionRatio;	// compression ratio from the front-end settings
	int bmpSize = compress_imgWidth * compress_imgHeight;
	gpHudata = new unsigned char[bmpSize];	// for grey images the chroma planes are constant, so preset them
	gpHvdata = new unsigned char[bmpSize];
	memset(gpHudata, 128, compress_imgHeight * 
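	// 128 is the neutral chroma value, so a grey frame compresses as its Y plane plus constant U/V planes.
	// Buffer layout set up in the allocations below (sizes in frames):
	//     camera buffer       : DeviceCount * 2 blocks of Bufferlength frames (one malloc, sliced into gCameraBuffer[i])
	//     page-locked buffers : DeviceCount + 1 blocks (cudaHostAlloc) feeding the GPUs
	//     compression buffers : DeviceCount + 1 blocks (malloc)
	//     gRecupImgData       : one PictureNum-frame block, presumably used by the rectangle-box update path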
compress_imgWidth); memset(gpHvdata, 128, compress_imgHeight * compress_imgWidth); blocks.x = compress_imgWidth / 8; //设置cuda压缩程序的blocks为(imgWidth / 8,imgHeight / 8) blocks.y = compress_imgHeight / 8; blocks.z = 1; quantityassgnment(); //初始化主机端的全局变量 /*申请数据缓冲区*/ //相机采集卡所对应内存 gCameraDress= (unsigned char*)malloc(gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char) * Bufferlength* HardwareParam.DeviceCount * 2); for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { gCameraBuffer[i] = gCameraDress + i*gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char) * Bufferlength; } //压缩缓冲区 for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { gHostComressiongBuffer[i] = (unsigned char*)malloc(gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char) * Bufferlength); } //缓冲区页锁内存 for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { cudaHostAlloc((void**)&gHostBuffer[i], gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.ImgChannelNum * sizeof(unsigned char)*Bufferlength, cudaHostAllocDefault); } //矩形盒数据内存 gRecupImgData = (unsigned char*)malloc(gStructVarible.ImgWidth*gStructVarible.ImgHeight *gStructVarible.PictureNum*gStructVarible.ImgChannelNum * sizeof(unsigned char)); } /************************************************* 函数名称: Memory_release // 函数描述: 全局内存释放; // 输入参数:空// 输出参数:空// 返回值 : 空// 其他说明: 函数按图像尺寸释放所需的全局内存;// *************************************************/ IMGSIMULATION_API void Memory_release() { free(gCameraDress); gCameraDress = NULL; for (int i = 0; i < HardwareParam.DeviceCount * 2; i++) { //free(gCameraBuffer[i]); gCameraBuffer[i] = NULL; } for (int i = 0; i < HardwareParam.DeviceCount + 1; i++) { cudaFreeHost(gHostBuffer[i]); free(gHostComressiongBuffer[i]); gHostComressiongBuffer[i] = NULL; } free(gRecupImgData); delete[]gpHudata; delete[]gpHvdata;//qwt这里要出现错误 } /************************************************* 函数名称: SetCameraPar // 函数描述: 相机参数设置; // 输入参数:int ScrBufferlength : 图片缓冲区的长度(图片张数); . 输出参数:null;// 返回值 : bool -- 参数传递成功标志位// 其他说明: 函数用于将界面设置的参数导入DLL的参数中;// *************************************************/ IMGSIMULATION_API bool SetCameraPar(int ScrBufferlength) { Bufferlength = ScrBufferlength; return 0; } /************************************************* 函数名称: SetParameter // 函数描述: 参数传递; // 输入参数:Parameter *info : 界面设置的结构体参数; . 
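.           Usage note: fields that should keep their current value are passed as -1
.           (unused paths are left unset), and len must equal the number of fields the
.           caller actually filled in; the function counts the fields it accepted and
.           returns true only when that count equals len.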
int len : 待传递参数个数;// 输出参数:Parameter gStructVarible : 运行时的结构体参数;// 返回值 : bool -- 参数传递成功标志位// 其他说明: 函数用于将界面设置的参数导入DLL的参数中;// *************************************************/ IMGSIMULATION_API bool SetParameter(Parameter *info, int len) { char count = 0; if (info->ImgReadPath != NULL) { //gStructVarible.ImgReadPath = info->ImgReadPath; sprintf_s(gStructVarible.ImgReadPath, "%s//1.bmp", info->ImgReadPath); count++; } if (info->ImgSavePath != NULL) { //gStructVarible.ImgSavePath = info->ImgSavePath; sprintf_s(gStructVarible.ImgSavePath, "%s", info->ImgSavePath); count++; } if (info->DataReadPath != NULL) { //gStructVarible.DataReadPath = info->DataReadPath; sprintf_s(gStructVarible.DataReadPath, "%s", info->DataReadPath); count++; } if (info->ImgBitDeep != -1) { gStructVarible.ImgBitDeep = info->ImgBitDeep; count++; } if (info->ImgChannelNum != -1) { gStructVarible.ImgChannelNum = info->ImgChannelNum; count++; } if (info->ImgHeight != -1) { gStructVarible.ImgHeight = info->ImgHeight; count++; } if (info->ImgWidth != -1) { gStructVarible.ImgWidth = info->ImgWidth; count++; } if (info->Threshold != -1) { gStructVarible.Threshold = info->Threshold; count++; } if (info->LengthMin != -1) { gStructVarible.LengthMin = info->LengthMin; count++; } if (info->LengthMax != -1) { gStructVarible.LengthMax = info->LengthMax; count++; } if (info->PicBlockSize != -1) { gStructVarible.PicBlockSize = info->PicBlockSize; count++; } if (info->AreaMin != -1) { gStructVarible.AreaMin = info->AreaMin; count++; } if (info->AreaMax != -1) { gStructVarible.AreaMax = info->AreaMax; count++; } if (info->CompressionRatio != -1) { gStructVarible.CompressionRatio = info->CompressionRatio; count++; } if (info->PictureNum != -1) { gStructVarible.PictureNum = info->PictureNum; count++; } if (info->TerminateFlag != -1) { gStructVarible.TerminateFlag = info->TerminateFlag; if (gStructVarible.TerminateFlag == 1) { ExtractPointSuccess = true;//实验结束 } else { ExtractPointSuccess = false;//标志位复位 } count++; } if (info->RecModelFlag != -1) { gStructVarible.RecModelFlag = info->RecModelFlag; count++; } if (info->RecPadding != -1) { gStructVarible.RecPadding = info->RecPadding; count++; } gStructVarible.ImgChannelNum = gStructVarible.ImgBitDeep / 8;//通道数=位深度/8 //传参个数校验 if (count == len) return true; return false; } /************************************************* 函数名称: GetParameter // 函数描述: 获取界面传入参数结构体参数值; // 输入参数:Parameter *info : 界面传入参数结构体; 输出参数:Parameter gStructVarible : 运行时的结构体参数;// 返回值 : NULL // 其他说明: 函数用于读取界面传入参数结构体参数;// *************************************************/ IMGSIMULATION_API void GetParameter(Parameter *info) { /*>1<*/sprintf_s(info->ImgReadPath, "%s", gStructVarible.ImgReadPath); /*>2<*/sprintf_s(info->ImgSavePath, "%s", gStructVarible.ImgSavePath); /*>3<*/sprintf_s(info->DataReadPath, "%s", gStructVarible.DataReadPath); /*>4<*/info->ImgBitDeep = gStructVarible.ImgBitDeep; /*>5<*/info->ImgChannelNum = gStructVarible.ImgChannelNum; /*>6<*/info->ImgHeight = gStructVarible.ImgHeight; /*>7<*/info->ImgWidth = gStructVarible.ImgWidth; /*>8<*/info->ImgMakeborderWidth = gStructVarible.ImgMakeborderWidth; /*>9<*/info->Threshold = gStructVarible.Threshold; /*>10<*/info->LengthMin = gStructVarible.LengthMin; /*>11<*/info->LengthMax = gStructVarible.LengthMax; /*>12<*/info->PicBlockSize = gStructVarible.PicBlockSize; /*>13<*/info->ColThreadNum = gStructVarible.ColThreadNum; /*>14<*/info->RowThreadNum = gStructVarible.RowThreadNum; /*>15<*/info->AreaMin = gStructVarible.AreaMin; /*>16<*/info->AreaMax = gStructVarible.AreaMax; 
    /*>17<*/info->CompressionRatio = gStructVarible.CompressionRatio;
    /*>18<*/info->PictureNum = gStructVarible.PictureNum;
    /*>19<*/info->TerminateFlag = gStructVarible.TerminateFlag;
    /*>20<*/info->RecModelFlag = gStructVarible.RecModelFlag;
    /*>21<*/info->RecPadding = gStructVarible.RecPadding;
}
/*************************************************
Function name:  ClearDataCache //
Description:    cache-clearing function; resets all global and static variables of the DLL to their initial state //
Input:          none //
Output:         none //
Return value:   none //
Notes:          none //
.
*************************************************/
IMGSIMULATION_API void ClearDataCache()
{
    // global variables
    sprintf_s(gStructVarible.ImgReadPath, "%s//1.bmp", "C://pic//img_read");
    sprintf_s(gStructVarible.ImgSavePath, "%s", "C://pic//img_write");
    sprintf_s(gStructVarible.DataReadPath, "%s", "C://pic//img_data");
    gStructVarible.AreaMax = 99999;
    gStructVarible.AreaMin = 0;
    gStructVarible.ColThreadNum = 320;
    gStructVarible.CompressionRatio = 2000;
    gStructVarible.ImgChannelNum = 1;
    gStructVarible.ImgHeight = 5120;
    gStructVarible.ImgMakeborderWidth = 5120;
    gStructVarible.ImgWidth = 5120;
    gStructVarible.LengthMax = 99999;
    gStructVarible.LengthMin = 0;
    gStructVarible.PicBlockSize = 16;
    gStructVarible.PictureNum = 1;
    gStructVarible.RecModelFlag = false;
    gStructVarible.RecPadding = 4;
    gStructVarible.RowThreadNum = 320;
    gStructVarible.Threshold = 60;
    gStructVarible.TerminateFlag = 0;
    // class static variables
    R::mRindex = 0;
    RecR::mRecindex = 0;
    T::mTindex = 0;
    TC::mTCindex = 0;
    // globals
    Bufferlength = 50;
    ExtractPointInitialSuccessFlag[0] = false;
    ExtractPointInitialSuccessFlag[1] = false;
    ExtractPointInitialSuccessFlag[2] = false;
    ExtractPointSuccess = false;
    // rectangle-box data
    gHostRecData.clear();
    gRecNum = gHostRecData.size();
    gSingleImgRecNum = gHostRecData.size();
    gRecupImgData = NULL;
    DevUpdateRec[0] = false;
    DevUpdateRec[1] = false;
    DevUpdateRec[2] = false;
    HostUpdateRec = false;
    RecupdataInitialSuccessFlag = false;
    // camera buffers
    for (int i = 0; i < 6; i++)
    {
        BufferBlockIndex[i] = 0;
        gCameraBuffer[i] = NULL;        // reset the camera buffer pointer
        CameraBufferFull[i] = false;
    }
    for (int i = 0; i<4; i++)
    {
        // page-locked memory buffers
        gHostBuffer[i] = NULL;
        PageLockBufferEmpty[i] = true;
        PageLockBufferWorking[i] = false;
        PageLockBufferStartIndex[i] = 0;
        // compression buffers
        gHostComressiongBuffer[i] = NULL;
        gComressionBufferEmpty[i] = true;
        gComressionBufferWorking[i] = false;
        gComressionBufferStartIndex[i] = 0;
    }
}
/**************************** Unpacking of feature files ***************************************************/
/*************************************************
Function name:  GetFiles //
Description:    collect the paths of all feature files inside a folder; //
Input:          const char * path : path of the folder that contains the .bin files
Output:         vector<string>& files : the paths of the .bin files inside the folder, stored as strings //
Return value:   none //
Notes:          none //
.
*************************************************/
IMGSIMULATION_API void GetFiles(const char * path, vector<string>& files)
{
    // file handle
    intptr_t hFile = 0;
    // file information: structure that stores the attributes of a file
    struct __finddata64_t fileinfo;
    string p;   // string that holds the path
    if ((hFile = _findfirst64(p.assign(path).append("\\*").c_str(), &fileinfo)) != -1)   // enter only if the search succeeded
    {
        do
        {
            // if the entry is a directory, recurse into it (the folder may contain subfolders)
            if ((fileinfo.attrib & _A_SUBDIR))
            {
                // skip "." (the current directory) and ".." (its parent directory);
                // both must be ignored, otherwise the recursion would never terminate!
                if (strcmp(fileinfo.name, ".") != 0 && strcmp(fileinfo.name, "..") != 0)
                    GetFiles(p.assign(path).append("\\").append(fileinfo.name).c_str(), files);
            }
            // otherwise add the file to the list
            else
            {
                files.push_back(p.assign(path).append("\\").append(fileinfo.name));
            }
        } while (_findnext64(hFile, &fileinfo) == 0);
        // _findclose ends the search
        _findclose(hFile);
    }
}
/*************************************************
Function name:  UnzipFeatureBins //
Description:    unpacking function; splits one feature file (.bin) that contains the features of several
.               images into several feature files (.bin), one file per image //
Input:          const char *InputPath : feature file containing the features of several images (50 images, tied to Bufferlength)
.               const char *OutputFilename : folder that receives the output feature files //
Output:         none //
Return value:   none //
Notes:          none //
.
*************************************************/
void UnzipFeatureBins(const char *InputPath, const char *OutputFilename)
{
    char strFilename[255];
    FILE *fr;
    fr = fopen(InputPath, "rb");
    if (fr == NULL)   // return if the file cannot be opened
    {
        cout << "FILE fail open" << endl;
        return;
    }
    fseek(fr, 0, SEEK_END);
    long lSize = ftell(fr);   // length of the data
    rewind(fr);
    int Datalength = lSize / sizeof(CircleInfo);
    CircleInfo *RInfo = (CircleInfo*)malloc(sizeof(CircleInfo)*Datalength);
    fread(RInfo, sizeof(CircleInfo), Datalength, fr);
    fclose(fr);
    // total number of records
    int Dataoffset = 0;
    int Dataindex = 0;
    while (Dataoffset < Datalength)
    {
        CircleInfo mHead = RInfo[Dataoffset];
        Dataoffset++;
        int mlen = 0;
        if (mHead.area == 0 && int(mHead.xpos) == 99999)   // check for a header record
        {
            Dataindex = mHead.index;
            mlen = mHead.length;
            if (mlen > 0 && Dataoffset + mlen <= Datalength)
            {
                FILE* fp;
                sprintf_s(strFilename, "%s\\%d.bin", OutputFilename, Dataindex);   // [3] write the output file path dynamically into the buffer at strFilename
                fp = fopen(strFilename, "wb");
                fwrite(&RInfo[Dataoffset], sizeof(CircleInfo)*mlen, 1, fp);
                fclose(fp);
                Dataoffset = Dataoffset + mlen;
            }
        }
    }
};
/*************************************************
Function name:  UnzipMultiFeatureBins //
Description:    unpacking function; unpacks the selected feature archive files //
Input:          const char *Filepath : path where the files are unpacked
.               const int arrsize : number of archive files
.               const int BinfileIndex[] : array with the indices of the archive files
Output:         none //
Return value:   none //
Notes:          none //
.
*************************************************/
IMGSIMULATION_API void UnzipMultiFeatureBins(const char* Filepath, const int arrsize, const int BinfileIndex[])
{
    if (arrsize == 0)   // nothing to do
        return;
    char Binfile[200];   // archive file name
    // unpack the files one by one
    for (int index = 0; index < arrsize; index++)
    {
        sprintf(Binfile, "%s//%d%s", Filepath, BinfileIndex[index], ".bin");
        UnzipFeatureBins(Binfile, Filepath);
    }
}
/*************************************************
Function name:  UnzipFeatureFiles //
Description:    unpacking function; splits the large feature files (.bin files holding the features of several
.               images) found in a folder into individual feature files. The feature file produced by the
.               GPU-side point extraction holds the features of Bufferlength images; this function splits
.               that large file into one file per image.
Input:          const char *Filepath : folder that contains the feature files
Output:         none //
Return value:   none //
Notes:          none //
.
*************************************************/
IMGSIMULATION_API void UnzipFeatureFiles(const char * Filepath)
{
    vector<string>FeatureFilesPass;
    GetFiles(Filepath, FeatureFilesPass);
    if (FeatureFilesPass.size() > 0)
    {
        for (int i = 0; i < FeatureFilesPass.size(); i++)
        {
            UnzipFeatureBins(FeatureFilesPass[i].c_str(), Filepath);
        }
    }
}
// image unpacking function
IMGSIMULATION_API void UnzipOneBin(const char* Filepath, const char* BinPath)
{
    //Package temp(BinPath, content);
    Package temp(BinPath);
    temp.UnPack(BinPath, Filepath);
    return;
}
/*************************************************
Function name:  UnzipSomeImgBins //
Description:    unpacking function; unpacks the selected image archive files //
Input:          const char *Filepath : path where the files are unpacked
.               const int arrsize : number of archive files
.               const int BinfileIndex[] : array with the indices of the archive files
Output:         none //
Return value:   none //
Notes:          none //
.
*************************************************/
IMGSIMULATION_API void UnzipMultiImgBins(const char* Filepath, const int arrsize, const int BinfileIndex[])
{
    if (arrsize == 0)   // nothing to do
        return;
    char Binfile[5][200];   // archive file names: one buffer per worker thread so a name is not overwritten before its thread reads it
    int index = 0;
    for (int i = 0; i + 5 < arrsize; i = i + 5)
    {
        sprintf(Binfile[0], "%s//%d%s", Filepath, BinfileIndex[i], ".bin");
        thread th1(UnzipOneBin, Filepath, Binfile[0]);
        sprintf(Binfile[1], "%s//%d%s", Filepath, BinfileIndex[i+1], ".bin");
        thread th2(UnzipOneBin, Filepath, Binfile[1]);
        sprintf(Binfile[2], "%s//%d%s", Filepath, BinfileIndex[i+2], ".bin");
        thread th3(UnzipOneBin, Filepath, Binfile[2]);
        sprintf(Binfile[3], "%s//%d%s", Filepath, BinfileIndex[i+3], ".bin");
        thread th4(UnzipOneBin, Filepath, Binfile[3]);
        sprintf(Binfile[4], "%s//%d%s", Filepath, BinfileIndex[i+4], ".bin");
        thread th5(UnzipOneBin, Filepath, Binfile[4]);
        index = i + 5;
        th1.join();
        th2.join();
        th3.join();
        th4.join();
        th5.join();
    }
    // when fewer than 5 archive files remain, unpack them one at a time
    for (; index < arrsize; index++)
    {
        sprintf(Binfile[0], "%s//%d%s", Filepath, BinfileIndex[index], ".bin");
        Package cc(Binfile[0]);
        cc.UnPack(Binfile[0], Filepath);
    }
}
/*************************************************
Function name:  UnzipPictureFiles //
Description:    unpacking function; unpacks every image archive file found in the folder //
Input:          const char *Filepath : folder containing the image archive files (each archive holds 50 images, tied to Bufferlength)
Output:         none //
Return value:   none //
Notes:          none //
.
*************************************************/
IMGSIMULATION_API void UnzipPictureFiles(const char * Filepath)
{
    vector<string>FeatureFilesPass;
    GetFiles(Filepath, FeatureFilesPass);
    int interpret = 0;
    if (FeatureFilesPass.size() > 0)
    {
        for (int i = 0; i + 5 < FeatureFilesPass.size(); i = i + 5)
        {
            //cout << FeatureFilesPass[i].c_str() << endl;
            //Package cc(FeatureFilesPass[i].c_str(), Bufferlength/ gStructVarible.PictureNum);
            //cc.UnPack(FeatureFilesPass[i].c_str(), Filepath);
            //thread th1(UnzipOneBin, Filepath, FeatureFilesPass[i].c_str(), Bufferlength / gStructVarible.PictureNum);
            //thread th2(UnzipOneBin, Filepath, FeatureFilesPass[i+1].c_str(), Bufferlength / gStructVarible.PictureNum);
            //thread th3(UnzipOneBin, Filepath, FeatureFilesPass[i+2].c_str(), Bufferlength / gStructVarible.PictureNum);
            //thread th4(UnzipOneBin, Filepath, FeatureFilesPass[i+3].c_str(), Bufferlength / gStructVarible.PictureNum);
            //thread th5(UnzipOneBin, Filepath, FeatureFilesPass[i+4].c_str(), Bufferlength / gStructVarible.PictureNum);
            thread th1(UnzipOneBin, Filepath, FeatureFilesPass[i].c_str());
            thread th2(UnzipOneBin, Filepath, FeatureFilesPass[i + 1].c_str());
            thread th3(UnzipOneBin, Filepath, FeatureFilesPass[i + 2].c_str());
            thread th4(UnzipOneBin, Filepath, FeatureFilesPass[i + 3].c_str());
            thread th5(UnzipOneBin, Filepath, FeatureFilesPass[i + 4].c_str());
            interpret = i + 5;
            th1.join();
            th2.join();
            th3.join();
            th4.join();
            th5.join();
        }
        for (; interpret < FeatureFilesPass.size(); interpret++)
        {
            //Package cc(FeatureFilesPass[interpret].c_str(), Bufferlength / gStructVarible.PictureNum);
            Package cc(FeatureFilesPass[interpret].c_str());
            cc.UnPack(FeatureFilesPass[interpret].c_str(), Filepath);
        }
    }
}
/*---------------------------------------------------------------------------------------*/
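// --- Illustrative usage sketch (not part of the library above) --------------------------------
// The exported functions above form a small host-side API: parameters are pushed into the DLL
// with SetCameraPar/SetParameter, global buffers are sized with Memory_application, and the
// per-buffer .bin outputs are unpacked and redrawn afterwards. The calling order, the header
// name "Imgsimulation.h" and the example paths below are assumptions made for illustration;
// only the function names and signatures are taken from the declarations above.
#include "Imgsimulation.h"   // assumed header exposing Parameter and the IMGSIMULATION_API functions

int ExampleRun()
{
    Parameter cfg;
    // Assumption: every field of cfg not assigned below is pre-set to -1 (paths to NULL),
    // so that SetParameter() skips it and the 'len' check matches the 5 fields passed here.
    cfg.ImgWidth = 5120;
    cfg.ImgHeight = 5120;
    cfg.ImgBitDeep = 8;
    cfg.PictureNum = 1;
    cfg.Threshold = 60;

    SetCameraPar(50);                              // buffer length, in images
    if (!SetParameter(&cfg, 5))
        return -1;                                 // a count mismatch means a parameter was not accepted

    Memory_application();                          // allocate camera, compression and pinned buffers
    // ... acquisition and GPU point extraction run here ...
    UnzipFeatureFiles("C://pic//img_data");        // split per-buffer .bin files into per-image .bin files
    DrawPointFlag("C://pic//img_data//1.bin",      // redraw the extracted points as red crosses
                  "C://pic//img_read//1.bmp",
                  "C://pic//img_write//1.bmp");
    Memory_release();
    return 0;
}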
f3a1decd910bd39eaeb7f294a610a2ef0fdfa758.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void matrixs_1D_multiplication(int *matrix_a_dev, int *matrix_b_dev, int *matrix_c_dev, int matrix_width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if(row < matrix_width && col < matrix_width)
    {
        for(int k = 0; k < matrix_width; k++)
        {
            matrix_c_dev[row * matrix_width + col] += matrix_a_dev[row * matrix_width + k] * matrix_b_dev[k * matrix_width + col];
        }
    }
}

int main()
{
    int matrix_width = 3;

    int *matrix_a_host;
    int *matrix_b_host;
    int *matrix_c_host;

    matrix_a_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));
    matrix_b_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));
    matrix_c_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));

    for(int row = 0; row < matrix_width; row++)
    {
        for(int col = 0; col < matrix_width; col++)
        {
            matrix_a_host[row * matrix_width + col] = row + col;
            matrix_b_host[row * matrix_width + col] = row * col + col;
        }
    }

    // ------------------GPU--------------------------
    int *matrix_a_dev;
    int *matrix_b_dev;
    int *matrix_c_dev;

    hipMalloc((void**) &matrix_a_dev, matrix_width*matrix_width*sizeof(int));
    hipMalloc((void**) &matrix_b_dev, matrix_width*matrix_width*sizeof(int));
    hipMalloc((void**) &matrix_c_dev, matrix_width*matrix_width*sizeof(int));

    hipMemcpy(matrix_a_dev, matrix_a_host, matrix_width*matrix_width*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(matrix_b_dev, matrix_b_host, matrix_width*matrix_width*sizeof(int), hipMemcpyHostToDevice);
    hipMemset(matrix_c_dev, 0, matrix_width*matrix_width*sizeof(int));   // clear the result buffer: the kernel accumulates into it with +=

    dim3 dimGrid(1, 1, 1);
    dim3 dimBlock(3, 3, 1);

    hipLaunchKernelGGL(( matrixs_1D_multiplication), dim3(dimGrid), dim3(dimBlock), 0, 0, matrix_a_dev, matrix_b_dev, matrix_c_dev, matrix_width);

    hipMemcpy(matrix_c_host, matrix_c_dev, matrix_width*matrix_width*sizeof(int), hipMemcpyDeviceToHost);

    printf("\n-------------Matrix c-----------------\n");
    for(int i = 0; i < matrix_width * matrix_width; i++)
    {
        if((i + 1) % matrix_width)
            printf("%d ", *(matrix_c_host + i));
        else
            printf("%d \n", *(matrix_c_host + i));
    }

    free(matrix_a_host);
    free(matrix_b_host);
    free(matrix_c_host);

    hipFree(matrix_a_dev);
    hipFree(matrix_b_dev);
    hipFree(matrix_c_dev);

    return 1;
}
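// --- Error-checking sketch (illustrative, not part of the generated file above) ---------------
// The port above never checks whether the hipMalloc/hipMemcpy calls or the kernel launch
// succeeded, so a bad launch configuration would go unnoticed. The pattern below uses only
// standard HIP runtime calls; the CHECK_HIP macro name is an assumption, not taken from the file.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK_HIP(call)                                                      \
    do {                                                                     \
        hipError_t err_ = (call);                                            \
        if (err_ != hipSuccess) {                                            \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                     \
                    hipGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Wrapped around the calls in main() above it would look like:
//     CHECK_HIP(hipMemcpy(matrix_a_dev, matrix_a_host, bytes, hipMemcpyHostToDevice));
//     hipLaunchKernelGGL(matrixs_1D_multiplication, dimGrid, dimBlock, 0, 0,
//                        matrix_a_dev, matrix_b_dev, matrix_c_dev, matrix_width);
//     CHECK_HIP(hipGetLastError());        // reports launch-configuration errors
//     CHECK_HIP(hipDeviceSynchronize());   // reports errors raised while the kernel runs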
f3a1decd910bd39eaeb7f294a610a2ef0fdfa758.cu
#include <stdio.h>

__global__ void matrixs_1D_multiplication(int *matrix_a_dev, int *matrix_b_dev, int *matrix_c_dev, int matrix_width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if(row < matrix_width && col < matrix_width)
    {
        for(int k = 0; k < matrix_width; k++)
        {
            matrix_c_dev[row * matrix_width + col] += matrix_a_dev[row * matrix_width + k] * matrix_b_dev[k * matrix_width + col];
        }
    }
}

int main()
{
    int matrix_width = 3;

    int *matrix_a_host;
    int *matrix_b_host;
    int *matrix_c_host;

    matrix_a_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));
    matrix_b_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));
    matrix_c_host = (int *)malloc(matrix_width*matrix_width*sizeof(int));

    for(int row = 0; row < matrix_width; row++)
    {
        for(int col = 0; col < matrix_width; col++)
        {
            matrix_a_host[row * matrix_width + col] = row + col;
            matrix_b_host[row * matrix_width + col] = row * col + col;
        }
    }

    // ------------------GPU--------------------------
    int *matrix_a_dev;
    int *matrix_b_dev;
    int *matrix_c_dev;

    cudaMalloc((void**) &matrix_a_dev, matrix_width*matrix_width*sizeof(int));
    cudaMalloc((void**) &matrix_b_dev, matrix_width*matrix_width*sizeof(int));
    cudaMalloc((void**) &matrix_c_dev, matrix_width*matrix_width*sizeof(int));

    cudaMemcpy(matrix_a_dev, matrix_a_host, matrix_width*matrix_width*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(matrix_b_dev, matrix_b_host, matrix_width*matrix_width*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(matrix_c_dev, 0, matrix_width*matrix_width*sizeof(int));   // clear the result buffer: the kernel accumulates into it with +=

    dim3 dimGrid(1, 1, 1);
    dim3 dimBlock(3, 3, 1);

    matrixs_1D_multiplication<<<dimGrid, dimBlock>>>(matrix_a_dev, matrix_b_dev, matrix_c_dev, matrix_width);

    cudaMemcpy(matrix_c_host, matrix_c_dev, matrix_width*matrix_width*sizeof(int), cudaMemcpyDeviceToHost);

    printf("\n-------------Matrix c-----------------\n");
    for(int i = 0; i < matrix_width * matrix_width; i++)
    {
        if((i + 1) % matrix_width)
            printf("%d ", *(matrix_c_host + i));
        else
            printf("%d \n", *(matrix_c_host + i));
    }

    free(matrix_a_host);
    free(matrix_b_host);
    free(matrix_c_host);

    cudaFree(matrix_a_dev);
    cudaFree(matrix_b_dev);
    cudaFree(matrix_c_dev);

    return 1;
}
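// --- Launch-configuration sketch (illustrative, not part of the file above) -------------------
// Both versions hard-code a single 3x3 block, which only covers matrix_width == 3. Because the
// kernel already guards with (row < matrix_width && col < matrix_width), the launch can be
// generalized by deriving the grid from the matrix width. The 16x16 tile size below is an
// assumption chosen for illustration, not a value taken from the file.
void launch_matmul(int *matrix_a_dev, int *matrix_b_dev, int *matrix_c_dev, int matrix_width)
{
    dim3 dimBlock(16, 16, 1);                                      // assumed tile size
    dim3 dimGrid((matrix_width + dimBlock.x - 1) / dimBlock.x,     // ceil(matrix_width / block.x)
                 (matrix_width + dimBlock.y - 1) / dimBlock.y,
                 1);
    cudaMemset(matrix_c_dev, 0, matrix_width * matrix_width * sizeof(int));   // the kernel accumulates with +=
    matrixs_1D_multiplication<<<dimGrid, dimBlock>>>(matrix_a_dev, matrix_b_dev, matrix_c_dev, matrix_width);
}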
08076cbbf423efbd7344c463e2485a4e37e63a1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // // **************************** // *** MC-GPU, version 1.5b *** // **************************** // //! Definition of the CUDA GPU kernel for the simulation of x ray tracks in a voxelized geometry. //! The physics models for Rayleigh and Compton scattering are translated from the Fortran //! code in PENELOPE 2006. // // ** DISCLAIMER ** // // This software and documentation (the "Software") were developed at the Food and // Drug Administration (FDA) by employees of the Federal Government in the course // of their official duties. Pursuant to Title 17, Section 105 of the United States // Code, this work is not subject to copyright protection and is in the public // domain. Permission is hereby granted, free of charge, to any person obtaining a // copy of the Software, to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, or sell copies of the Software or derivatives, and to permit persons // to whom the Software is furnished to do so. FDA assumes no responsibility // whatsoever for use by other parties of the Software, its source code, // documentation or compiled executables, and makes no guarantees, expressed or // implied, about its quality, reliability, or any other characteristic. Further, // use of this code in no way implies endorsement by the FDA or confers any // advantage in regulatory decisions. Although this software can be redistributed // and/or modified freely, we ask that any derivative works bear some notice that // they are derived from it, and any modified versions bear some notice that they // have been modified. // // //! @file MC-GPU_kernel_v1.5b.cu //! @author Andreu Badal (Andreu.Badal-Soler{at}fda.hhs.gov) //! @date 2018/01/01 // -- Original code started on: 2009/04/14 // //////////////////////////////////////////////////////////////////////////////// // ** This software is described in the following reference (please cite it in yuor papers): // Andreu Badal, Diksha Sharma, Christian G.Graff, Rongping Zeng, and Aldo Badano, Mammography and breast // tomosynthesis simulator for virtual clinical trials, Computer Physics Communications 261, p. 107779 (2021) // https://doi.org/10.1016/j.cpc.2020.107779 // ** Update May 2021 ** !!BLOCKING_LAYER!! // Enabling blocking (or dead) layers at the top and bottom of the detector slab. // Interactions in these layers will not contribute to the pixel value, but their fluorescence will // be tracked and might be detected somewhere else. The insensitive top layer causes a measurable // drop in DQE(0), but it does not affect MTF as implemented. Pixel values will be reduced (less // energy detected per history. [Reference: Zhou et al., Med. Phys. 34, 1098-1109 (2007)] #define BLOCKING_LAYER_TOP 0.0000f // [cm] Thickness layer closer to source. Example: 0.0008f for a 8 micron layer (0.0 == no layer). !!BLOCKING_LAYER!! #define BLOCKING_LAYER_BOTTOM 0.0000f // [cm] Thickness layer further from source. Example: 0.0008f for a 8 micron layer (0.0 == no layer). !!BLOCKING_LAYER!! //////////////////////////////////////////////////////////////////////////////// //! Initialize the image array, ie, set all pixels to zero //! Essentially, this function has the same effect as the command: //! "cutilSafeCall(hipMemcpy(image_device, image, image_bytes, hipMemcpyHostToDevice))"; //! 
//! CUDA performs some initialization work the first time a GPU kernel is called. //! Therefore, calling a short kernel before the real particle tracking is performed //! may improve the accuracy of the timing measurements in the relevant kernel. //! //! @param[in,out] image Pointer to the image array. //! @param[in] pixels_per_image Number of pixels in the image (ie, elements in the array). //////////////////////////////////////////////////////////////////////////////// __global__ void init_image_array_GPU(unsigned long long int* image, int pixels_per_image) { int my_pixel = threadIdx.x + blockIdx.x*blockDim.x; if (my_pixel < pixels_per_image) { // -- Set the current pixel to 0 and return, avoiding overflow when more threads than pixels are used: image[my_pixel] = (unsigned long long int)(0); // Initialize non-scatter image my_pixel += pixels_per_image; // (advance to next image) image[my_pixel] = (unsigned long long int)(0); // Initialize Compton image my_pixel += pixels_per_image; // (advance to next image) image[my_pixel] = (unsigned long long int)(0); // Initialize Rayleigh image my_pixel += pixels_per_image; // (advance to next image) image[my_pixel] = (unsigned long long int)(0); // Initialize multi-scatter image } } // //////////////////////////////////////////////////////////////////////////////// // //! Initialize the dose deposition array, ie, set all voxel doses to zero // //! // //! @param[in,out] dose Pointer to the dose mean and sigma arrays. // //! @param[in] num_voxels_dose Number of voxels in the dose ROI (ie, elements in the arrays). // //////////////////////////////////////////////////////////////////////////////// // __global__ // void init_dose_array_GPU(ulonglong2* voxels_Edep, int num_voxels_dose) // { // int my_voxel = threadIdx.x + blockIdx.x*blockDim.x; // register ulonglong2 ulonglong2_zero; // ulonglong2_zero.x = ulonglong2_zero.y = (unsigned long long int) 0; // if (my_voxel < num_voxels_dose) // { // dose[my_voxel] = ulonglong2_zero; // Set the current voxel to (0,0) and return, avoiding overflow // } // } //////////////////////////////////////////////////////////////////////////////// //! Main function to simulate x-ray tracks inside a voxelized geometry. //! Secondary electrons are not simulated (in photoelectric and Compton //! events the energy is locally deposited). //! //! The following global variables, in the GPU __constant__ memory are used: //! voxel_data_CONST, //! source_energy_data_CONST //! mfp_table_data_CONST. //! density_LUT_CONST //! //! @param[in] history_batch Particle batch number (only used in the CPU version when CUDA is disabled!, the GPU uses the built-in variable threadIdx) //! @param[in] num_p Projection number in the CT simulation. This variable defines a specific angle and the corresponding source and detector will be used. //! @param[in] histories_per_thread Number of histories to simulate for each call to this function (ie, for GPU thread). //! @param[in] seed_input Random number generator seed (the same seed is used to initialize the two MLCGs of RANECU). //! @param[in] voxel_mat_dens Pointer to the voxel densities and material vector (the voxelized geometry), stored in GPU glbal memory. //! @param[in] mfp_Woodcock_table Two parameter table for the linear interpolation of the Woodcock mean free path (MFP) (stored in GPU global memory). //! @param[in] mfp_table_a First element for the linear interpolation of the interaction mean free paths (stored in GPU global memory). //! 
@param[in] mfp_table_b Second element for the linear interpolation of the interaction mean free paths (stored in GPU global memory). //! @param[in] rayleigh_table Pointer to the table with the data required by the Rayleigh interaction sampling, stored in GPU global memory. //! @param[in] compton_table Pointer to the table with the data required by the Compton interaction sampling, stored in GPU global memory. //! @param[in,out] image Pointer to the image vector in the GPU global memory. //! @param[in,out] dose Pointer to the array containing the 3D voxel dose (and its uncertainty) in the GPU global memory. //////////////////////////////////////////////////////////////////////////////// __global__ void track_particles(int histories_per_thread, short int num_p, // For a CT simulation: allocate space for up to MAX_NUM_PROJECTIONS projections. int* seed_input_device, // Random seed read from global memory; secuence continued for successive projections in same GPU. !!DBTv1.4!! unsigned long long int* image, ulonglong2* voxels_Edep, int* voxel_mat_dens, //!!bitree!! Using "int" to be store the index to the bitree table //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2" char* bitree, //!!bitree!! Array with the bitrees for every non-uniform coarse voxel float2* mfp_Woodcock_table, float3* mfp_table_a, float3* mfp_table_b, struct rayleigh_struct* rayleigh_table, struct compton_struct* compton_table, struct detector_struct* detector_data_array, struct source_struct* source_data_array, ulonglong2* materials_dose) { // -- Declare the track state variables: float3 position, direction; float energy, step, prob, randno, mfp_density, mfp_Woodcock; float3 mfp_table_read_a, mfp_table_read_b; int2 seed; int index; int material0, // Current material, starting at 0 for 1st material material_old; // Flag to mark a material or energy change signed char scatter_state; // Flag for scatter images: scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter. // -- Store the Compton table in shared memory from global memory: // For Compton and Rayleigh the access to memory is not coherent and the caching capability do not speeds up the accesses, they actually slows down the acces to other data. __shared__ struct compton_struct cgco_SHARED; __shared__ struct detector_struct detector_data_SHARED; __shared__ struct source_struct source_data_SHARED; if (0==threadIdx.x) // First GPU thread copies the variables to shared memory { // -Copy the current source, detector data from global to shared memory for fast access: source_data_SHARED = source_data_array[num_p]; detector_data_SHARED = detector_data_array[num_p]; // Copy the long array to a single instance in shared memory for the current projection // -Copy the compton data to shared memory: cgco_SHARED = *compton_table; } __syncthreads(); // Make sure all threads will see the initialized shared variable // -- Initialize the RANECU generator in a position far away from the previous history: init_PRNG((threadIdx.x + blockIdx.x*blockDim.x), histories_per_thread, *seed_input_device, &seed); // Using a 1D block. Random seed read from global memory. !!DBTv1.4!! // -- Loop for the "histories_per_thread" particles in the current history_batch: for( ; histories_per_thread>0; histories_per_thread--) { // printf("\n\n********* NEW HISTORY: %d [seeds: %d, %d]\n\n", histories_per_thread, seed.x, seed.y); // fflush(stdout); // !!Verbose!! 
calling printf from the GPU is possible but if multiple threads call it at the same time some output will be lost. unsigned int absvox = 1; // -- Call the source function to get a primary x ray: source(&position, &direction, &energy, &seed, &absvox, &source_data_SHARED, &detector_data_SHARED); scatter_state = (signed char)0; // Reset previous scatter state: new non-scattered particle loaded // -- Find the current energy bin by truncation (this could be pre-calculated for a monoenergetic beam): // The initialization host code made sure that the sampled energy will always be within the tabulated energies (index never negative or too large). index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite) // -- Get the minimum mfp at the current energy using linear interpolation (Woodcock tracking): { float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP } // -- Reset previous material to force a recalculation of the MFPs (negative materials are not allowed in the voxels): material_old = -1; // *** X-ray interaction loop: for(;;) { if (absvox==FLAG_OUTSIDE_VOXELS) break; // -- Primary particle was not pointing to the voxel region! (but may still be detected after moving in vacuum in a straight line). // *** Virtual interaction loop: // New loop structure in MC-GPU_v1.3: simulate all virtual events before sampling Compton & Rayleigh: // float2 matdens; short3 voxel_coord; // Variable used only by DOSE TALLY do { step = -(mfp_Woodcock)*logf(ranecu(&seed)); // Using the minimum MFP in the geometry for the input energy (Woodcock tracking) position.x += step*direction.x; position.y += step*direction.y; position.z += step*direction.z; // -- Locate the new particle in the voxel geometry: absvox = locate_voxel(position, &voxel_coord); // Get the voxel number at the current position and the voxel coordinates (used to check if inside the dose ROI in DOSE TALLY). if (absvox==FLAG_OUTSIDE_VOXELS) break; // -- Particle escaped the voxel region! ("index" is still >0 at this moment) // matdens = voxel_mat_dens[absvox]; // Get the voxel material and density in a single read from global memory // material0 = (int)(matdens.x - 1); // Set the current material by truncation, and set 1st material to value '0'. //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2". Density taken from function "density_LUT". First material number == 0 material0 = (int)voxel_mat_dens[absvox]; // Get the voxel material and density in a single read from global memory (first material==0) if (material0<0) { // -- Non-uniform low resolution voxel: find material at current location searching the original high resolution geometry using the corresponding binary tree: material0 = find_material_bitree(&position, bitree, -material0, &voxel_coord); // !!bitree!! } // -- Get the data for the linear interpolation of the interaction MFPs, in case the energy or material have changed: if (material0 != material_old) { mfp_table_read_a = mfp_table_a[index*(MAX_MATERIALS)+material0]; mfp_table_read_b = mfp_table_b[index*(MAX_MATERIALS)+material0]; material_old = material0; // Store the new material } // *** Apply Woodcock tracking: mfp_density = mfp_Woodcock * density_LUT_CONST[material0]; //!!FixedDensity_DBT!! 
Density taken from constant memory array "density_LUT_CONST"; Old: mfp_density=mfp_Woodcock*matdens.y; // -- Calculate probability of delta scattering, using the total mean free path for the current material and energy (linear interpolation): prob = 1.0f - mfp_density * (mfp_table_read_a.x + energy * mfp_table_read_b.x); randno = ranecu(&seed); // Sample uniform PRN } while (randno<prob); // [Iterate if there is a delta scattering event] if (absvox==FLAG_OUTSIDE_VOXELS) break; // -- Particle escaped the voxel region! Break the interaction loop to call tally image. // The GPU threads will be stopped and waiting here until ALL threads have a REAL event: // -- Real event takes place! Check the kind of event and sample the effects of the interaction: prob += mfp_density * (mfp_table_read_a.y + energy * mfp_table_read_b.y); // Interpolate total Compton MFP ('y' component) if (randno<prob) // [Checking Compton scattering] { // *** Compton interaction: // -- Sample new direction and energy: double costh_Compton; randno = energy; // Save temporal copy of the particle energy (variable randno not necessary until next sampling). DOSE TALLY GCOa(&energy, &costh_Compton, &material0, &seed, &cgco_SHARED); rotate_double(&direction, costh_Compton, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed)); randno = energy - randno; // Save temporal copy of the negative of the energy lost in the interaction. DOSE TALLY // -- Find the new energy interval: index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite) if (index>-1) // 'index' will be negative only when the energy is below the tabulated minimum energy: particle will be then absorbed (rejected) after tallying the dose. { // -- Get the Woodcock MFP for the new energy (energy above minimum cutoff): float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP material_old = -2; // Set an impossible material to force an update of the MFPs data for the nex energy interval // -- Update scatter state: if (scatter_state==(signed char)0) scatter_state = (signed char)1; // Set scatter_state == 1: Compton scattered particle else scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle } } else { prob += mfp_density * (mfp_table_read_a.z + energy * mfp_table_read_b.z); // Interpolate total Rayleigh MFP ('z' component) if (randno<prob) // [Checking Rayleigh scattering] { // *** Rayleigh interaction: // -- Sample angular deflection: double costh_Rayleigh; float pmax_current = rayleigh_table->pmax[(index+1)*MAX_MATERIALS+material0]; // Get max (ie, value for next bin?) cumul prob square form factor for Rayleigh sampling GRAa(&energy, &costh_Rayleigh, &material0, &pmax_current, &seed, rayleigh_table); rotate_double(&direction, costh_Rayleigh, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed)); // -- Update scatter state: if (scatter_state==(signed char)0) scatter_state = (signed char)2; // Set scatter_state == 1: Rayleigh scattered particle else scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle } else { // *** Photoelectric interaction (or pair production): mark particle for absorption after dose tally (ie, index<0)! 
randno = -energy; // Save temporal copy of the (negative) energy deposited in the interaction (variable randno not necessary anymore). index = -11; // A negative "index" marks that the particle was absorved and that it will never arrive at the detector. } } // -- Tally the dose deposited in Compton and photoelectric interactions: if (randno<-0.001f) { float Edep = -1.0f*randno; // If any energy was deposited, this variable will temporarily store the negative value of Edep. // -- Tally the dose deposited in the current material, if enabled (ie, array allocated and not null): if (materials_dose!=NULL) tally_materials_dose(&Edep, &material0, materials_dose); // !!tally_materials_dose!! // -- Tally the energy deposited in the current voxel, if enabled (tally disabled when dose_ROI_x_max_CONST is negative). DOSE TALLY // Optional code to skip dose tally in air (material=0): if (dose_ROI_x_max_CONST > -1 && 0!=material0) if (dose_ROI_x_max_CONST > -1) tally_voxel_energy_deposition(&Edep, &voxel_coord, voxels_Edep); } // -- Break interaction loop for particles that have been absorbed or with energy below the tabulated cutoff: particle is "absorbed" (ie, track discontinued). if (index<0) break; } // [Cycle the X-ray interaction loop] if (index>-1) { // -- Particle escaped the voxels but was not absorbed, check if it will arrive at the detector and tally its energy: tally_image(&energy, &position, &direction, &scatter_state, image, &source_data_SHARED, &detector_data_SHARED, &seed); } } // [Continue with a new history] // -- Store the final random seed used by the last thread in the grid to global memory in order to continue the random secuence in successive projections in same GPU without overlapping. !!DBTv1.4!! // Since I am only storing the 'x' component and using it to init both parts of the ranecu generator, the sequence will actually diverge, but I warranty that at least one MLCG will stay uncorrelated. !!DeBuG!! if ( (blockIdx.x == (gridDim.x-1)) && (threadIdx.x == (blockDim.x-1))) { *seed_input_device = seed.x; // Store seed in GPU memory, but only for the thread with the largest id } } // [All tracks simulated for this kernel call: return to CPU] //////////////////////////////////////////////////////////////////////////////// //! Tally the dose deposited in the voxels. //! This function is called whenever a particle suffers a Compton or photoelectric //! interaction. It is not necessary to call this function if the dose tally //! was disabled in the input file (ie, dose_ROI_x_max_CONST < 0). //! Electrons are not transported in MC-GPU and therefore we are approximating //! that the dose is equal to the KERMA (energy released by the photons alone). //! This approximation is acceptable when there is electronic equilibrium and when //! the range of the secondary electrons is shorter than the voxel size. Usually the //! doses will be acceptable for photon energies below 1 MeV. The dose estimates may //! not be accurate at the interface of low density volumes. //! //! We need to use atomicAdd() in the GPU to prevent that multiple threads update the //! same voxel at the same time, which would result in a lose of information. //! This is very improbable when using a large number of voxels but gives troubles //! with a simple geometries with few voxels (in this case the atomicAdd will slow //! down the code because threads will update the voxel dose secuentially). //! //! //! @param[in] Edep Energy deposited in the interaction //! 
@param[in] voxel_coord Voxel coordinates, needed to check if particle located inside the input region of interest (ROI) //! @param[out] voxels_Edep ulonglong2 array containing the 3D voxel dose and dose^2 (ie, uncertainty) as unsigned integers scaled by SCALE_eV. //////////////////////////////////////////////////////////////////////////////// __device__ inline void tally_voxel_energy_deposition(float* Edep, short3* voxel_coord, ulonglong2* voxels_Edep) { if((voxel_coord->x < dose_ROI_x_min_CONST) || (voxel_coord->x > dose_ROI_x_max_CONST) || (voxel_coord->y < dose_ROI_y_min_CONST) || (voxel_coord->y > dose_ROI_y_max_CONST) || (voxel_coord->z < dose_ROI_z_min_CONST) || (voxel_coord->z > dose_ROI_z_max_CONST)) { return; // -- Particle outside the ROI: return without tallying anything. } // -- Particle inside the ROI: tally Edep. register int DX = 1 + (int)(dose_ROI_x_max_CONST - dose_ROI_x_min_CONST); register int num_voxel = (int)(voxel_coord->x-dose_ROI_x_min_CONST) + ((int)(voxel_coord->y-dose_ROI_y_min_CONST))*DX + ((int)(voxel_coord->z-dose_ROI_z_min_CONST))*DX*(1 + (int)(dose_ROI_y_max_CONST-dose_ROI_y_min_CONST)); atomicAdd(&voxels_Edep[num_voxel].x, __float2ull_rn((*Edep)*SCALE_eV) ); // Energy deposited at the voxel, scaled by the factor SCALE_eV and rounded. atomicAdd(&voxels_Edep[num_voxel].y, __float2ull_rn((*Edep)*(*Edep)) ); // (not using SCALE_eV for std_dev to prevent overflow) return; } //////////////////////////////////////////////////////////////////////////////// //! Source that creates primary x rays, according to the defined source model. //! The particles are automatically moved to the surface of the voxel bounding box, //! to start the tracking inside a real material. If the sampled particle do not //! enter the voxels, it is init in the focal spot and the main program will check //! if it arrives at the detector or not. //! //! @param[in] source_data Structure describing the source. //! @param[in] source_energy_data_CONST Global variable in constant memory space describing the source energy spectrum. //! @param[out] position Initial particle position (particle transported inside the voxel bbox). //! @param[out] direction Sampled particle direction (cosine vectors). //! @param[out] energy Sampled energy of the new x ray. //! @param[in] seed Current seed of the random number generator, requiered to sample the movement direction. //! @param[out] absvox Set to <0 if primary particle will not cross the voxels, not changed otherwise (>0). 
//////////////////////////////////////////////////////////////////////////////// __device__ inline void source(float3* position, float3* direction, float* energy, int2* seed, unsigned int* absvox, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED) { // *** Sample the initial x-ray energy following the input energy spectrum using the Walker aliasing algorithm from PENELOPE: // The following code is equivalent to calling the function "seeki_walker": int sampled_bin = seeki_walker(source_data_CONST.espc_cutoff, source_data_CONST.espc_alias, ranecu(seed), source_data_CONST.num_bins_espc); int sampled_bin; float RN = ranecu(seed) * source_energy_data_CONST.num_bins_espc; // Find initial interval (array starting at 0): int int_part = __float2int_rd(RN); // -- Integer part (round down) float fraction_part = RN - ((float)int_part); // -- Fractional part if (fraction_part < source_energy_data_CONST.espc_cutoff[int_part]) // Check if we are in the aliased part sampled_bin = int_part; // Below the cutoff: return current value else sampled_bin = (int)source_energy_data_CONST.espc_alias[int_part]; // Above the cutoff: return alias // Linear interpolation of the final energy within the sampled energy bin: *energy = source_energy_data_CONST.espc[sampled_bin] + ranecu(seed) * (source_energy_data_CONST.espc[sampled_bin+1] - source_energy_data_CONST.espc[sampled_bin]); // *** If not a point source, sample the focal spot position using a uniformly-distributed angle on a sphere AND a Gaussian-distributed random radius: !!DBTv1.4!! if (source_data_SHARED->focal_spot_FWHM > 5.0e-7f) { float g = sample_gausspdf_below2sigma(seed); // Return a Gaussian distributed random value located at less than 2 sigma from the center. !!DBTv1.4!! // Cropping the Gaussian dist at 2 sigma to prevent generating photons unrealistically far from the focal spot center. The 2 sigma limit has been set arbitrary and will affect 4.55% of sampled locations. // Experimental focal spot measurements show that the spot is quite sharp [A Burgess, "Focal spots: I. MTF separability", Invest Radiol 12, p. 36-43 (1977)] //ALTERNATIVE METHOD: float g = sample_gausspdf(seed); // Return a Gaussian distributed random value. !!DBTv1.4!! //ALTERNATIVE METHOD: gausspdf(&g1, &g2, seed); // Sample 2 independent Gaussian distributed random variables. 
float cos_thetaFS = 2.0f*ranecu(seed)-1.0f; // Sample uniform points on a sphere float sin_thetaFS = sqrtf(1.0f-cos_thetaFS*cos_thetaFS); float phiFS = (PI*2.0f)*ranecu(seed); float cos_phiFS, sin_phiFS; sincos(phiFS, &sin_phiFS, &cos_phiFS); // Full Width at Half Maximum for Gaussian curve: FWHM = [2*sqrt(2*ln(2))] * sigma = 2.3548 * sigma // For a focal spot with FWHM = 0.0200 cm --> sigma = 0.0200/2.354820 = 0.0200*0.4246609 = 0.008493 float r = g * source_data_SHARED->focal_spot_FWHM * 0.424660900144f; // Use a Gaussian distribution for the radius // Set current focal spot position with sampled focal spot shift (source_data_SHARED->position was already rotated to the appropriate angle): position->x = source_data_SHARED->position.x + r*sin_thetaFS*cos_phiFS; position->y = source_data_SHARED->position.y + r*sin_thetaFS*sin_phiFS; position->z = source_data_SHARED->position.z + r*cos_thetaFS; } else { // Set default focal spot position for point source: position->x = source_data_SHARED->position.x; position->y = source_data_SHARED->position.y; position->z = source_data_SHARED->position.z; } // *** Sample the initial direction: do // Iterate sampling if the sampled direction is not acceptable to get a square field at the given phi (rejection sampling): force square field for any phi!! { // Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat). direction->z = source_data_SHARED->cos_theta_low + ranecu(seed)*source_data_SHARED->D_cos_theta; // direction->z = w = cos(theta_sampled) register float phi_sampled = source_data_SHARED->phi_low + ranecu(seed)*source_data_SHARED->D_phi; register float sin_theta_sampled = sqrtf(1.0f - direction->z*direction->z); float sinphi_sampled, cosphi_sampled; sincos(phi_sampled, &sinphi_sampled,&cosphi_sampled); // Calculate the SIN and COS at the same time. direction->y = sin_theta_sampled * sinphi_sampled; direction->x = sin_theta_sampled * cosphi_sampled; } while( (fabsf(direction->z/(direction->y+1.0e-8f)) > source_data_SHARED->max_height_at_y1cm) || // Force square field for any phi by rejection sampling. (The "+1e-8" prevents division by zero) (fabsf(direction->x/(direction->y+1.0e-8f)) > source_data_SHARED->max_width_at_y1cm) ); //!!DBTv1.4!! // -- Apply the rotation that moves the emission direction from the default direction pointing to (0,1,0), to the required acquistion orientation: apply_rotation(direction, source_data_SHARED->rot_fan); //!!DBTv1.4!! // *** Simulate motion blur (if needed): Rotate focal spot position and emission direction according to a uniformly-sampled angular motion blur !!DBTv1.4!! 
if (source_data_SHARED->rotation_blur>EPS) { position->x -= source_data_SHARED->rotation_point.x; // Move to the coordinate system where rotation point is at the origin to apply the rotation position->y -= source_data_SHARED->rotation_point.y; position->z -= source_data_SHARED->rotation_point.z; float blur_angle = source_data_SHARED->rotation_blur*(ranecu(seed)-0.5f); // Uniform sampling of angular motion blur before and after the nominal acquisition angle // rotate_around_axis_Rodrigues(&blur_angle, &source_data_SHARED->axis_of_rotation, position); // Rotate position around rotation angle using Rodrigues' formula (http://mathworld.wolfram.com/RodriguesRotationFormula.html) rotate_2vectors_around_axis_Rodrigues(&blur_angle, &source_data_SHARED->axis_of_rotation, position, direction); // Rotate position and direction around rotation angle using Rodrigues' formula (http://mathworld.wolfram.com/RodriguesRotationFormula.html) position->x += source_data_SHARED->rotation_point.x; // Move back to the real-world coordinate system where rotation point is not at the origin position->y += source_data_SHARED->rotation_point.y; position->z += source_data_SHARED->rotation_point.z; } // To be safe, renormalize the direction vector to 1 (should not be necessary but single precision math might accumulate errors) double NORM = rsqrt(direction->x*direction->x + direction->y*direction->y + direction->z*direction->z); // !!DeBuG!! Check if it is really necessary to renormalize in a real simulation!! direction->x = NORM*direction->x; direction->y = NORM*direction->y; direction->z = NORM*direction->z; // printf("%.20lf %.20lf %.20lf\n", NORM, rsqrt(direction->x*direction->x + direction->y*direction->y + direction->z*direction->z), diff); //!!VERBOSE!! !!DeBuG!! // *** Move the particle to the inside of the voxel bounding box: move_to_bbox(position, direction, absvox); } //////////////////////////////////////////////////////////////////////////////// //! Functions to moves a particle towards the inside of the voxelized geometry bounding box. //! An EPSILON distance is added to make sure the particles will be clearly inside the bbox, //! not exactly on the surface. //! //! This algorithm makes the following assumptions: //! - The back lower vertex of the voxel bounding box is always located at the origin: (x0,y0,z0)=(0,0,0). //! - The initial value of "position" corresponds to the focal spot location. //! - When a ray is not pointing towards the bbox plane that it should cross according to the sign of the direction, //! I assign a distance to the intersection =0 instead of the real negative distance. The wall that will be //! crossed to enter the bbox is always the furthest and therefore a 0 distance will never be used except //! in the case of a ray starting inside the bbox or outside the bbox and not pointing to any of the 3 planes. //! In this situation the ray will be transported a 0 distance, meaning that it will stay at the focal spot. //! //! (Interesting information on ray-box intersection: http://tog.acm.org/resources/GraphicsGems/gems/RayBox.c) //! //! @param[in,out] position Particle position: initially set to the focal spot, returned transported inside the voxel bbox. //! @param[out] direction Sampled particle direction (cosine vectors). //! @param[out] intersection_flag Set to <0 if particle outside bbox and will not cross the voxels, not changed otherwise. //! @param[in] size_bbox Global variable from structure voxel_data_CONST: size of the bounding box. //! 
@param[in] offset Global variable from structure voxel_data_CONST: offset of the geometry in x, y, and z. //////////////////////////////////////////////////////////////////////////////// __device__ inline void move_to_bbox(float3* position, float3* direction, unsigned int* intersection_flag) { float dist_y, dist_x, dist_z; // -Distance to the nearest Y plane: if ((direction->y) > EPS_SOURCE) // Moving to +Y: check distance to y=0 plane { // Check Y=0 (bbox wall): if (position->y > voxel_data_CONST.offset.y) //!!DBTv1.4!! Allowing a 3D offset of the voxelized geometry (default origin at lower back corner). dist_y = 0.0f; // No intersection with this plane: particle inside or past the box // The actual distance would be negative but we set it to 0 bc we will not move the particle if no intersection exist. else dist_y = EPS_SOURCE + (voxel_data_CONST.offset.y-position->y)/(direction->y); // dist_y > 0 for sure in this case } else if ((direction->y) < NEG_EPS_SOURCE) { // Check Y=voxel_data_CONST.size_bbox.y: if (position->y < (voxel_data_CONST.size_bbox.y + voxel_data_CONST.offset.y)) dist_y = 0.0f; // No intersection with this plane else dist_y = EPS_SOURCE + (voxel_data_CONST.size_bbox.y + voxel_data_CONST.offset.y - position->y)/(direction->y); // dist_y > 0 for sure in this case } else // (direction->y)~0 dist_y = NEG_INF; // Particle moving parallel to the plane: no interaction possible (set impossible negative dist = -INFINITE) // -Distance to the nearest X plane: if ((direction->x) > EPS_SOURCE) { // Check X=0: if (position->x > voxel_data_CONST.offset.x) dist_x = 0.0f; else dist_x = EPS_SOURCE + (voxel_data_CONST.offset.x-position->x)/(direction->x); // dist_x > 0 for sure in this case } else if ((direction->x) < NEG_EPS_SOURCE) { // Check X=voxel_data_CONST.size_bbox.x: if (position->x < (voxel_data_CONST.size_bbox.x+voxel_data_CONST.offset.x)) dist_x = 0.0f; else dist_x = EPS_SOURCE + (voxel_data_CONST.size_bbox.x + voxel_data_CONST.offset.x - position->x)/(direction->x); // dist_x > 0 for sure in this case } else dist_x = NEG_INF; // -Distance to the nearest Z plane: if ((direction->z) > EPS_SOURCE) { // Check Z=0: if (position->z > voxel_data_CONST.offset.z) dist_z = 0.0f; else dist_z = EPS_SOURCE + (voxel_data_CONST.offset.z - position->z)/(direction->z); // dist_z > 0 for sure in this case } else if ((direction->z) < NEG_EPS_SOURCE) { // Check Z=voxel_data_CONST.size_bbox.z: if (position->z < (voxel_data_CONST.size_bbox.z+voxel_data_CONST.offset.z)) dist_z = 0.0f; else dist_z = EPS_SOURCE + (voxel_data_CONST.size_bbox.z + voxel_data_CONST.offset.z - position->z)/(direction->z); // dist_z > 0 for sure in this case } else dist_z = NEG_INF; // -- Find the longest distance plane, which is the one that has to be crossed to enter the bbox. // Storing the maximum distance in variable "dist_z". Distance will be =0 if no intersection exists or // if the x ray is already inside the bbox. if ( (dist_y>dist_x) && (dist_y>dist_z) ) dist_z = dist_y; // dist_z == dist_max else if (dist_x>dist_z) dist_z = dist_x; // else // dist_max = dist_z; // -- Move particle from the focal spot (current location) to the bbox wall surface (slightly inside): float x = position->x + dist_z * direction->x; float y = position->y + dist_z * direction->y; float z = position->z + dist_z * direction->z; // Check if the new position is outside the bbox. 
If not, return the moved location: if ( (x < voxel_data_CONST.offset.x) || (x > (voxel_data_CONST.size_bbox.x+voxel_data_CONST.offset.x)) || (y < voxel_data_CONST.offset.y) || (y > (voxel_data_CONST.size_bbox.y+voxel_data_CONST.offset.y)) || (z < voxel_data_CONST.offset.z) || (z > (voxel_data_CONST.size_bbox.z+voxel_data_CONST.offset.z)) ) { (*intersection_flag) = FLAG_OUTSIDE_VOXELS; // OLD: -111; // Particle outside the bbox AND not pointing to the bbox: set absvox<0 to skip interaction sampling. Leave particle position at focal spot. } else { position->x = x; position->y = y; position->z = z; } } //////////////////////////////////////////////////////////////////////////////// //! Upper limit of the number of random values sampled in a single track. //! I need a large leap for simulations containing a heavy element that causes a lot of delta scattering (eg, for a 15 keV simulation with bone and water I might have 10 delta scatterings; adding Tungsten I might have >650 deltas, and each delta iteration consumes two PRN). #define LEAP_DISTANCE 2048 // #define LEAP_DISTANCE 256 //!!DeBuG!! !!DBTv1.4!! 256 is too low when using Tungsten!!! //! Multipliers and moduli for the two MLCG in RANECU. #define a1_RANECU 40014 #define m1_RANECU 2147483563 #define a2_RANECU 40692 #define m2_RANECU 2147483399 //////////////////////////////////////////////////////////////////////////////// //! Initialize the pseudo-random number generator (PRNG) RANECU to a position //! far away from the previous history (leap frog technique). //! //! Each calculated seed initiates a consecutive and disjoint sequence of //! pseudo-random numbers with length LEAP_DISTANCE, that can be used to //! in a parallel simulation (Sequence Splitting parallelization method). //! The basic equation behind the algorithm is: //! S(i+j) = (a**j * S(i)) MOD m = [(a**j MOD m)*S(i)] MOD m , //! which is described in: //! P L'Ecuyer, Commun. ACM 31 (1988) p.742 //! //! This function has been adapted from "seedsMLCG.f", see: //! A Badal and J Sempau, Computer Physics Communications 175 (2006) p. 440-450 //! //! @param[in] history Particle bach number. //! @param[in] seed_input Initial PRNG seed input (used to initiate both MLCGs in RANECU). //! @param[out] seed Initial PRNG seeds for the present history. //! //////////////////////////////////////////////////////////////////////////////// __device__ inline void init_PRNG(int history_batch, int histories_per_thread, int seed_input, int2* seed) { // -- Move the RANECU generator to a unique position for the current batch of histories: // I have to use an "unsigned long long int" value to represent all the simulated histories in all previous batches // The maximum unsigned long long int value is ~1.8e19: if history >1.8e16 and LEAP_DISTANCE==1000, 'leap' will overflow. // **** 1st MLCG: unsigned long long int leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE); int y = 1; int z = a1_RANECU; // -- Calculate the modulo power '(a^leap)MOD(m)' using a divide-and-conquer algorithm adapted to modulo arithmetic for(;;) { // (A2) Halve n, and store the integer part and the residue if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2); { leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2); y = abMODm(m1_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m if (0==leap) break; // (A4) leap==0? 
==> finish } else // (leap is even) { leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2); } z = abMODm(m1_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m } // AjMODm1 = y; // Exponentiation finished: AjMODm = expMOD = y = a^j // -- Compute and display the seeds S(i+j), from the present seed S(i), using the previously calculated value of (a^j)MOD(m): // S(i+j) = [(a**j MOD m)*S(i)] MOD m // S_i = abMODm(m,S_i,AjMODm) seed->x = abMODm(m1_RANECU, seed_input, y); // Using the input seed as the starting seed // **** 2nd MLCG (repeating the previous calculation for the 2nd MLCG parameters): leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE); y = 1; z = a2_RANECU; for(;;) { // (A2) Halve n, and store the integer part and the residue if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2); { leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2); y = abMODm(m2_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m if (0==leap) break; // (A4) leap==0? ==> finish } else // (leap is even) { leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2); } z = abMODm(m2_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m } // AjMODm2 = y; seed->y = abMODm(m2_RANECU, seed_input, y); // Using the input seed as the starting seed } ///////////////////////////////////////////////////////////////////// //! Calculate "(a1*a2) MOD m" with 32-bit integers and avoiding //! the possible overflow, using the Russian Peasant approach //! modulo m and the approximate factoring method, as described //! in: L'Ecuyer and Cote, ACM Trans. Math. Soft. 17 (1991). //! //! This function has been adapted from "seedsMLCG.f", see: //! Badal and Sempau, Computer Physics Communications 175 (2006) //! //! @param[in] m,a,s MLCG parameters //! @return (a1*a2) MOD m // // Input: 0 < a1 < m // 0 < a2 < m // // Return value: (a1*a2) MOD m // ///////////////////////////////////////////////////////////////////// __device__ __host__ inline int abMODm(int m, int a, int s) { // CAUTION: the input parameters are modified in the function but should not be returned to the calling function! (pass by value!) int q, k; int p = -m; // p is always negative to avoid overflow when adding // ** Apply the Russian peasant method until "a =< 32768": while (a>32768) // We assume '32' bit integers (4 bytes): 2^(('32'-2)/2) = 32768 { if (0!=(a&1)) // Store 's' when 'a' is odd Equivalent code: if (1==(a%2)) { p += s; if (p>0) p -= m; } a >>= 1; // Half a (move bits 1 position right) Equivalent code: a = a/2; s = (s-m) + s; // Double s (MOD m) if (s<0) s += m; // (s is always positive) } // ** Employ the approximate factoring method (a is small enough to avoid overflow): q = (int) m / a; k = (int) s / q; s = a*(s-k*q)-k*(m-q*a); while (s<0) s += m; // ** Compute the final result: p += s; if (p<0) p += m; return p; } //////////////////////////////////////////////////////////////////////////////// //! Pseudo-random number generator (PRNG) RANECU returning a float value //! (single precision version). //! //! @param[in,out] seed PRNG seed (seed kept in the calling function and updated here). //! @return PRN double value in the open interval (0,1) //! 
//////////////////////////////////////////////////////////////////////////////// __device__ inline float ranecu(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; return (__int2float_rn(i2)*4.65661305739e-10f); // 4.65661305739e-10 == 1/2147483563 } //////////////////////////////////////////////////////////////////////////////// //! Pseudo-random number generator (PRNG) RANECU returning a double value. //////////////////////////////////////////////////////////////////////////////// __device__ inline double ranecu_double(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; return (__int2double_rn(i2)*4.6566130573917692e-10); } //////////////////////////////////////////////////////////////////////////////// __host__ inline double ranecu_double_CPU(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; return ((double)(i2)*4.6566130573917692e-10); } //////////////////////////////////////////////////////////////////////////////// //! Find the voxel that contains the current position. //! Report the voxel absolute index and the x,y,z indices. //! The structure containing the voxel number and size is read from CONSTANT memory. //! //! @param[in] position Particle position //! @param[out] voxel_coord Pointer to three integer values (short3*) that will store the x,y and z voxel indices. //! @return Returns "absvox", the voxel number where the particle is //! located (negative if position outside the voxel bbox). //! //////////////////////////////////////////////////////////////////////////////// __device__ inline unsigned int locate_voxel(float3 p, short3* voxel_coord) { p.x -= voxel_data_CONST.offset.x; // Translate the coordinate system to a reference where the voxel's lower back corner is at the origin p.y -= voxel_data_CONST.offset.y; p.z -= voxel_data_CONST.offset.z; if ( (p.y < EPS) || (p.y > (voxel_data_CONST.size_bbox.y-EPS)) || (p.x < EPS) || (p.x > (voxel_data_CONST.size_bbox.x-EPS)) || (p.z < EPS) || (p.z > (voxel_data_CONST.size_bbox.z-EPS)) ) { // -- Particle escaped the voxelized geometry: return FLAG_OUTSIDE_VOXELS; // OLD CODE: return -1; !!DBTv1.4!! } // -- Particle inside the voxelized geometry, find current voxel: // The truncation from float to integer could give troubles for negative coordinates but this will never happen thanks to the IF at the begining of this function. 
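  // (Note: plain C truncation rounds towards zero, e.g. (int)(-0.3f)==0 while __float2int_rd(-0.3f)==-1; both
  //  conversions agree here because the bounds check above guarantees the shifted position components are positive.)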
// (no need to use the CUDA function to convert float to integer rounding down (towards minus infinite): __float2int_rd) register int voxel_coord_x, voxel_coord_y, voxel_coord_z; voxel_coord_x = __float2int_rd(p.x * voxel_data_CONST.inv_voxel_size.x); voxel_coord_y = __float2int_rd(p.y * voxel_data_CONST.inv_voxel_size.y); voxel_coord_z = __float2int_rd(p.z * voxel_data_CONST.inv_voxel_size.z); voxel_coord->x = (short int) voxel_coord_x; // Output the voxel coordinates as short int (2 bytes) instead of int (4 bytes) to save registers; avoid type castings in the calculation of the return value. voxel_coord->y = (short int) voxel_coord_y; voxel_coord->z = (short int) voxel_coord_z; return ((unsigned int)(voxel_coord_x + voxel_coord_y*(voxel_data_CONST.num_voxels.x)) + ((unsigned int)voxel_coord_z)*(voxel_data_CONST.num_voxels.x)*(voxel_data_CONST.num_voxels.y)); } ////////////////////////////////////////////////////////////////////// //! Rotates a vector; the rotation is specified by giving //! the polar and azimuthal angles in the "self-frame", as //! determined by the vector to be rotated. //! This function is a literal translation from Fortran to C of //! PENELOPE (v. 2006) subroutine "DIRECT". //! //! @param[in,out] (u,v,w) input vector (=d) in the lab. frame; returns the rotated vector components in the lab. frame //! @param[in] costh cos(theta), angle between d before and after turn //! @param[in] phi azimuthal angle (rad) turned by d in its self-frame // // Output: // (u,v,w) -> rotated vector components in the lab. frame // // Comments: // -> (u,v,w) should have norm=1 on input; if not, it is // renormalized on output, provided norm>0. // -> The algorithm is based on considering the turned vector // d' expressed in the self-frame S', // d' = (sin(th)cos(ph), sin(th)sin(ph), cos(th)) // and then apply a change of frame from S' to the lab // frame. S' is defined as having its z' axis coincident // with d, its y' axis perpendicular to z and z' and its // x' axis equal to y'*z'. The matrix of the change is then // / uv/rho -v/rho u \ // S ->lab: | vw/rho u/rho v | , rho=(u^2+v^2)^0.5 // \ -rho 0 w / // -> When rho=0 (w=1 or -1) z and z' are parallel and the y' // axis cannot be defined in this way. Instead y' is set to // y and therefore either x'=x (if w=1) or x'=-x (w=-1) ////////////////////////////////////////////////////////////////////// __device__ inline void rotate_double(float3* direction, double costh, double phi) // The direction vector is single precision but the rotation is performed in double precision for increased accuracy. { double DXY, NORM, cosphi, sinphi, SDT; DXY = direction->x*direction->x + direction->y*direction->y; sincos(phi, &sinphi,&cosphi); // Calculate the SIN and COS at the same time. sinphi = sin(phi); cosphi = cos(phi); // **** Ensure normalisation NORM = DXY + direction->z*direction->z; // !!DeBuG!! Check if it is really necessary to renormalize in a real simulation!! 
if (fabs(NORM-1.0)>1.0e-14) { NORM = rsqrt(NORM); direction->x = NORM*direction->x; direction->y = NORM*direction->y; direction->z = NORM*direction->z; DXY = direction->x*direction->x + direction->y*direction->y; } if (DXY>1.0e-28) { SDT = sqrt((1.0-costh*costh)/DXY); float direction_x_in = direction->x; direction->x = direction->x*costh + SDT*(direction_x_in*direction->z*cosphi-direction->y*sinphi); direction->y = direction->y*costh+SDT*(direction->y*direction->z*cosphi+direction_x_in*sinphi); direction->z = direction->z*costh-DXY*SDT*cosphi; } else { SDT = sqrt(1.0-costh*costh); direction->y = SDT*sinphi; if (direction->z>0.0) { direction->x = SDT*cosphi; direction->z = costh; } else { direction->x =-SDT*cosphi; direction->z =-costh; } } } ////////////////////////////////////////////////////////////////////// // *********************************************************************** // * Translation of PENELOPE's "SUBROUTINE GRAa" from FORTRAN77 to C * // *********************************************************************** //! Sample a Rayleigh interaction using the sampling algorithm //! used in PENELOPE 2006. //! //! @param[in] energy Particle energy (not modified with Rayleigh) //! @param[out] costh_Rayleigh Cosine of the angular deflection //! @param[in] material Current voxel material // // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // C PENELOPE/PENGEOM (version 2006) C // C Copyright (c) 2001-2006 C // C Universitat de Barcelona C // C Permission to use, copy, modify, distribute and sell this software C // C and its documentation for any purpose is hereby granted without C // C fee, provided that the above copyright notice appears in all C // C copies and that both that copyright notice and this permission C // C notice appear in all supporting documentation. The Universitat de C // C Barcelona makes no representations about the suitability of this C // C software for any purpose. It is provided "as is" without express C // C or implied warranty. C // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC ////////////////////////////////////////////////////////////////////// __device__ inline void GRAa(float *energy, double *costh_Rayleigh, int *mat, float *pmax_current, int2 *seed, struct rayleigh_struct* cgra) { /* **** Energy grid and interpolation constants for the current energy. */ double xmax = ((double)*energy) * 8.065535669099010e-5; // 8.065535669099010e-5 == 2.0*20.6074/510998.918 double x2max = min_value( (xmax*xmax) , ((double)cgra->xco[(*mat+1)*NP_RAYLEIGH - 1]) ); // Get the last tabulated value of xco for this mat if (xmax < 0.01) { do { *costh_Rayleigh = 1.0 - ranecu_double(seed) * 2.0; } while ( ranecu_double(seed) > (((*costh_Rayleigh)*(*costh_Rayleigh)+1.0)*0.5) ); return; } for(;;) // (Loop will iterate everytime the sampled value is rejected or above maximum) { double ru = ranecu_double(seed) * (double)(*pmax_current); // Pmax for the current energy is entered as a parameter /* **** Selection of the interval (binary search within pre-calculated limits). 
*/ int itn = (int)(ru * (NP_RAYLEIGH-1)); // 'itn' will never reach the last interval 'NP_RAYLEIGH-1', but this is how RITA is implemented in PENELOPE int i__ = (int)cgra->itlco[itn + (*mat)*NP_RAYLEIGH]; int j = (int)cgra->ituco[itn + (*mat)*NP_RAYLEIGH]; if ((j - i__) > 1) { do { register int k = (i__ + j)>>1; // >>1 == /2 if (ru > cgra->pco[k -1 + (*mat)*NP_RAYLEIGH]) i__ = k; else j = k; } while ((j - i__) > 1); } /* **** Sampling from the rational inverse cumulative distribution. */ int index = i__ - 1 + (*mat)*NP_RAYLEIGH; double rr = ru - cgra->pco[index]; double xx; if (rr > 1e-16) { double d__ = (double)(cgra->pco[index+1] - cgra->pco[index]); float aco_index = cgra->aco[index], bco_index = cgra->bco[index], xco_index = cgra->xco[index]; // Avoid multiple accesses to the same global variable xx = (double)xco_index + (double)(aco_index + 1.0f + bco_index)* d__* rr / (d__*d__ + (aco_index*d__ + bco_index*rr) * rr) * (double)(cgra->xco[index+1] - xco_index); } else { xx = cgra->xco[index]; } if (xx < x2max) { // Sampled value below maximum possible value: *costh_Rayleigh = 1.0 - 2.0 * xx / x2max; // !!DeBuG!! costh_Rayleigh in double precision, but not all intermediate steps are!? /* **** Rejection: */ if (ranecu_double(seed) < (((*costh_Rayleigh)*(*costh_Rayleigh) + 1.0)*0.5)) break; // Sample value not rejected! break loop and return. } } } /* graa */ ////////////////////////////////////////////////////////////////////////// // *********************************************************************** // * Translation of PENELOPE's "SUBROUTINE GCOa" from FORTRAN77 to C * // ********************************************************************* * //! Random sampling of incoherent (Compton) scattering of photons, using //! the sampling algorithm from PENELOPE 2006: //! Relativistic impulse approximation with analytical one-electron Compton profiles // NOTE: In penelope, Doppler broadening is not used for E greater than 5 MeV. // We don't use it in GPU to reduce the lines of code and prevent using COMMON/compos/ZT(M) //! @param[in,out] energy incident and final photon energy (eV) //! @param[out] costh_Compton cosine of the polar scattering angle //! @param[in] material Current voxel material //! @param[in] seed RANECU PRNG seed // // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // C PENELOPE/PENGEOM (version 2006) C // C Copyright (c) 2001-2006 C // C Universitat de Barcelona C // C Permission to use, copy, modify, distribute and sell this software C // C and its documentation for any purpose is hereby granted without C // C fee, provided that the above copyright notice appears in all C // C copies and that both that copyright notice and this permission C // C notice appear in all supporting documentation. The Universitat de C // C Barcelona makes no representations about the suitability of this C // C software for any purpose. It is provided "as is" without express C // C or implied warranty. 
C // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // // ************************************************************************ __device__ inline void GCOa(float *energy, double *costh_Compton, int *mat, int2 *seed, struct compton_struct* cgco_SHARED) { float s, a1, s0, af, ek, ek2, ek3, tau, pzomc, taumin; float rn[MAX_SHELLS]; double cdt1; // Some variables used in PENELOPE have been eliminated to save register: float aux, taum2, fpzmax, a, a2, ek1 ,rni, xqc, fpz, pac[MAX_SHELLS]; int i__; int my_noscco = cgco_SHARED->noscco[*mat]; // Store the number of oscillators for the input material in a local variable //!!VERBOSE!! static int warning_flag_1 = -1, warning_flag_2 = -1, warning_flag_3 = -1; // Write warnings for the CPU code, but only once. !!DeBuG!! ek = *energy * 1.956951306108245e-6f; // (1.956951306108245e-6 == 1.0/510998.918) ek2 = ek * 2.f + 1.f; ek3 = ek * ek; // ek1 = ek3 - ek2 - 1.; taumin = 1.f / ek2; // taum2 = taumin * taumin; a1 = logf(ek2); // a2 = a1 + ek * 2. * (ek + 1.) * taum2; // a2 was used only once, code moved below /* **** Incoherent scattering function for theta=PI. */ s0 = 0.0f; for (i__ = 0; i__ < my_noscco; i__++) { register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS]; if (temp < *energy) { register float aux = *energy * (*energy - temp) * 2.f; pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f; // 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!! // ORIGINAL: pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f); if (pzomc > 0.0f) temp = (0.707106781186545f+pzomc*1.4142135623731f) * (0.707106781186545f+pzomc*1.4142135623731f); else temp = (0.707106781186545f-pzomc*1.4142135623731f) * (0.707106781186545f-pzomc*1.4142135623731f); temp = 0.5f * expf(0.5f - temp); // Calculate EXP outside the IF to avoid branching if (pzomc > 0.0f) temp = 1.0f - temp; s0 += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp; } } /* **** Sampling tau. */ do { if (ranecu(seed)*/*a2=*/(a1+2.*ek*(ek+1.f)*taumin*taumin) < a1) { tau = powf(taumin, ranecu(seed)); // !!DeBuG!! "powf()" has a big error (7 ULP), the double version has only 2!! } else { tau = sqrtf(1.f + ranecu(seed) * (taumin * taumin - 1.f)); } cdt1 = (double)(1.f-tau) / (((double)tau)*((double)*energy)*1.956951306108245e-6); // !!DeBuG!! The sampled COS will be double precision, but TAU is not!!! if (cdt1 > 2.0) cdt1 = 1.99999999; // !!DeBuG!! Make sure that precision error in POW, SQRT never gives cdt1>2 ==> costh_Compton<-1 /* **** Incoherent scattering function. */ s = 0.0f; for (i__ = 0; i__ < my_noscco; i__++) { register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS]; if (temp < *energy) { register float aux = (*energy) * (*energy - temp) * ((float)cdt1); if ((aux>1.0e-12f)||(temp>1.0e-12f)) // !!DeBuG!! Make sure the SQRT argument is never <0, and that we never get 0/0 -> NaN when aux=temp=0 !! { pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f; // 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!! // ORIGINAL: pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f); } else { pzomc = 0.002f; // !!DeBuG!! 
Using a rough approximation to a sample value of pzomc found using pure double precision: NOT RIGUROUS! But this code is expected to be used very seldom, only in extreme cases. //!!VERBOSE!! if (warning_flag_1<0) //!!VERBOSE!! { warning_flag_1 = +1; // Disable warning, do not show again //!!VERBOSE!! // printf(" [... Small numerical precision error detected computing \"pzomc\" in GCOa (this warning will not be repeated).]\n i__=%d, aux=%.14f, temp=%.14f, pzomc(forced)=%.14f, uico=%.14f, energy=%.7f, cgco_SHARED->fj0=%.14f, mat=%d, cdt1=%.14lf\n", (int)i__, aux, temp, pzomc, cgco_SHARED->uico[*mat+i__*MAX_MATERIALS], *energy, cgco_SHARED->fj0[*mat+i__*MAX_MATERIALS], (int)*mat, cdt1); // !!DeBuG!! //!!VERBOSE!! } } temp = pzomc * 1.4142135623731f; if (pzomc > 0.0f) temp = 0.5f - (temp + 0.70710678118654502f) * (temp + 0.70710678118654502f); // Calculate exponential argument else temp = 0.5f - (0.70710678118654502f - temp) * (0.70710678118654502f - temp); temp = 0.5f * expf(temp); // All threads will calculate the expf together if (pzomc > 0.0f) temp = 1.0f - temp; s += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp; rn[i__] = temp; } } } while( (ranecu(seed)*s0) > (s*(1.0f+tau*(/*ek1=*/(ek3 - ek2 - 1.0f)+tau*(ek2+tau*ek3)))/(ek3*tau*(tau*tau+1.0f))) ); // **** Rejection function *costh_Compton = 1.0 - cdt1; /* **** Target electron shell. */ for (;;) { register float temp = s*ranecu(seed); float pac = 0.0f; int ishell = my_noscco - 1; // First shell will have number 0 for (i__ = 0; i__ < (my_noscco-1); i__++) // !!DeBuG!! Iterate to (my_noscco-1) only: the last oscillator is excited in case all other fail (no point in double checking) ?? { pac += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * rn[i__]; // !!DeBuG!! pac[] is calculated on the fly to save registers! if (pac > temp) // pac[] is calculated on the fly to save registers! { ishell = i__; break; } } /* **** Projected momentum of the target electron. */ temp = ranecu(seed) * rn[ishell]; if (temp < 0.5f) { pzomc = (0.70710678118654502f - sqrtf(0.5f - logf(temp + temp))) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f); } else { pzomc = (sqrtf(0.5f - logf(2.0f - 2.0f*temp)) - 0.70710678118654502f) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f); } if (pzomc < -1.0f) { continue; // re-start the loop } /* **** F(EP) rejection. */ temp = tau * (tau - (*costh_Compton) * 2.f) + 1.f; // this variable was originally called "xqc" // af = sqrt( max_value(temp,1.0e-30f) ) * (tau * (tau - *costh_Compton) / max_value(temp,1.0e-30f) + 1.f); //!!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!! if (temp>1.0e-20f) // !!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!! { af = sqrtf(temp) * (tau * (tau - ((float)(*costh_Compton))) / temp + 1.f); } else { // When using single precision, it is possible (but very uncommon) to get costh_Compton==1 and tau==1; then temp is 0 and 'af' can not be calculated (0/0 -> nan). Analysing the results obtained using double precision, we found that 'af' would be almost 0 in this situation, with an "average" about ~0.002 (this is just a rough estimation, but using af=0 the value would never be rejected below). af = 0.00200f; // !!DeBuG!! //!!VERBOSE!! if (warning_flag_2<0) //!!VERBOSE!! { warning_flag_2 = +1; // Disable warning, do not show again //!!VERBOSE!! printf(" [... 
Small numerical precision error detected computing \"af\" in GCOa (this warning will not be repeated)].\n xqc=%.14f, af(forced)=%.14f, tau=%.14f, costh_Compton=%.14lf\n", temp, af, tau, *costh_Compton); // !!DeBuG!! //!!VERBOSE!! } } if (af > 0.0f) { temp = af * 0.2f + 1.f; // this variable was originally called "fpzmax" } else { temp = 1.f - af * 0.2f; } if ( ranecu(seed)*temp < /*fpz =*/(af * max_value( min_value(pzomc,0.2f) , -0.2f ) + 1.f) ) { break; } } /* **** Energy of the scattered photon. */ { register float t, b1, b2, temp; t = pzomc * pzomc; b1 = 1.f - t * tau * tau; b2 = 1.f - t * tau * ((float)(*costh_Compton)); temp = sqrtf( fabsf(b2 * b2 - b1 * (1.0f - t)) ); if (pzomc < 0.0f) temp *= -1.0f; // !Error! energy may increase (slightly) due to inacurate calculation! !!DeBuG!! t = (tau / b1) * (b2 + temp); if (t > 1.0f) { //!!VERBOSE!! if (warning_flag_3<0) //!!VERBOSE!! { warning_flag_3 = +1; // Disable warning, do not show again //!!VERBOSE!! printf("\n [... a Compton event tried to increase the x ray energy due to precision error. Keeping initial energy. (This warning will not be repeated.)]\n scaling=%.14f, costh_Compton=%.14lf\n", t, *costh_Compton); // !!DeBuG!! //!!VERBOSE!! } t = 1.0f; // !!DeBuG!! Avoid increasing energy by hand!!! not nice!! } (*energy) *= t; // (*energy) *= (tau / b1) * (b2 + temp); // Original PENELOPE code } } // [End subroutine GCOa] //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //! Tally the depose deposited inside each material. //! This function is called whenever a particle suffers a Compton or photoelectric //! interaction. The energy released in each interaction is added and later in the //! report function the total deposited energy is divided by the total mass of the //! material in the voxelized object to get the dose. This naturally accounts for //! multiple densities for voxels with the same material (not all voxels have same mass). //! Electrons are not transported in MC-GPU and therefore we are approximating //! that the dose is equal to the KERMA (energy released by the photons alone). //! This approximation is acceptable when there is electronic equilibrium and //! when the range of the secondary electrons is shorter than the organ size. //! //! The function uses atomic functions for a thread-safe access to the GPU memory. //! We can check if this tally was disabled in the input file checking if the array //! materials_dose was allocated in the GPU (disabled if pointer = NULL). //! //! //! @param[in] Edep Energy deposited in the interaction //! @param[in] material Current material id number //! @param[out] materials_dose ulonglong2 array storing the mateials dose [in eV/g] and dose^2 (ie, uncertainty). //////////////////////////////////////////////////////////////////////////////// __device__ inline void tally_materials_dose(float* Edep, int* material, ulonglong2* materials_dose) { // Note: with many histories and few materials the materials_dose integer variables may overflow!! Using double precision floats would be better. Single precision is not good enough because adding small energies to a large counter would give problems. atomicAdd(&materials_dose[*material].x, __float2ull_rn((*Edep)*SCALE_eV) ); // Energy deposited at the material, scaled by the factor SCALE_eV and rounded. 
atomicAdd(&materials_dose[*material].y, __float2ull_rn((*Edep)*(*Edep)) ); // Square of the dose to estimate standard deviation (not using SCALE_eV for std_dev to prevent overflow) // OLD: materials_dose[*material].x += (unsigned long long int)((*Edep)*SCALE_eV + 0.5f); return; } /* !!inputDensity!! Replacing the hardcoded density_LUT look-up table function with an array in RAM or GPU constant memory: OLD LOOK-UP TABLE USED IN VICTRE SIMULATIONS: //////////////////////////////////////////////////////////////////////////////// //! Look up table that returns the pre-defined density of the input material. //////////////////////////////////////////////////////////////////////////////// __device__ __host__ // Function will be callable from host and also from device inline float density_LUT(int material) //!!FixedDensity_DBT!! { float density; switch(material) // Assuming that first material is number 0 { case 0: // air density = 0.0012f; break; case 1: // fat density = 0.92f; break; case 3: // glandular density = 1.035f; // - Johns&Yaffe1986: 1.035 ; Nominal: 1.06; break; case 10: // Compression Paddle density = 1.06; // polystyrene dens = 1.06 ; PMMA dens = 1.19 !!DBTv1.5!! break; case 2: // skin density = 1.090f; break; case 4: // nipple density = 1.090f; // -> skin? break; // case 6: // muscle // density = 1.05f; // break; case 5: // ligament(88) density = 1.120f; // -> connective Woodard? break; // case 9: // terminal duct lobular unit(95) // density = 1.04f; // -> muscle? // break; // case 7: // duct(125) // density = 1.05f; // break; case 8: // artery(150) and vein(225) density = 1.0f; break; case 11: // Mass/Signal density = 1.06f; // - Johns&Yaffe1986: Min: 1.027, Mean: 1.044, Max: 1.058 ; Nominal: 1.06; break; case 12: // ==Microcalcification density = 1.781f; // 1.781=0.84*2.12 -> reduced density a factor 0.84 according to: Hadjipanteli et al., Phys Med Biol 62 p 858 (2017) // Nominal density Calcium_oxalate=2.12 break; case 13: // ==Tungsten edge density = 19.30f; // !!detectorModel!! break; case 14: // ==a-Se detector density = 4.50f; // !!detectorModel!! break; default: density = 1.05f; // Using the default value for materials that have the same density. } return density; } */ //////////////////////////////////////////////////////////////////////////////// //! Tally a radiographic projection image using a detector layer with the input thickness and material composition. //! This model will reproduce the geometric spreading of the point spread function and the real detector transmission. //////////////////////////////////////////////////////////////////////////////// __device__ inline void tally_image(float* energy, float3* position, float3* direction, signed char* scatter_state, unsigned long long int* image, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED, int2* seed) //!!detectorModel!! { // Rotate direction to the coordinate system with the detector on XZ plane (Y=0): // !!DBTv1.4!! apply_rotation(direction, detector_data_SHARED->rot_inv); //!!DBTv1.4!! // Check the angle between the x-ray direction and the Y axis (normal of the detector); return if the particle is moving away from the detector: if (direction->y < 0.0175f) return; // Reject particle: angle towards Y axis larger than 89 deg --> particle moving parallel or away from the detector! // Translate coordinate system to have detector centered at origin: // !!DBTv1.4!! 
position->x -= detector_data_SHARED->center.x; position->y -= detector_data_SHARED->center.y; position->z -= detector_data_SHARED->center.z; // Rotate coordinate system to have detector on XZ plane (Y=0): // !!DBTv1.4!! apply_rotation(position, detector_data_SHARED->rot_inv); // Sample the distance to the next interaction in the material of the detector or antiscatter grid protective covers, to determine if the particle will be absorbed in the covers: !!DBTv1.5!! // ASSUMPTIONS: neglecting scattering and fluorescence in the covers; using MFP at average energy spectrum, not the real MFP at current energy. !!DeBuG!! if (detector_data_SHARED->cover_MFP>0.0f) if ( (-detector_data_SHARED->cover_MFP*logf(ranecu(seed))) < detector_data_SHARED->cover_thickness ) // !!DBTv1.5!! return; // Do not tally particle lost in the cover !!DBTv1.5!! // Distance from the particle position to the detector at plane XZ (Y=0): float dist_detector = -position->y/direction->y; // Sample and add the extra distance the particle needs to travel to reach the first interaction inside the scintillator (particle not detected if interaction behind thickness): !!detectorModel!! dist_detector += -detector_data_SHARED->scintillator_MFP*logf(ranecu(seed)); // Add distance to next interaction inside the detector material to the detector distance //!!detectorModel!! // *** Translate the particle to the detector plane: position->x = position->x + dist_detector*direction->x; position->y = position->y + dist_detector*direction->y; position->z = position->z + dist_detector*direction->z; if (position->y > detector_data_SHARED->scintillator_thickness) return; // Do not tally energy if particle does not interact inside the detector layer. // !!detectorModel!! !!DBTv1.4!! // *** Find if particle interacted inside the detector bbox, and compute pixel number (taking into account a possible offset of the detector far from the default centered with the source): int pixel_coord_x = __float2int_rd((position->x - detector_data_SHARED->offset.x + 0.5f*detector_data_SHARED->width_X) * detector_data_SHARED->inv_pixel_size_X); // CUDA intrinsic function converts float to integer rounding down (to minus inf) if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x)) { int pixel_coord_z = __float2int_rd((position->z - detector_data_SHARED->offset.y + 0.5f*detector_data_SHARED->height_Z) * detector_data_SHARED->inv_pixel_size_Z); if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y)) { // --Sample if the particle is absorbed in the antiscatter grid (scatter or fluorescence in the grid not simulated): if (detector_data_SHARED->grid_freq>0.0f) { if (ranecu(seed) > antiscatter_grid_transmission_prob(position, direction, detector_data_SHARED)) //!!DBTv1.5!! return; } // --Sample if all the energy is deposited in the pixel or if a fluorescence x-ray was generated and was able to escape detection: // (k-edge energies available at: http://www.esrf.eu/UsersAndScience/Experiments/StructMaterials/ID11/ID11UserGuide/ID11Edges) int flag_fluorescence = 0; float edep = *energy; if (*energy > detector_data_SHARED->kedge_energy) { if (ranecu(seed) < detector_data_SHARED->fluorescence_yield) { edep -= detector_data_SHARED->fluorescence_energy; // !!DBTv1.4!! Subtract the input average K fluorescence energy from the deposited energy. The fluorescence photon is simulated afterwards. flag_fluorescence = 1; // !!TrackFluorescence!! } } // -- Particle enters the detector! 
Tally the particle energy in the corresponding pixel (in integer SCALE_eV fractions of eV): // Using a CUDA atomic function (not available for global floats yet) to read and increase the pixel value in a single instruction, blocking interferences from other threads. // The offset for the primaries or scatter images are calculated considering that: // scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter. // - Do not count the energy deposited inside the top or bottom blocking (dead) layers of the detector (fluorescence still generated). !!BLOCKING_LAYER!! // After rotation, the detector top layer starts at Y=0 and grows towards positive Y (bottom), with radiation expected from negative Y, moving towards positive Y. if ((position->y > BLOCKING_LAYER_TOP) && (position->y < (detector_data_SHARED->scintillator_thickness-BLOCKING_LAYER_BOTTOM))) // !!BLOCKING_LAYER!! atomicAdd(( image + // Pointer to beginning of image array (int)(*scatter_state) * detector_data_SHARED->total_num_pixels + // Offset to corresponding scatter image (pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), // Offset to the corresponding pixel __float2ull_rn(edep*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded. // The maximum unsigned long long int value is ~1.8e19: // *** Track Fluorescence inside detector: !!TrackFluorescence!! if (flag_fluorescence==1) { // -- Sample direction of emission of fluorescence photon isotropically: direction->z = 1.0f - 2.0*ranecu(seed); float sintheta = sqrtf(1.0f - direction->z*direction->z); float phi = (2.0f*PI)*ranecu(seed); float cos_phi, sin_phi; sincos(phi, &sin_phi, &cos_phi); direction->y = sintheta*sin_phi; direction->x = sintheta*cos_phi; // -- Sample distance to next fluorescence interaction inside scintillator, using the input MFP at the fluorescence energy: dist_detector = -detector_data_SHARED->fluorescence_MFP*logf(ranecu(seed)); // -- Tally fluorescence energy in the corresponding pixel, unless escaped: position->y = position->y + dist_detector*direction->y; if ((position->y > BLOCKING_LAYER_TOP) && (position->y < (detector_data_SHARED->scintillator_thickness-BLOCKING_LAYER_BOTTOM))) // !!BLOCKING_LAYER!! { position->x = position->x + dist_detector*direction->x; pixel_coord_x = __float2int_rd((position->x - detector_data_SHARED->offset.x + 0.5f*detector_data_SHARED->width_X) * detector_data_SHARED->inv_pixel_size_X); // CUDA intrinsic function converts float to integer rounding down (to minus inf) if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x)) { position->z = position->z + dist_detector*direction->z; pixel_coord_z = __float2int_rd((position->z - detector_data_SHARED->offset.y + 0.5f*detector_data_SHARED->height_Z) * detector_data_SHARED->inv_pixel_size_Z); if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y)) atomicAdd(( image + (int)(*scatter_state) * detector_data_SHARED->total_num_pixels + (pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), __float2ull_rn(detector_data_SHARED->fluorescence_energy*SCALE_eV) ); // !!TrackFluorescence!! } } } } } } //////////////////////////////////////////////////////////////////////////////// //! Sample two random values with a Gaussian PDF. //! Uses the polar method to avoid expensive trigonometric calls implied by the alternative Box-Muller method. //! 
(**Code adapted from penEasyv20140609/penaux.F**) //////////////////////////////////////////////////////////////////////////////// __device__ inline void gausspdf(float *g1, float *g2, int2 *seed) { float x,y,u; do { x = 1.0f-2.0f*ranecu(seed); y = 1.0f-2.0f*ranecu(seed); u = x*x+y*y; } while ((u>=1.0f)||(u<1.0e-10f)); // Reject point and repeat float s = sqrtf(-2.0f*logf(u)/u); *g1 = x*s; // First Gaussian-distributed random variable *g2 = y*s; // Second independent Gaussian-distributed random variable } inline void gausspdf_double_CPU(double *g1, double *g2, int2 *seed) { double x,y,u; do { x = 1.0-2.0*ranecu_double_CPU(seed); y = 1.0-2.0*ranecu_double_CPU(seed); u = x*x+y*y; } while ((u>=1.0)||(u<1.0e-10)); // Reject point and repeat double s = sqrt(-2.0*log(u)/u); *g1 = x*s; // First Gaussian-distributed random variable *g2 = y*s; // Second independent Gaussian-distributed random variable } // //////////////////////////////////////////////////////////////////////////////// // //! Return a random value with a Gaussian PDF. // //! Uses the polar method to avoid expensive trigonometric calls implied by the alternative Box-Muller method. // // (**Code adapted from penEasyv20140609/penaux.F**) // //////////////////////////////////////////////////////////////////////////////// // __device__ inline float sample_gausspdf(int2 *seed) // { // float x,y,u; // do // { // x = 1.0f-2.0f*ranecu(seed); // y = 1.0f-2.0f*ranecu(seed); // u = x*x+y*y; // } while ((u>=1.0f)||(u<1.0e-10f)); // Reject point and repeat // return (x*sqrtf(-2.0f*logf(u)/u)); // Return Gaussian-distributed random value // } //////////////////////////////////////////////////////////////////////////////// //! Return a random value with a Gaussian PDF, with the distribution cropped at 2 sigma. //! Uses the polar method to avoid expensive trigonometric calls implied by the alternative Box-Muller method. // (**Code adapted from penEasyv20140609/penaux.F**) // // In a Gaussian distribution, 4.55% of sampled points are farther than 2*sigma; FWHM/2 = sqrt(2*ln(2))*sigma = 1.1774*sigma. // Cropping the Gaussian at 2 sigma we prevent generating photons unrealistically far from the focal spot center. // Experimental focal spot measurements show that the spot is quite sharp [A Burgess, "Focal spots: I. MTF separability", Invest Radiol 12, p. 
36-43 (1977)] // //////////////////////////////////////////////////////////////////////////////// __device__ inline float sample_gausspdf_below2sigma(int2 *seed) { float g; do // Iterate function until we get a value under 2*sigma { float x,y,u; do { x = 1.0f-2.0f*ranecu(seed); y = 1.0f-2.0f*ranecu(seed); u = x*x+y*y; } while ((u>=1.0f)||(u<1.0e-10f)); // Reject point and repeat float s = sqrtf(-2.0f*logf(u)/u); g = x*s; // First Gaussian-distributed random variable if (fabsf(g)<2.0f) break; // exit loop and return g = y*s; // Second independent Gaussian-distributed random variable } while (fabsf(g)>2.0f); return g; // Return Gaussian-distributed random value under 2*sigma } //!* Rotate input vector (x,y,z) around the input rotation axis (wx,wy,wz) for the input angle, using Rodrigues' formula to compute the rotation matrix (http://mathworld.wolfram.com/RodriguesRotationFormula.html) : __device__ __host__ inline void rotate_around_axis_Rodrigues(float *angle, float3 *w, float3 *p) { if (fabs(*angle)>1.0e-8f) // Apply rotation only if input angle is not 0 { float s, c; sincos(*angle, &s,&c); // Precompute sinus and cosinus of input angle float x0 = p->x; // Temporary copies float y0 = p->y; // Construct and apply rotation matrix using Rodrigues' formula: float m1 = c+(w->x)*(w->x)*(1-c); // m1 float m2 = (w->z)*s+(w->x)*(w->y)*(1-c); // m4 float m3 =-(w->y)*s+(w->x)*(w->z)*(1-c); // m7 p->x = x0*m1 + y0*m2 +(p->z)*m3; // x=x0*m1+y0*m4+z0*m7 m1 =-(w->z)*s+(w->x)*(w->y)*(1-c); // m2 m2 = c+(w->y)*(w->y)*(1-c); // m5 m3 = (w->x)*s+(w->y)*(w->z)*(1-c); // m8 p->y = x0*m1 + y0*m2 + (p->z)*m3; // y=x0*m2+y0*m5+z0*m8 m1 = (w->y)*s+(w->x)*(w->z)*(1-c); // m3 m2 =-(w->x)*s+(w->y)*(w->z)*(1-c); // m6 m3 = c+(w->z)*(w->z)*(1-c); // m9 p->z = x0*m1 + y0*m2 + (p->z)*m3; // z=x0*m3+y0*m6+z0*m9 } } //!* Rotate the TWO input vectors (x,y,z) around the input rotation axis (wx,wy,wz) for the input angle, using Rodrigues' formula to compute the rotation matrix (http://mathworld.wolfram.com/RodriguesRotationFormula.html) : //!* Rotating the two vectors together I can re-use the rotation matrix computed on the fly __device__ __host__ inline void rotate_2vectors_around_axis_Rodrigues(float *angle, float3 *w, float3 *p, float3 *v) { if (fabs(*angle)>1.0e-8f) // Apply rotation only if input angle is not 0 { float s, c; sincos(*angle, &s,&c); // Precompute sinus and cosinus of input angle float x0 = p->x, y0 = p->y; // Temporary copies float v0 = v->x, w0 = v->y; // Construct and apply rotation matrix using Rodrigues' formula: float m1 = c+(w->x)*(w->x)*(1-c); // m1 float m2 = (w->z)*s+(w->x)*(w->y)*(1-c); // m4 float m3 =-(w->y)*s+(w->x)*(w->z)*(1-c); // m7 p->x = x0*m1 + y0*m2 +(p->z)*m3; // x=x0*m1+y0*m4+z0*m7 v->x = v0*m1 + w0*m2 +(v->z)*m3; m1 =-(w->z)*s+(w->x)*(w->y)*(1-c); // m2 m2 = c+(w->y)*(w->y)*(1-c); // m5 m3 = (w->x)*s+(w->y)*(w->z)*(1-c); // m8 p->y = x0*m1 + y0*m2 + (p->z)*m3; // y=x0*m2+y0*m5+z0*m8 v->y = v0*m1 + w0*m2 + (v->z)*m3; m1 = (w->y)*s+(w->x)*(w->z)*(1-c); // m3 m2 =-(w->x)*s+(w->y)*(w->z)*(1-c); // m6 m3 = c+(w->z)*(w->z)*(1-c); // m9 p->z = x0*m1 + y0*m2 + (p->z)*m3; // z=x0*m3+y0*m6+z0*m9 v->z = v0*m1 + w0*m2 + (v->z)*m3; } } //!* Rotate the input vector (float3) multiplying by the input rotation matrix (float m[9]). 
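//!* The 9-element matrix is stored in row-major order, m = {m00,m01,m02, m10,m11,m12, m20,m21,m22}, so the function
//!* computes v' = M*v. Example: the row-major 90 deg rotation around +Z, {0,-1,0, 1,0,0, 0,0,1}, maps (1,0,0) onto (0,1,0).
//!* The detector rotation matrices passed to this function in tally_image use this same layout.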
__device__ __host__ inline void apply_rotation(float3 *v, float *m) { float tmp_x = v->x, tmp_y = v->y; v->x = tmp_x*m[0] + tmp_y*m[1] + v->z*m[2]; v->y = tmp_x*m[3] + tmp_y*m[4] + v->z*m[5]; v->z = tmp_x*m[6] + tmp_y*m[7] + v->z*m[8]; } //////////////////////////////////////////////////////////////////////////////// //! Analytical model of a 1D focused antiscatter grid based on the work of Day and Dance [Phys Med Biol 28, p. 1429-1433 (1983)]. //! The model returns the probability of transmission through the grid for the current x-ray direction. //! The position of the particle in the default reference frame with the detector centered at the origin and laying on the XZ plane is used to compute the focused grid angle. //! //! ASSUMPTIONS: //! - Currently the x-ray energy is not used: the attenuation at the average energy is assumed for every x-ray. !!DeBuG!! //! - Assuming that the focal length of the grid is always identical to the input source-to-detector distance (sdd). !!DeBuG!! //! - The Day and Dance equations are for an uniform oblique grid and the change in angle for consecutive strips is not modeled. As they explain, this is unlikely to be relevant because //! the prob of x-rays traversing many strips is extremely low, and consecutive strips have very similar angulation. //! //! - Using double precision for variables that have to be inverted to avoid inaccuracy for collimated rays (u2 close to 0). Using exclusively double takes 4 times more than exclusively floats! //////////////////////////////////////////////////////////////////////////////// __device__ inline float antiscatter_grid_transmission_prob(float3* position, float3* direction, struct detector_struct* detector_data_SHARED) //!!DBTv1.5!! { // -- Compute grid angle at the current location on the detector: // The default MC-GPU detector orientation is on the XZ plane, perpendicular to Y axis, pointing towards Y. I have to transform to Day&Dance 1983 reference on XY plane, perpendicular to Z axis. // The position is already shifted to have the origin at the center of the detector: I can use the position as is to compute the incidence angle -> grid angle for focused grid. double grid_angle, u, w; if (detector_data_SHARED->grid_ratio<0.0f) { // <0 --> input orientation == 0 ==> 1D collimated grid with strips perpendicular to lateral direction X (mammo style), as in Day&Dance1983. grid_angle = (0.5*PI) - atan2(position->x, detector_data_SHARED->sdd); // A 0 deg angle between the incident beam and the strips corresponds to a grid angle (sigma) of 90 deg = PI/2 u = direction->x; w = direction->y; } else { // >0 --> input orientation == 1 ==> 1D collimated grid with strips parallel to lateral direction X and perpendicular to Z direction (DBT style): switch Z and X axis grid_angle = (0.5*PI) - atan2(position->z, detector_data_SHARED->sdd); u = direction->z; w = direction->y; } float C = 1.0f/detector_data_SHARED->grid_freq; float d2 = detector_data_SHARED->grid_strip_thickness/sinf(grid_angle); // Strip thickness in grid reference system (eq. page 1429, Day&Dance1983) float D2 = C - d2; // Distance between consecutive grid strips float h = fabsf(detector_data_SHARED->grid_ratio) * D2; // Compute the eight of the grid strips, according to the input grid ratio. Using absolute value bc sign encodes grid orientation in my implementation. double u2 = fabs(u - w/tan(grid_angle)); // (eq. 1, Day&Dance1983) Note: u2 is the direction RATIO in the oblique referrence system, not the direction COSINE. if (u2<1.0e-9) u2 = 1.0e-8; // !!DeBuG!! 
Perfectly collimated particles going parallel to strips will have u2=alpha=0. This might gives NaN computing A, but only for few angles (21 deg)??? Add arbitrary epsilon to prevent 0/0. double P = (h/w)*u2; // (eq. 4, Day&Dance1983) double n = floor(P*detector_data_SHARED->grid_freq); // grid_freq = 1/C float q = P - n*C; double alpha = u2/(detector_data_SHARED->grid_strip_mu-detector_data_SHARED->grid_interspace_mu); // (eq. 8, Day&Dance1983) double inv_alpha = 1.0/alpha; // Grid transmission: probability of a photon passing through the grid without interaction: float A = expf(-detector_data_SHARED->grid_interspace_mu*h/w - d2*n*inv_alpha); // (eq. 9, Day&Dance1983) float H = 0.0f; // Step function if (q>=D2) H = 1.0f; float B = (fabsf(q-D2)+2.0f*(float)alpha) * expf((H*(D2-q))*inv_alpha) + (fabsf(d2-q)-2.0f*(float)alpha) * expf((-0.5f*(d2+q-fabsf(d2-q)))*inv_alpha); // (eq. 12, Day&Dance1983) return (A*B*detector_data_SHARED->grid_freq); // (eq. 10, Day&Dance1983) ; grid_freq = 1/C } //////////////////////////////////////////////////////////////////////////////// //! Find the material number at the current location searching the binary tree structure. //! //! @param[in] position Particle position //! @param[in] bitree Array with the binary trees for every non-uniform coarse voxel //! @param[in] bitree_root_index Index of the root node of the current coarse voxel within bitree array //! @param[in] voxel_coord Voxel coordinates, needed to determine the location of the lower wall of the current coarse voxel //! @param[in] voxel_data_CONST.voxel_size Global variable with the size of the low resolution coarse voxels [cm] //! @param[in] voxel_data_CONST.voxel_size_HiRes Global variable with the size of the original high resolution voxels [cm] //! @param[in] voxel_data_CONST.num_voxels_coarse Global variable with the number of sub-voxels in a coarse voxel //! @param[in] voxel_data_CONST.offset Global variable with the location of the lower walls of the complete voxelized geometry //! @return Material number found at the input position (composition of the tree leaf in that point) //////////////////////////////////////////////////////////////////////////////// __device__ int find_material_bitree(const float3* position, char* bitree, const int bitree_root_index, short3* voxel_coord) // !!bitree!! v1.5b { // -- Define variable used during the tree traversal: int bitree_node=0, node=0; // Binary tree node index for the current coarse voxel int3 node_width; node_width.x = voxel_data_CONST.num_voxels_coarse.x; node_width.y = voxel_data_CONST.num_voxels_coarse.y; node_width.z = voxel_data_CONST.num_voxels_coarse.z; float3 node_lower_wall; node_lower_wall.x = voxel_coord->x*voxel_data_CONST.voxel_size.x + voxel_data_CONST.offset.x; // Init lower bound walls in x,y,z node_lower_wall.y = voxel_coord->y*voxel_data_CONST.voxel_size.y + voxel_data_CONST.offset.y; node_lower_wall.z = voxel_coord->z*voxel_data_CONST.voxel_size.z + voxel_data_CONST.offset.z; // -- Recursively traverse the tree (from the root node) until we get a final node (positive bitree value). // The X, Y, Z axes are divided in half sequentially in each iteration of the loop. for(;;) { bitree_node = (int)bitree[node+bitree_root_index]; // Every acces to the bitree array has to be offset by the root node index "bitree_root_index" if (bitree_node>-1) // Check if we are already in a final node (empty geometry or final node found in previous Z division): break; // We reached a final node! Exit infinite loop. 
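    // (Tree encoding: a value >= 0 in the bitree array is a leaf storing the material number returned at the end of
    //  this function; a negative value is an internal node whose absolute value is the index of its second-half child,
    //  while its first-half child is simply the next element of the array, reached with "node++".)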
// Negative node value: we need to continue traversing down the tree // -- Check X axis: if (node_width.x > 1) // Never split a dimension that is just one voxel wide. Skip splitting and advance to the next dimension. !!DeBuG!! { int width_2nd = node_width.x/2; node_width.x = node_width.x - width_2nd; // Integer length of the first node: +1 longer than the second if distance is odd float splitting_plane = node_lower_wall.x + node_width.x*voxel_data_CONST.voxel_size_HiRes.x; // Using the original high res voxel size to determine the location of the splitting plane // Check in which side of the middle plane the current position is located: if (position->x < splitting_plane) { // -Point below (left) the middle plane: move to the following element of the bitree array. node++; // Move to the first child: following node } else { // -Point above (right) the middle plane: skip the following subnodes (first half of the node) and move directly to the second half node node = -bitree_node; // Advance to the location of the 2nd half (stored as a negative value) node_lower_wall.x = splitting_plane; // The splitting plane is now the lower plane of the subnode node_width.x = width_2nd; // Update length of 2nd half subnode } } bitree_node = (int)bitree[node+bitree_root_index]; if (bitree_node>-1) break; // We reached a final node! Exit infinite loop. // -- Check Y axis: if (node_width.y > 1) { int width_2nd = node_width.y/2; node_width.y = node_width.y - width_2nd; float splitting_plane = node_lower_wall.y + node_width.y*voxel_data_CONST.voxel_size_HiRes.y; if (position->y < splitting_plane) { node++; } else { node = -bitree_node; node_lower_wall.y = splitting_plane; node_width.y = width_2nd; } } bitree_node = (int)bitree[node+bitree_root_index]; if (bitree_node>-1) break; // We reached a final node! Exit infinite loop. // -- Check Z axis: if (node_width.z > 1) { int width_2nd = node_width.z/2; node_width.z = node_width.z - width_2nd; float splitting_plane = node_lower_wall.z + node_width.z*voxel_data_CONST.voxel_size_HiRes.z; if (position->z < splitting_plane) { node++; } else { node = -bitree_node; node_lower_wall.z = splitting_plane; node_width.z = width_2nd; } } } // -- We reached a final node: return the material number in the current location return (bitree_node); }
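
////////////////////////////////////////////////////////////////////////////////
//! (Added sketch, not part of the original MC-GPU source.) Several of the inline helpers above are declared
//! __device__ __host__ and can therefore be sanity-checked on the CPU without launching a kernel. The function
//! below is a minimal example of such a host-side check: the expected values follow directly from the definitions
//! above (40014*12345 is smaller than the modulus 2147483563, and a 90 deg rotation around +Z maps the x axis
//! onto the y axis). The function name is hypothetical and is not referenced anywhere else in the code.
////////////////////////////////////////////////////////////////////////////////
inline int host_sanity_check_helpers()
{
  int num_errors = 0;

  // abMODm(m,a,s) returns (a*s) MOD m; here a*s < m, so the product itself is expected back:
  if (abMODm(2147483563, 40014, 12345) != 493972830)
    num_errors++;

  // apply_rotation() multiplies the input vector by a row-major 3x3 matrix; rotate the x axis 90 deg around +Z:
  float Rz90[9] = { 0.0f,-1.0f, 0.0f,
                    1.0f, 0.0f, 0.0f,
                    0.0f, 0.0f, 1.0f };
  float3 v;  v.x = 1.0f;  v.y = 0.0f;  v.z = 0.0f;
  apply_rotation(&v, Rz90);
  if (fabsf(v.x) > 1.0e-6f || fabsf(v.y - 1.0f) > 1.0e-6f || fabsf(v.z) > 1.0e-6f)
    num_errors++;

  return num_errors;   // 0 if both checks passed
}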
08076cbbf423efbd7344c463e2485a4e37e63a1c.cu
//////////////////////////////////////////////////////////////////////////////// // // **************************** // *** MC-GPU, version 1.5b *** // **************************** // //! Definition of the CUDA GPU kernel for the simulation of x ray tracks in a voxelized geometry. //! The physics models for Rayleigh and Compton scattering are translated from the Fortran //! code in PENELOPE 2006. // // ** DISCLAIMER ** // // This software and documentation (the "Software") were developed at the Food and // Drug Administration (FDA) by employees of the Federal Government in the course // of their official duties. Pursuant to Title 17, Section 105 of the United States // Code, this work is not subject to copyright protection and is in the public // domain. Permission is hereby granted, free of charge, to any person obtaining a // copy of the Software, to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, distribute, // sublicense, or sell copies of the Software or derivatives, and to permit persons // to whom the Software is furnished to do so. FDA assumes no responsibility // whatsoever for use by other parties of the Software, its source code, // documentation or compiled executables, and makes no guarantees, expressed or // implied, about its quality, reliability, or any other characteristic. Further, // use of this code in no way implies endorsement by the FDA or confers any // advantage in regulatory decisions. Although this software can be redistributed // and/or modified freely, we ask that any derivative works bear some notice that // they are derived from it, and any modified versions bear some notice that they // have been modified. // // //! @file MC-GPU_kernel_v1.5b.cu //! @author Andreu Badal (Andreu.Badal-Soler{at}fda.hhs.gov) //! @date 2018/01/01 // -- Original code started on: 2009/04/14 // //////////////////////////////////////////////////////////////////////////////// // ** This software is described in the following reference (please cite it in yuor papers): // Andreu Badal, Diksha Sharma, Christian G.Graff, Rongping Zeng, and Aldo Badano, Mammography and breast // tomosynthesis simulator for virtual clinical trials, Computer Physics Communications 261, p. 107779 (2021) // https://doi.org/10.1016/j.cpc.2020.107779 // ** Update May 2021 ** !!BLOCKING_LAYER!! // Enabling blocking (or dead) layers at the top and bottom of the detector slab. // Interactions in these layers will not contribute to the pixel value, but their fluorescence will // be tracked and might be detected somewhere else. The insensitive top layer causes a measurable // drop in DQE(0), but it does not affect MTF as implemented. Pixel values will be reduced (less // energy detected per history. [Reference: Zhou et al., Med. Phys. 34, 1098-1109 (2007)] #define BLOCKING_LAYER_TOP 0.0000f // [cm] Thickness layer closer to source. Example: 0.0008f for a 8 micron layer (0.0 == no layer). !!BLOCKING_LAYER!! #define BLOCKING_LAYER_BOTTOM 0.0000f // [cm] Thickness layer further from source. Example: 0.0008f for a 8 micron layer (0.0 == no layer). !!BLOCKING_LAYER!! //////////////////////////////////////////////////////////////////////////////// //! Initialize the image array, ie, set all pixels to zero //! Essentially, this function has the same effect as the command: //! "cutilSafeCall(cudaMemcpy(image_device, image, image_bytes, cudaMemcpyHostToDevice))"; //! //! CUDA performs some initialization work the first time a GPU kernel is called. //! 
Therefore, calling a short kernel before the real particle tracking is performed //! may improve the accuracy of the timing measurements in the relevant kernel. //! //! @param[in,out] image Pointer to the image array. //! @param[in] pixels_per_image Number of pixels in the image (ie, elements in the array). //////////////////////////////////////////////////////////////////////////////// __global__ void init_image_array_GPU(unsigned long long int* image, int pixels_per_image) { int my_pixel = threadIdx.x + blockIdx.x*blockDim.x; if (my_pixel < pixels_per_image) { // -- Set the current pixel to 0 and return, avoiding overflow when more threads than pixels are used: image[my_pixel] = (unsigned long long int)(0); // Initialize non-scatter image my_pixel += pixels_per_image; // (advance to next image) image[my_pixel] = (unsigned long long int)(0); // Initialize Compton image my_pixel += pixels_per_image; // (advance to next image) image[my_pixel] = (unsigned long long int)(0); // Initialize Rayleigh image my_pixel += pixels_per_image; // (advance to next image) image[my_pixel] = (unsigned long long int)(0); // Initialize multi-scatter image } } // //////////////////////////////////////////////////////////////////////////////// // //! Initialize the dose deposition array, ie, set all voxel doses to zero // //! // //! @param[in,out] dose Pointer to the dose mean and sigma arrays. // //! @param[in] num_voxels_dose Number of voxels in the dose ROI (ie, elements in the arrays). // //////////////////////////////////////////////////////////////////////////////// // __global__ // void init_dose_array_GPU(ulonglong2* voxels_Edep, int num_voxels_dose) // { // int my_voxel = threadIdx.x + blockIdx.x*blockDim.x; // register ulonglong2 ulonglong2_zero; // ulonglong2_zero.x = ulonglong2_zero.y = (unsigned long long int) 0; // if (my_voxel < num_voxels_dose) // { // dose[my_voxel] = ulonglong2_zero; // Set the current voxel to (0,0) and return, avoiding overflow // } // } //////////////////////////////////////////////////////////////////////////////// //! Main function to simulate x-ray tracks inside a voxelized geometry. //! Secondary electrons are not simulated (in photoelectric and Compton //! events the energy is locally deposited). //! //! The following global variables, in the GPU __constant__ memory are used: //! voxel_data_CONST, //! source_energy_data_CONST //! mfp_table_data_CONST. //! density_LUT_CONST //! //! @param[in] history_batch Particle batch number (only used in the CPU version when CUDA is disabled!, the GPU uses the built-in variable threadIdx) //! @param[in] num_p Projection number in the CT simulation. This variable defines a specific angle and the corresponding source and detector will be used. //! @param[in] histories_per_thread Number of histories to simulate for each call to this function (ie, for GPU thread). //! @param[in] seed_input Random number generator seed (the same seed is used to initialize the two MLCGs of RANECU). //! @param[in] voxel_mat_dens Pointer to the voxel densities and material vector (the voxelized geometry), stored in GPU glbal memory. //! @param[in] mfp_Woodcock_table Two parameter table for the linear interpolation of the Woodcock mean free path (MFP) (stored in GPU global memory). //! @param[in] mfp_table_a First element for the linear interpolation of the interaction mean free paths (stored in GPU global memory). //! @param[in] mfp_table_b Second element for the linear interpolation of the interaction mean free paths (stored in GPU global memory). //! 
@param[in] rayleigh_table Pointer to the table with the data required by the Rayleigh interaction sampling, stored in GPU global memory. //! @param[in] compton_table Pointer to the table with the data required by the Compton interaction sampling, stored in GPU global memory. //! @param[in,out] image Pointer to the image vector in the GPU global memory. //! @param[in,out] dose Pointer to the array containing the 3D voxel dose (and its uncertainty) in the GPU global memory. //////////////////////////////////////////////////////////////////////////////// __global__ void track_particles(int histories_per_thread, short int num_p, // For a CT simulation: allocate space for up to MAX_NUM_PROJECTIONS projections. int* seed_input_device, // Random seed read from global memory; secuence continued for successive projections in same GPU. !!DBTv1.4!! unsigned long long int* image, ulonglong2* voxels_Edep, int* voxel_mat_dens, //!!bitree!! Using "int" to be store the index to the bitree table //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2" char* bitree, //!!bitree!! Array with the bitrees for every non-uniform coarse voxel float2* mfp_Woodcock_table, float3* mfp_table_a, float3* mfp_table_b, struct rayleigh_struct* rayleigh_table, struct compton_struct* compton_table, struct detector_struct* detector_data_array, struct source_struct* source_data_array, ulonglong2* materials_dose) { // -- Declare the track state variables: float3 position, direction; float energy, step, prob, randno, mfp_density, mfp_Woodcock; float3 mfp_table_read_a, mfp_table_read_b; int2 seed; int index; int material0, // Current material, starting at 0 for 1st material material_old; // Flag to mark a material or energy change signed char scatter_state; // Flag for scatter images: scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter. // -- Store the Compton table in shared memory from global memory: // For Compton and Rayleigh the access to memory is not coherent and the caching capability do not speeds up the accesses, they actually slows down the acces to other data. __shared__ struct compton_struct cgco_SHARED; __shared__ struct detector_struct detector_data_SHARED; __shared__ struct source_struct source_data_SHARED; if (0==threadIdx.x) // First GPU thread copies the variables to shared memory { // -Copy the current source, detector data from global to shared memory for fast access: source_data_SHARED = source_data_array[num_p]; detector_data_SHARED = detector_data_array[num_p]; // Copy the long array to a single instance in shared memory for the current projection // -Copy the compton data to shared memory: cgco_SHARED = *compton_table; } __syncthreads(); // Make sure all threads will see the initialized shared variable // -- Initialize the RANECU generator in a position far away from the previous history: init_PRNG((threadIdx.x + blockIdx.x*blockDim.x), histories_per_thread, *seed_input_device, &seed); // Using a 1D block. Random seed read from global memory. !!DBTv1.4!! // -- Loop for the "histories_per_thread" particles in the current history_batch: for( ; histories_per_thread>0; histories_per_thread--) { // printf("\n\n********* NEW HISTORY: %d [seeds: %d, %d]\n\n", histories_per_thread, seed.x, seed.y); // fflush(stdout); // !!Verbose!! calling printf from the GPU is possible but if multiple threads call it at the same time some output will be lost. 
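    // NOTE: "absvox" holds the absolute voxel index of the current particle position; the source() call below sets it
    //       to FLAG_OUTSIDE_VOXELS when the sampled primary does not point towards the voxelized volume, and that flag
    //       is checked at the top of the interaction loop further down.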
unsigned int absvox = 1; // -- Call the source function to get a primary x ray: source(&position, &direction, &energy, &seed, &absvox, &source_data_SHARED, &detector_data_SHARED); scatter_state = (signed char)0; // Reset previous scatter state: new non-scattered particle loaded // -- Find the current energy bin by truncation (this could be pre-calculated for a monoenergetic beam): // The initialization host code made sure that the sampled energy will always be within the tabulated energies (index never negative or too large). index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite) // -- Get the minimum mfp at the current energy using linear interpolation (Woodcock tracking): { float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP } // -- Reset previous material to force a recalculation of the MFPs (negative materials are not allowed in the voxels): material_old = -1; // *** X-ray interaction loop: for(;;) { if (absvox==FLAG_OUTSIDE_VOXELS) break; // -- Primary particle was not pointing to the voxel region! (but may still be detected after moving in vacuum in a straight line). // *** Virtual interaction loop: // New loop structure in MC-GPU_v1.3: simulate all virtual events before sampling Compton & Rayleigh: // float2 matdens; short3 voxel_coord; // Variable used only by DOSE TALLY do { step = -(mfp_Woodcock)*logf(ranecu(&seed)); // Using the minimum MFP in the geometry for the input energy (Woodcock tracking) position.x += step*direction.x; position.y += step*direction.y; position.z += step*direction.z; // -- Locate the new particle in the voxel geometry: absvox = locate_voxel(position, &voxel_coord); // Get the voxel number at the current position and the voxel coordinates (used to check if inside the dose ROI in DOSE TALLY). if (absvox==FLAG_OUTSIDE_VOXELS) break; // -- Particle escaped the voxel region! ("index" is still >0 at this moment) // matdens = voxel_mat_dens[absvox]; // Get the voxel material and density in a single read from global memory // material0 = (int)(matdens.x - 1); // Set the current material by truncation, and set 1st material to value '0'. //!!FixedDensity_DBT!! Allocating "voxel_mat_dens" as "char" instead of "float2". Density taken from function "density_LUT". First material number == 0 material0 = (int)voxel_mat_dens[absvox]; // Get the voxel material and density in a single read from global memory (first material==0) if (material0<0) { // -- Non-uniform low resolution voxel: find material at current location searching the original high resolution geometry using the corresponding binary tree: material0 = find_material_bitree(&position, bitree, -material0, &voxel_coord); // !!bitree!! } // -- Get the data for the linear interpolation of the interaction MFPs, in case the energy or material have changed: if (material0 != material_old) { mfp_table_read_a = mfp_table_a[index*(MAX_MATERIALS)+material0]; mfp_table_read_b = mfp_table_b[index*(MAX_MATERIALS)+material0]; material_old = material0; // Store the new material } // *** Apply Woodcock tracking: mfp_density = mfp_Woodcock * density_LUT_CONST[material0]; //!!FixedDensity_DBT!! 
Density taken from constant memory array "density_LUT_CONST"; Old: mfp_density=mfp_Woodcock*matdens.y; // -- Calculate probability of delta scattering, using the total mean free path for the current material and energy (linear interpolation): prob = 1.0f - mfp_density * (mfp_table_read_a.x + energy * mfp_table_read_b.x); randno = ranecu(&seed); // Sample uniform PRN } while (randno<prob); // [Iterate if there is a delta scattering event] if (absvox==FLAG_OUTSIDE_VOXELS) break; // -- Particle escaped the voxel region! Break the interaction loop to call tally image. // The GPU threads will be stopped and waiting here until ALL threads have a REAL event: // -- Real event takes place! Check the kind of event and sample the effects of the interaction: prob += mfp_density * (mfp_table_read_a.y + energy * mfp_table_read_b.y); // Interpolate total Compton MFP ('y' component) if (randno<prob) // [Checking Compton scattering] { // *** Compton interaction: // -- Sample new direction and energy: double costh_Compton; randno = energy; // Save temporal copy of the particle energy (variable randno not necessary until next sampling). DOSE TALLY GCOa(&energy, &costh_Compton, &material0, &seed, &cgco_SHARED); rotate_double(&direction, costh_Compton, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed)); randno = energy - randno; // Save temporal copy of the negative of the energy lost in the interaction. DOSE TALLY // -- Find the new energy interval: index = __float2int_rd((energy-mfp_table_data_CONST.e0)*mfp_table_data_CONST.ide); // Using CUDA function to convert float to integer rounding down (towards minus infinite) if (index>-1) // 'index' will be negative only when the energy is below the tabulated minimum energy: particle will be then absorbed (rejected) after tallying the dose. { // -- Get the Woodcock MFP for the new energy (energy above minimum cutoff): float2 mfp_Woodcock_read = mfp_Woodcock_table[index]; // Read the 2 parameters for the linear interpolation in a single read from global memory mfp_Woodcock = mfp_Woodcock_read.x + energy * mfp_Woodcock_read.y; // Interpolated minimum MFP material_old = -2; // Set an impossible material to force an update of the MFPs data for the nex energy interval // -- Update scatter state: if (scatter_state==(signed char)0) scatter_state = (signed char)1; // Set scatter_state == 1: Compton scattered particle else scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle } } else { prob += mfp_density * (mfp_table_read_a.z + energy * mfp_table_read_b.z); // Interpolate total Rayleigh MFP ('z' component) if (randno<prob) // [Checking Rayleigh scattering] { // *** Rayleigh interaction: // -- Sample angular deflection: double costh_Rayleigh; float pmax_current = rayleigh_table->pmax[(index+1)*MAX_MATERIALS+material0]; // Get max (ie, value for next bin?) cumul prob square form factor for Rayleigh sampling GRAa(&energy, &costh_Rayleigh, &material0, &pmax_current, &seed, rayleigh_table); rotate_double(&direction, costh_Rayleigh, /*phi=2*pi*PRN=*/ 6.28318530717958647693*ranecu_double(&seed)); // -- Update scatter state: if (scatter_state==(signed char)0) scatter_state = (signed char)2; // Set scatter_state == 1: Rayleigh scattered particle else scatter_state = (signed char)3; // Set scatter_state == 3: Multi-scattered particle } else { // *** Photoelectric interaction (or pair production): mark particle for absorption after dose tally (ie, index<0)! 
                randno = -energy;     // Save a temporary copy of the (negative) energy deposited in the interaction (variable randno not necessary anymore).
                index = -11;          // A negative "index" marks that the particle was absorbed and that it will never arrive at the detector.
              }
            }

            // -- Tally the dose deposited in Compton and photoelectric interactions:
            if (randno<-0.001f)
            {
              float Edep = -1.0f*randno;   // If any energy was deposited, this variable will temporarily store the negative value of Edep.

              // -- Tally the dose deposited in the current material, if enabled (ie, array allocated and not null):
              if (materials_dose!=NULL)
                tally_materials_dose(&Edep, &material0, materials_dose);    // !!tally_materials_dose!!

              // -- Tally the energy deposited in the current voxel, if enabled (tally disabled when dose_ROI_x_max_CONST is negative). DOSE TALLY
              // Optional code to skip dose tally in air (material=0):   if (dose_ROI_x_max_CONST > -1 && 0!=material0)
              if (dose_ROI_x_max_CONST > -1)
                tally_voxel_energy_deposition(&Edep, &voxel_coord, voxels_Edep);
            }

            // -- Break interaction loop for particles that have been absorbed or with energy below the tabulated cutoff: particle is "absorbed" (ie, track discontinued).
            if (index<0)
              break;

          }   // [Cycle the X-ray interaction loop]

          if (index>-1)
          {
            // -- Particle escaped the voxels but was not absorbed, check if it will arrive at the detector and tally its energy:
            tally_image(&energy, &position, &direction, &scatter_state, image, &source_data_SHARED, &detector_data_SHARED, &seed);
          }

        }   // [Continue with a new history]

        // -- Store the final random seed used by the last thread in the grid to global memory in order to continue the random sequence in successive projections in the same GPU without overlapping. !!DBTv1.4!!
        //    Since I am only storing the 'x' component and using it to init both parts of the ranecu generator, the sequence will actually diverge, but I guarantee that at least one MLCG will stay uncorrelated. !!DeBuG!!
        if ( (blockIdx.x == (gridDim.x-1)) && (threadIdx.x == (blockDim.x-1)))
        {
          *seed_input_device = seed.x;   // Store seed in GPU memory, but only for the thread with the largest id
        }

      }   // [All tracks simulated for this kernel call: return to CPU]



////////////////////////////////////////////////////////////////////////////////
//!  Tally the dose deposited in the voxels.
//!  This function is called whenever a particle suffers a Compton or photoelectric
//!  interaction. It is not necessary to call this function if the dose tally
//!  was disabled in the input file (ie, dose_ROI_x_max_CONST < 0).
//!  Electrons are not transported in MC-GPU and therefore we are approximating
//!  that the dose is equal to the KERMA (energy released by the photons alone).
//!  This approximation is acceptable when there is electronic equilibrium and when
//!  the range of the secondary electrons is shorter than the voxel size. Usually the
//!  doses will be acceptable for photon energies below 1 MeV. The dose estimates may
//!  not be accurate at the interface of low density volumes.
//!
//!  We need to use atomicAdd() in the GPU to prevent multiple threads from updating the
//!  same voxel at the same time, which would result in a loss of information.
//!  This is very improbable when using a large number of voxels but gives trouble
//!  with simple geometries with few voxels (in this case the atomicAdd will slow
//!  down the code because the threads will update the voxel dose sequentially).
//!
//!
//!       @param[in]  Edep          Energy deposited in the interaction
//!       @param[in]  voxel_coord   Voxel coordinates, needed to check if the particle is located inside the input region of interest (ROI)
//!       @param[out] voxels_Edep   ulonglong2 array containing the 3D voxel dose and dose^2 (ie, uncertainty) as unsigned integers scaled by SCALE_eV.
////////////////////////////////////////////////////////////////////////////////
__device__
inline void tally_voxel_energy_deposition(float* Edep, short3* voxel_coord, ulonglong2* voxels_Edep)
{
  if((voxel_coord->x < dose_ROI_x_min_CONST) || (voxel_coord->x > dose_ROI_x_max_CONST) ||
     (voxel_coord->y < dose_ROI_y_min_CONST) || (voxel_coord->y > dose_ROI_y_max_CONST) ||
     (voxel_coord->z < dose_ROI_z_min_CONST) || (voxel_coord->z > dose_ROI_z_max_CONST))
  {
    return;   // -- Particle outside the ROI: return without tallying anything.
  }

  // -- Particle inside the ROI: tally Edep.
  register int DX = 1 + (int)(dose_ROI_x_max_CONST - dose_ROI_x_min_CONST);
  register int num_voxel = (int)(voxel_coord->x-dose_ROI_x_min_CONST) + ((int)(voxel_coord->y-dose_ROI_y_min_CONST))*DX + ((int)(voxel_coord->z-dose_ROI_z_min_CONST))*DX*(1 + (int)(dose_ROI_y_max_CONST-dose_ROI_y_min_CONST));

  atomicAdd(&voxels_Edep[num_voxel].x, __float2ull_rn((*Edep)*SCALE_eV) );    // Energy deposited at the voxel, scaled by the factor SCALE_eV and rounded.
  atomicAdd(&voxels_Edep[num_voxel].y, __float2ull_rn((*Edep)*(*Edep)) );     // (not using SCALE_eV for std_dev to prevent overflow)

  return;
}


////////////////////////////////////////////////////////////////////////////////
//!  Source that creates primary x rays, according to the defined source model.
//!  The particles are automatically moved to the surface of the voxel bounding box,
//!  to start the tracking inside a real material. If the sampled particle does not
//!  enter the voxels, it is left at the focal spot and the main program will check
//!  if it arrives at the detector or not.
//!
//!       @param[in] source_data               Structure describing the source.
//!       @param[in] source_energy_data_CONST  Global variable in constant memory space describing the source energy spectrum.
//!       @param[out] position                 Initial particle position (particle transported inside the voxel bbox).
//!       @param[out] direction                Sampled particle direction (cosine vectors).
//!       @param[out] energy                   Sampled energy of the new x ray.
//!       @param[in] seed                      Current seed of the random number generator, required to sample the movement direction.
//!       @param[out] absvox                   Set to <0 if primary particle will not cross the voxels, not changed otherwise (>0).
//////////////////////////////////////////////////////////////////////////////// __device__ inline void source(float3* position, float3* direction, float* energy, int2* seed, unsigned int* absvox, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED) { // *** Sample the initial x-ray energy following the input energy spectrum using the Walker aliasing algorithm from PENELOPE: // The following code is equivalent to calling the function "seeki_walker": int sampled_bin = seeki_walker(source_data_CONST.espc_cutoff, source_data_CONST.espc_alias, ranecu(seed), source_data_CONST.num_bins_espc); int sampled_bin; float RN = ranecu(seed) * source_energy_data_CONST.num_bins_espc; // Find initial interval (array starting at 0): int int_part = __float2int_rd(RN); // -- Integer part (round down) float fraction_part = RN - ((float)int_part); // -- Fractional part if (fraction_part < source_energy_data_CONST.espc_cutoff[int_part]) // Check if we are in the aliased part sampled_bin = int_part; // Below the cutoff: return current value else sampled_bin = (int)source_energy_data_CONST.espc_alias[int_part]; // Above the cutoff: return alias // Linear interpolation of the final energy within the sampled energy bin: *energy = source_energy_data_CONST.espc[sampled_bin] + ranecu(seed) * (source_energy_data_CONST.espc[sampled_bin+1] - source_energy_data_CONST.espc[sampled_bin]); // *** If not a point source, sample the focal spot position using a uniformly-distributed angle on a sphere AND a Gaussian-distributed random radius: !!DBTv1.4!! if (source_data_SHARED->focal_spot_FWHM > 5.0e-7f) { float g = sample_gausspdf_below2sigma(seed); // Return a Gaussian distributed random value located at less than 2 sigma from the center. !!DBTv1.4!! // Cropping the Gaussian dist at 2 sigma to prevent generating photons unrealistically far from the focal spot center. The 2 sigma limit has been set arbitrary and will affect 4.55% of sampled locations. // Experimental focal spot measurements show that the spot is quite sharp [A Burgess, "Focal spots: I. MTF separability", Invest Radiol 12, p. 36-43 (1977)] //ALTERNATIVE METHOD: float g = sample_gausspdf(seed); // Return a Gaussian distributed random value. !!DBTv1.4!! //ALTERNATIVE METHOD: gausspdf(&g1, &g2, seed); // Sample 2 independent Gaussian distributed random variables. 
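      // Illustrative sketch only (this is NOT the actual implementation of sample_gausspdf_below2sigma, which is defined elsewhere in this file):
      // the 2 sigma crop can be obtained by simple rejection on top of any standard normal sampler, at the cost of re-sampling ~4.55% of the values:
      //     float g;
      //     do
      //     {
      //       g = sample_gausspdf(seed);     // any N(0,1) sampler (eg, the polar method used below in this file)
      //     }
      //     while (fabsf(g) > 2.0f);         // reject and re-sample deviates beyond 2 sigma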
float cos_thetaFS = 2.0f*ranecu(seed)-1.0f; // Sample uniform points on a sphere float sin_thetaFS = sqrtf(1.0f-cos_thetaFS*cos_thetaFS); float phiFS = (PI*2.0f)*ranecu(seed); float cos_phiFS, sin_phiFS; sincos(phiFS, &sin_phiFS, &cos_phiFS); // Full Width at Half Maximum for Gaussian curve: FWHM = [2*sqrt(2*ln(2))] * sigma = 2.3548 * sigma // For a focal spot with FWHM = 0.0200 cm --> sigma = 0.0200/2.354820 = 0.0200*0.4246609 = 0.008493 float r = g * source_data_SHARED->focal_spot_FWHM * 0.424660900144f; // Use a Gaussian distribution for the radius // Set current focal spot position with sampled focal spot shift (source_data_SHARED->position was already rotated to the appropriate angle): position->x = source_data_SHARED->position.x + r*sin_thetaFS*cos_phiFS; position->y = source_data_SHARED->position.y + r*sin_thetaFS*sin_phiFS; position->z = source_data_SHARED->position.z + r*cos_thetaFS; } else { // Set default focal spot position for point source: position->x = source_data_SHARED->position.x; position->y = source_data_SHARED->position.y; position->z = source_data_SHARED->position.z; } // *** Sample the initial direction: do // Iterate sampling if the sampled direction is not acceptable to get a square field at the given phi (rejection sampling): force square field for any phi!! { // Using the algorithm used in PENMAIN.f, from penelope 2008 (by F. Salvat). direction->z = source_data_SHARED->cos_theta_low + ranecu(seed)*source_data_SHARED->D_cos_theta; // direction->z = w = cos(theta_sampled) register float phi_sampled = source_data_SHARED->phi_low + ranecu(seed)*source_data_SHARED->D_phi; register float sin_theta_sampled = sqrtf(1.0f - direction->z*direction->z); float sinphi_sampled, cosphi_sampled; sincos(phi_sampled, &sinphi_sampled,&cosphi_sampled); // Calculate the SIN and COS at the same time. direction->y = sin_theta_sampled * sinphi_sampled; direction->x = sin_theta_sampled * cosphi_sampled; } while( (fabsf(direction->z/(direction->y+1.0e-8f)) > source_data_SHARED->max_height_at_y1cm) || // Force square field for any phi by rejection sampling. (The "+1e-8" prevents division by zero) (fabsf(direction->x/(direction->y+1.0e-8f)) > source_data_SHARED->max_width_at_y1cm) ); //!!DBTv1.4!! // -- Apply the rotation that moves the emission direction from the default direction pointing to (0,1,0), to the required acquistion orientation: apply_rotation(direction, source_data_SHARED->rot_fan); //!!DBTv1.4!! // *** Simulate motion blur (if needed): Rotate focal spot position and emission direction according to a uniformly-sampled angular motion blur !!DBTv1.4!! 
if (source_data_SHARED->rotation_blur>EPS) { position->x -= source_data_SHARED->rotation_point.x; // Move to the coordinate system where rotation point is at the origin to apply the rotation position->y -= source_data_SHARED->rotation_point.y; position->z -= source_data_SHARED->rotation_point.z; float blur_angle = source_data_SHARED->rotation_blur*(ranecu(seed)-0.5f); // Uniform sampling of angular motion blur before and after the nominal acquisition angle // rotate_around_axis_Rodrigues(&blur_angle, &source_data_SHARED->axis_of_rotation, position); // Rotate position around rotation angle using Rodrigues' formula (http://mathworld.wolfram.com/RodriguesRotationFormula.html) rotate_2vectors_around_axis_Rodrigues(&blur_angle, &source_data_SHARED->axis_of_rotation, position, direction); // Rotate position and direction around rotation angle using Rodrigues' formula (http://mathworld.wolfram.com/RodriguesRotationFormula.html) position->x += source_data_SHARED->rotation_point.x; // Move back to the real-world coordinate system where rotation point is not at the origin position->y += source_data_SHARED->rotation_point.y; position->z += source_data_SHARED->rotation_point.z; } // To be safe, renormalize the direction vector to 1 (should not be necessary but single precision math might accumulate errors) double NORM = rsqrt(direction->x*direction->x + direction->y*direction->y + direction->z*direction->z); // !!DeBuG!! Check if it is really necessary to renormalize in a real simulation!! direction->x = NORM*direction->x; direction->y = NORM*direction->y; direction->z = NORM*direction->z; // printf("%.20lf %.20lf %.20lf\n", NORM, rsqrt(direction->x*direction->x + direction->y*direction->y + direction->z*direction->z), diff); //!!VERBOSE!! !!DeBuG!! // *** Move the particle to the inside of the voxel bounding box: move_to_bbox(position, direction, absvox); } //////////////////////////////////////////////////////////////////////////////// //! Functions to moves a particle towards the inside of the voxelized geometry bounding box. //! An EPSILON distance is added to make sure the particles will be clearly inside the bbox, //! not exactly on the surface. //! //! This algorithm makes the following assumptions: //! - The back lower vertex of the voxel bounding box is always located at the origin: (x0,y0,z0)=(0,0,0). //! - The initial value of "position" corresponds to the focal spot location. //! - When a ray is not pointing towards the bbox plane that it should cross according to the sign of the direction, //! I assign a distance to the intersection =0 instead of the real negative distance. The wall that will be //! crossed to enter the bbox is always the furthest and therefore a 0 distance will never be used except //! in the case of a ray starting inside the bbox or outside the bbox and not pointing to any of the 3 planes. //! In this situation the ray will be transported a 0 distance, meaning that it will stay at the focal spot. //! //! (Interesting information on ray-box intersection: http://tog.acm.org/resources/GraphicsGems/gems/RayBox.c) //! //! @param[in,out] position Particle position: initially set to the focal spot, returned transported inside the voxel bbox. //! @param[out] direction Sampled particle direction (cosine vectors). //! @param[out] intersection_flag Set to <0 if particle outside bbox and will not cross the voxels, not changed otherwise. //! @param[in] size_bbox Global variable from structure voxel_data_CONST: size of the bounding box. //! 
@param[in] offset Global variable from structure voxel_data_CONST: offset of the geometry in x, y, and z. //////////////////////////////////////////////////////////////////////////////// __device__ inline void move_to_bbox(float3* position, float3* direction, unsigned int* intersection_flag) { float dist_y, dist_x, dist_z; // -Distance to the nearest Y plane: if ((direction->y) > EPS_SOURCE) // Moving to +Y: check distance to y=0 plane { // Check Y=0 (bbox wall): if (position->y > voxel_data_CONST.offset.y) //!!DBTv1.4!! Allowing a 3D offset of the voxelized geometry (default origin at lower back corner). dist_y = 0.0f; // No intersection with this plane: particle inside or past the box // The actual distance would be negative but we set it to 0 bc we will not move the particle if no intersection exist. else dist_y = EPS_SOURCE + (voxel_data_CONST.offset.y-position->y)/(direction->y); // dist_y > 0 for sure in this case } else if ((direction->y) < NEG_EPS_SOURCE) { // Check Y=voxel_data_CONST.size_bbox.y: if (position->y < (voxel_data_CONST.size_bbox.y + voxel_data_CONST.offset.y)) dist_y = 0.0f; // No intersection with this plane else dist_y = EPS_SOURCE + (voxel_data_CONST.size_bbox.y + voxel_data_CONST.offset.y - position->y)/(direction->y); // dist_y > 0 for sure in this case } else // (direction->y)~0 dist_y = NEG_INF; // Particle moving parallel to the plane: no interaction possible (set impossible negative dist = -INFINITE) // -Distance to the nearest X plane: if ((direction->x) > EPS_SOURCE) { // Check X=0: if (position->x > voxel_data_CONST.offset.x) dist_x = 0.0f; else dist_x = EPS_SOURCE + (voxel_data_CONST.offset.x-position->x)/(direction->x); // dist_x > 0 for sure in this case } else if ((direction->x) < NEG_EPS_SOURCE) { // Check X=voxel_data_CONST.size_bbox.x: if (position->x < (voxel_data_CONST.size_bbox.x+voxel_data_CONST.offset.x)) dist_x = 0.0f; else dist_x = EPS_SOURCE + (voxel_data_CONST.size_bbox.x + voxel_data_CONST.offset.x - position->x)/(direction->x); // dist_x > 0 for sure in this case } else dist_x = NEG_INF; // -Distance to the nearest Z plane: if ((direction->z) > EPS_SOURCE) { // Check Z=0: if (position->z > voxel_data_CONST.offset.z) dist_z = 0.0f; else dist_z = EPS_SOURCE + (voxel_data_CONST.offset.z - position->z)/(direction->z); // dist_z > 0 for sure in this case } else if ((direction->z) < NEG_EPS_SOURCE) { // Check Z=voxel_data_CONST.size_bbox.z: if (position->z < (voxel_data_CONST.size_bbox.z+voxel_data_CONST.offset.z)) dist_z = 0.0f; else dist_z = EPS_SOURCE + (voxel_data_CONST.size_bbox.z + voxel_data_CONST.offset.z - position->z)/(direction->z); // dist_z > 0 for sure in this case } else dist_z = NEG_INF; // -- Find the longest distance plane, which is the one that has to be crossed to enter the bbox. // Storing the maximum distance in variable "dist_z". Distance will be =0 if no intersection exists or // if the x ray is already inside the bbox. if ( (dist_y>dist_x) && (dist_y>dist_z) ) dist_z = dist_y; // dist_z == dist_max else if (dist_x>dist_z) dist_z = dist_x; // else // dist_max = dist_z; // -- Move particle from the focal spot (current location) to the bbox wall surface (slightly inside): float x = position->x + dist_z * direction->x; float y = position->y + dist_z * direction->y; float z = position->z + dist_z * direction->z; // Check if the new position is outside the bbox. 
If not, return the moved location: if ( (x < voxel_data_CONST.offset.x) || (x > (voxel_data_CONST.size_bbox.x+voxel_data_CONST.offset.x)) || (y < voxel_data_CONST.offset.y) || (y > (voxel_data_CONST.size_bbox.y+voxel_data_CONST.offset.y)) || (z < voxel_data_CONST.offset.z) || (z > (voxel_data_CONST.size_bbox.z+voxel_data_CONST.offset.z)) ) { (*intersection_flag) = FLAG_OUTSIDE_VOXELS; // OLD: -111; // Particle outside the bbox AND not pointing to the bbox: set absvox<0 to skip interaction sampling. Leave particle position at focal spot. } else { position->x = x; position->y = y; position->z = z; } } //////////////////////////////////////////////////////////////////////////////// //! Upper limit of the number of random values sampled in a single track. //! I need a large leap for simulations containing a heavy element that causes a lot of delta scattering (eg, for a 15 keV simulation with bone and water I might have 10 delta scatterings; adding Tungsten I might have >650 deltas, and each delta iteration consumes two PRN). #define LEAP_DISTANCE 2048 // #define LEAP_DISTANCE 256 //!!DeBuG!! !!DBTv1.4!! 256 is too low when using Tungsten!!! //! Multipliers and moduli for the two MLCG in RANECU. #define a1_RANECU 40014 #define m1_RANECU 2147483563 #define a2_RANECU 40692 #define m2_RANECU 2147483399 //////////////////////////////////////////////////////////////////////////////// //! Initialize the pseudo-random number generator (PRNG) RANECU to a position //! far away from the previous history (leap frog technique). //! //! Each calculated seed initiates a consecutive and disjoint sequence of //! pseudo-random numbers with length LEAP_DISTANCE, that can be used to //! in a parallel simulation (Sequence Splitting parallelization method). //! The basic equation behind the algorithm is: //! S(i+j) = (a**j * S(i)) MOD m = [(a**j MOD m)*S(i)] MOD m , //! which is described in: //! P L'Ecuyer, Commun. ACM 31 (1988) p.742 //! //! This function has been adapted from "seedsMLCG.f", see: //! A Badal and J Sempau, Computer Physics Communications 175 (2006) p. 440-450 //! //! @param[in] history Particle bach number. //! @param[in] seed_input Initial PRNG seed input (used to initiate both MLCGs in RANECU). //! @param[out] seed Initial PRNG seeds for the present history. //! //////////////////////////////////////////////////////////////////////////////// __device__ inline void init_PRNG(int history_batch, int histories_per_thread, int seed_input, int2* seed) { // -- Move the RANECU generator to a unique position for the current batch of histories: // I have to use an "unsigned long long int" value to represent all the simulated histories in all previous batches // The maximum unsigned long long int value is ~1.8e19: if history >1.8e16 and LEAP_DISTANCE==1000, 'leap' will overflow. // **** 1st MLCG: unsigned long long int leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE); int y = 1; int z = a1_RANECU; // -- Calculate the modulo power '(a^leap)MOD(m)' using a divide-and-conquer algorithm adapted to modulo arithmetic for(;;) { // (A2) Halve n, and store the integer part and the residue if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2); { leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2); y = abMODm(m1_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m if (0==leap) break; // (A4) leap==0? 
==> finish } else // (leap is even) { leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2); } z = abMODm(m1_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m } // AjMODm1 = y; // Exponentiation finished: AjMODm = expMOD = y = a^j // -- Compute and display the seeds S(i+j), from the present seed S(i), using the previously calculated value of (a^j)MOD(m): // S(i+j) = [(a**j MOD m)*S(i)] MOD m // S_i = abMODm(m,S_i,AjMODm) seed->x = abMODm(m1_RANECU, seed_input, y); // Using the input seed as the starting seed // **** 2nd MLCG (repeating the previous calculation for the 2nd MLCG parameters): leap = ((unsigned long long int)(history_batch+1))*(histories_per_thread*LEAP_DISTANCE); y = 1; z = a2_RANECU; for(;;) { // (A2) Halve n, and store the integer part and the residue if (0!=(leap&01)) // (bit-wise operation for MOD(leap,2), or leap%2 ==> proceed if leap is an odd number) Equivalent: t=(short)(leap%2); { leap >>= 1; // Halve n moving the bits 1 position right. Equivalent to: leap=(leap/2); y = abMODm(m2_RANECU,z,y); // (A3) Multiply y by z: y = [z*y] MOD m if (0==leap) break; // (A4) leap==0? ==> finish } else // (leap is even) { leap>>= 1; // Halve leap moving the bits 1 position right. Equivalent to: leap=(leap/2); } z = abMODm(m2_RANECU,z,z); // (A5) Square z: z = [z*z] MOD m } // AjMODm2 = y; seed->y = abMODm(m2_RANECU, seed_input, y); // Using the input seed as the starting seed } ///////////////////////////////////////////////////////////////////// //! Calculate "(a1*a2) MOD m" with 32-bit integers and avoiding //! the possible overflow, using the Russian Peasant approach //! modulo m and the approximate factoring method, as described //! in: L'Ecuyer and Cote, ACM Trans. Math. Soft. 17 (1991). //! //! This function has been adapted from "seedsMLCG.f", see: //! Badal and Sempau, Computer Physics Communications 175 (2006) //! //! @param[in] m,a,s MLCG parameters //! @return (a1*a2) MOD m // // Input: 0 < a1 < m // 0 < a2 < m // // Return value: (a1*a2) MOD m // ///////////////////////////////////////////////////////////////////// __device__ __host__ inline int abMODm(int m, int a, int s) { // CAUTION: the input parameters are modified in the function but should not be returned to the calling function! (pass by value!) int q, k; int p = -m; // p is always negative to avoid overflow when adding // ** Apply the Russian peasant method until "a =< 32768": while (a>32768) // We assume '32' bit integers (4 bytes): 2^(('32'-2)/2) = 32768 { if (0!=(a&1)) // Store 's' when 'a' is odd Equivalent code: if (1==(a%2)) { p += s; if (p>0) p -= m; } a >>= 1; // Half a (move bits 1 position right) Equivalent code: a = a/2; s = (s-m) + s; // Double s (MOD m) if (s<0) s += m; // (s is always positive) } // ** Employ the approximate factoring method (a is small enough to avoid overflow): q = (int) m / a; k = (int) s / q; s = a*(s-k*q)-k*(m-q*a); while (s<0) s += m; // ** Compute the final result: p += s; if (p<0) p += m; return p; } //////////////////////////////////////////////////////////////////////////////// //! Pseudo-random number generator (PRNG) RANECU returning a float value //! (single precision version). //! //! @param[in,out] seed PRNG seed (seed kept in the calling function and updated here). //! @return PRN double value in the open interval (0,1) //! 
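//  Illustrative consistency check (host-side sketch, not part of the simulation; the function name "check_leapfrog" is hypothetical):
//  advancing the first MLCG of ranecu one step at a time must reproduce the single jump S(j) = [(a^j MOD m)*S(0)] MOD m
//  computed with abMODm, which is exactly the relation init_PRNG relies on to give each thread a disjoint sub-sequence.
//
//     void check_leapfrog(int s0, int j)
//     {
//       int s = s0;
//       for (int i = 0; i < j; i++)
//         s = abMODm(m1_RANECU, a1_RANECU, s);       // j sequential steps:  s = (a1*s) MOD m1
//
//       int y = 1, z = a1_RANECU, n = j;             // compute (a1^j) MOD m1 by repeated squaring (equivalent to the loop in init_PRNG)
//       while (n > 0)
//       {
//         if (n & 1)  y = abMODm(m1_RANECU, z, y);
//         z = abMODm(m1_RANECU, z, z);
//         n >>= 1;
//       }
//       int s_jump = abMODm(m1_RANECU, s0, y);       // single leap:  S(j) = [(a1^j MOD m1)*S(0)] MOD m1
//       // s and s_jump must be identical for any 0 < s0 < m1 and j >= 0.
//     }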
//////////////////////////////////////////////////////////////////////////////// __device__ inline float ranecu(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; return (__int2float_rn(i2)*4.65661305739e-10f); // 4.65661305739e-10 == 1/2147483563 } //////////////////////////////////////////////////////////////////////////////// //! Pseudo-random number generator (PRNG) RANECU returning a double value. //////////////////////////////////////////////////////////////////////////////// __device__ inline double ranecu_double(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; return (__int2double_rn(i2)*4.6566130573917692e-10); } //////////////////////////////////////////////////////////////////////////////// __host__ inline double ranecu_double_CPU(int2* seed) { int i1 = (int)(seed->x/53668); seed->x = 40014*(seed->x-i1*53668)-i1*12211; int i2 = (int)(seed->y/52774); seed->y = 40692*(seed->y-i2*52774)-i2*3791; if (seed->x < 0) seed->x += 2147483563; if (seed->y < 0) seed->y += 2147483399; i2 = seed->x-seed->y; if (i2 < 1) i2 += 2147483562; return ((double)(i2)*4.6566130573917692e-10); } //////////////////////////////////////////////////////////////////////////////// //! Find the voxel that contains the current position. //! Report the voxel absolute index and the x,y,z indices. //! The structure containing the voxel number and size is read from CONSTANT memory. //! //! @param[in] position Particle position //! @param[out] voxel_coord Pointer to three integer values (short3*) that will store the x,y and z voxel indices. //! @return Returns "absvox", the voxel number where the particle is //! located (negative if position outside the voxel bbox). //! //////////////////////////////////////////////////////////////////////////////// __device__ inline unsigned int locate_voxel(float3 p, short3* voxel_coord) { p.x -= voxel_data_CONST.offset.x; // Translate the coordinate system to a reference where the voxel's lower back corner is at the origin p.y -= voxel_data_CONST.offset.y; p.z -= voxel_data_CONST.offset.z; if ( (p.y < EPS) || (p.y > (voxel_data_CONST.size_bbox.y-EPS)) || (p.x < EPS) || (p.x > (voxel_data_CONST.size_bbox.x-EPS)) || (p.z < EPS) || (p.z > (voxel_data_CONST.size_bbox.z-EPS)) ) { // -- Particle escaped the voxelized geometry: return FLAG_OUTSIDE_VOXELS; // OLD CODE: return -1; !!DBTv1.4!! } // -- Particle inside the voxelized geometry, find current voxel: // The truncation from float to integer could give troubles for negative coordinates but this will never happen thanks to the IF at the begining of this function. 
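  //     (Worked example of the potential problem: a plain C cast truncates towards zero, so (int)(-0.3f) gives 0, while the floor-style
  //      conversion __float2int_rd(-0.3f) gives -1; because the boundary check above guarantees p.x, p.y, p.z >= EPS, both conversions
  //      agree here and a negative coordinate can never push the index below into the wrong voxel.)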
// (no need to use the CUDA function to convert float to integer rounding down (towards minus infinite): __float2int_rd) register int voxel_coord_x, voxel_coord_y, voxel_coord_z; voxel_coord_x = __float2int_rd(p.x * voxel_data_CONST.inv_voxel_size.x); voxel_coord_y = __float2int_rd(p.y * voxel_data_CONST.inv_voxel_size.y); voxel_coord_z = __float2int_rd(p.z * voxel_data_CONST.inv_voxel_size.z); voxel_coord->x = (short int) voxel_coord_x; // Output the voxel coordinates as short int (2 bytes) instead of int (4 bytes) to save registers; avoid type castings in the calculation of the return value. voxel_coord->y = (short int) voxel_coord_y; voxel_coord->z = (short int) voxel_coord_z; return ((unsigned int)(voxel_coord_x + voxel_coord_y*(voxel_data_CONST.num_voxels.x)) + ((unsigned int)voxel_coord_z)*(voxel_data_CONST.num_voxels.x)*(voxel_data_CONST.num_voxels.y)); } ////////////////////////////////////////////////////////////////////// //! Rotates a vector; the rotation is specified by giving //! the polar and azimuthal angles in the "self-frame", as //! determined by the vector to be rotated. //! This function is a literal translation from Fortran to C of //! PENELOPE (v. 2006) subroutine "DIRECT". //! //! @param[in,out] (u,v,w) input vector (=d) in the lab. frame; returns the rotated vector components in the lab. frame //! @param[in] costh cos(theta), angle between d before and after turn //! @param[in] phi azimuthal angle (rad) turned by d in its self-frame // // Output: // (u,v,w) -> rotated vector components in the lab. frame // // Comments: // -> (u,v,w) should have norm=1 on input; if not, it is // renormalized on output, provided norm>0. // -> The algorithm is based on considering the turned vector // d' expressed in the self-frame S', // d' = (sin(th)cos(ph), sin(th)sin(ph), cos(th)) // and then apply a change of frame from S' to the lab // frame. S' is defined as having its z' axis coincident // with d, its y' axis perpendicular to z and z' and its // x' axis equal to y'*z'. The matrix of the change is then // / uv/rho -v/rho u \ // S ->lab: | vw/rho u/rho v | , rho=(u^2+v^2)^0.5 // \ -rho 0 w / // -> When rho=0 (w=1 or -1) z and z' are parallel and the y' // axis cannot be defined in this way. Instead y' is set to // y and therefore either x'=x (if w=1) or x'=-x (w=-1) ////////////////////////////////////////////////////////////////////// __device__ inline void rotate_double(float3* direction, double costh, double phi) // The direction vector is single precision but the rotation is performed in double precision for increased accuracy. { double DXY, NORM, cosphi, sinphi, SDT; DXY = direction->x*direction->x + direction->y*direction->y; sincos(phi, &sinphi,&cosphi); // Calculate the SIN and COS at the same time. sinphi = sin(phi); cosphi = cos(phi); // **** Ensure normalisation NORM = DXY + direction->z*direction->z; // !!DeBuG!! Check if it is really necessary to renormalize in a real simulation!! 
if (fabs(NORM-1.0)>1.0e-14) { NORM = rsqrt(NORM); direction->x = NORM*direction->x; direction->y = NORM*direction->y; direction->z = NORM*direction->z; DXY = direction->x*direction->x + direction->y*direction->y; } if (DXY>1.0e-28) { SDT = sqrt((1.0-costh*costh)/DXY); float direction_x_in = direction->x; direction->x = direction->x*costh + SDT*(direction_x_in*direction->z*cosphi-direction->y*sinphi); direction->y = direction->y*costh+SDT*(direction->y*direction->z*cosphi+direction_x_in*sinphi); direction->z = direction->z*costh-DXY*SDT*cosphi; } else { SDT = sqrt(1.0-costh*costh); direction->y = SDT*sinphi; if (direction->z>0.0) { direction->x = SDT*cosphi; direction->z = costh; } else { direction->x =-SDT*cosphi; direction->z =-costh; } } } ////////////////////////////////////////////////////////////////////// // *********************************************************************** // * Translation of PENELOPE's "SUBROUTINE GRAa" from FORTRAN77 to C * // *********************************************************************** //! Sample a Rayleigh interaction using the sampling algorithm //! used in PENELOPE 2006. //! //! @param[in] energy Particle energy (not modified with Rayleigh) //! @param[out] costh_Rayleigh Cosine of the angular deflection //! @param[in] material Current voxel material // // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // C PENELOPE/PENGEOM (version 2006) C // C Copyright (c) 2001-2006 C // C Universitat de Barcelona C // C Permission to use, copy, modify, distribute and sell this software C // C and its documentation for any purpose is hereby granted without C // C fee, provided that the above copyright notice appears in all C // C copies and that both that copyright notice and this permission C // C notice appear in all supporting documentation. The Universitat de C // C Barcelona makes no representations about the suitability of this C // C software for any purpose. It is provided "as is" without express C // C or implied warranty. C // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC ////////////////////////////////////////////////////////////////////// __device__ inline void GRAa(float *energy, double *costh_Rayleigh, int *mat, float *pmax_current, int2 *seed, struct rayleigh_struct* cgra) { /* **** Energy grid and interpolation constants for the current energy. */ double xmax = ((double)*energy) * 8.065535669099010e-5; // 8.065535669099010e-5 == 2.0*20.6074/510998.918 double x2max = min_value( (xmax*xmax) , ((double)cgra->xco[(*mat+1)*NP_RAYLEIGH - 1]) ); // Get the last tabulated value of xco for this mat if (xmax < 0.01) { do { *costh_Rayleigh = 1.0 - ranecu_double(seed) * 2.0; } while ( ranecu_double(seed) > (((*costh_Rayleigh)*(*costh_Rayleigh)+1.0)*0.5) ); return; } for(;;) // (Loop will iterate everytime the sampled value is rejected or above maximum) { double ru = ranecu_double(seed) * (double)(*pmax_current); // Pmax for the current energy is entered as a parameter /* **** Selection of the interval (binary search within pre-calculated limits). 
*/ int itn = (int)(ru * (NP_RAYLEIGH-1)); // 'itn' will never reach the last interval 'NP_RAYLEIGH-1', but this is how RITA is implemented in PENELOPE int i__ = (int)cgra->itlco[itn + (*mat)*NP_RAYLEIGH]; int j = (int)cgra->ituco[itn + (*mat)*NP_RAYLEIGH]; if ((j - i__) > 1) { do { register int k = (i__ + j)>>1; // >>1 == /2 if (ru > cgra->pco[k -1 + (*mat)*NP_RAYLEIGH]) i__ = k; else j = k; } while ((j - i__) > 1); } /* **** Sampling from the rational inverse cumulative distribution. */ int index = i__ - 1 + (*mat)*NP_RAYLEIGH; double rr = ru - cgra->pco[index]; double xx; if (rr > 1e-16) { double d__ = (double)(cgra->pco[index+1] - cgra->pco[index]); float aco_index = cgra->aco[index], bco_index = cgra->bco[index], xco_index = cgra->xco[index]; // Avoid multiple accesses to the same global variable xx = (double)xco_index + (double)(aco_index + 1.0f + bco_index)* d__* rr / (d__*d__ + (aco_index*d__ + bco_index*rr) * rr) * (double)(cgra->xco[index+1] - xco_index); } else { xx = cgra->xco[index]; } if (xx < x2max) { // Sampled value below maximum possible value: *costh_Rayleigh = 1.0 - 2.0 * xx / x2max; // !!DeBuG!! costh_Rayleigh in double precision, but not all intermediate steps are!? /* **** Rejection: */ if (ranecu_double(seed) < (((*costh_Rayleigh)*(*costh_Rayleigh) + 1.0)*0.5)) break; // Sample value not rejected! break loop and return. } } } /* graa */ ////////////////////////////////////////////////////////////////////////// // *********************************************************************** // * Translation of PENELOPE's "SUBROUTINE GCOa" from FORTRAN77 to C * // ********************************************************************* * //! Random sampling of incoherent (Compton) scattering of photons, using //! the sampling algorithm from PENELOPE 2006: //! Relativistic impulse approximation with analytical one-electron Compton profiles // NOTE: In penelope, Doppler broadening is not used for E greater than 5 MeV. // We don't use it in GPU to reduce the lines of code and prevent using COMMON/compos/ZT(M) //! @param[in,out] energy incident and final photon energy (eV) //! @param[out] costh_Compton cosine of the polar scattering angle //! @param[in] material Current voxel material //! @param[in] seed RANECU PRNG seed // // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // C PENELOPE/PENGEOM (version 2006) C // C Copyright (c) 2001-2006 C // C Universitat de Barcelona C // C Permission to use, copy, modify, distribute and sell this software C // C and its documentation for any purpose is hereby granted without C // C fee, provided that the above copyright notice appears in all C // C copies and that both that copyright notice and this permission C // C notice appear in all supporting documentation. The Universitat de C // C Barcelona makes no representations about the suitability of this C // C software for any purpose. It is provided "as is" without express C // C or implied warranty. 
C // CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC // // ************************************************************************ __device__ inline void GCOa(float *energy, double *costh_Compton, int *mat, int2 *seed, struct compton_struct* cgco_SHARED) { float s, a1, s0, af, ek, ek2, ek3, tau, pzomc, taumin; float rn[MAX_SHELLS]; double cdt1; // Some variables used in PENELOPE have been eliminated to save register: float aux, taum2, fpzmax, a, a2, ek1 ,rni, xqc, fpz, pac[MAX_SHELLS]; int i__; int my_noscco = cgco_SHARED->noscco[*mat]; // Store the number of oscillators for the input material in a local variable //!!VERBOSE!! static int warning_flag_1 = -1, warning_flag_2 = -1, warning_flag_3 = -1; // Write warnings for the CPU code, but only once. !!DeBuG!! ek = *energy * 1.956951306108245e-6f; // (1.956951306108245e-6 == 1.0/510998.918) ek2 = ek * 2.f + 1.f; ek3 = ek * ek; // ek1 = ek3 - ek2 - 1.; taumin = 1.f / ek2; // taum2 = taumin * taumin; a1 = logf(ek2); // a2 = a1 + ek * 2. * (ek + 1.) * taum2; // a2 was used only once, code moved below /* **** Incoherent scattering function for theta=PI. */ s0 = 0.0f; for (i__ = 0; i__ < my_noscco; i__++) { register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS]; if (temp < *energy) { register float aux = *energy * (*energy - temp) * 2.f; pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f; // 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!! // ORIGINAL: pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f); if (pzomc > 0.0f) temp = (0.707106781186545f+pzomc*1.4142135623731f) * (0.707106781186545f+pzomc*1.4142135623731f); else temp = (0.707106781186545f-pzomc*1.4142135623731f) * (0.707106781186545f-pzomc*1.4142135623731f); temp = 0.5f * expf(0.5f - temp); // Calculate EXP outside the IF to avoid branching if (pzomc > 0.0f) temp = 1.0f - temp; s0 += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp; } } /* **** Sampling tau. */ do { if (ranecu(seed)*/*a2=*/(a1+2.*ek*(ek+1.f)*taumin*taumin) < a1) { tau = powf(taumin, ranecu(seed)); // !!DeBuG!! "powf()" has a big error (7 ULP), the double version has only 2!! } else { tau = sqrtf(1.f + ranecu(seed) * (taumin * taumin - 1.f)); } cdt1 = (double)(1.f-tau) / (((double)tau)*((double)*energy)*1.956951306108245e-6); // !!DeBuG!! The sampled COS will be double precision, but TAU is not!!! if (cdt1 > 2.0) cdt1 = 1.99999999; // !!DeBuG!! Make sure that precision error in POW, SQRT never gives cdt1>2 ==> costh_Compton<-1 /* **** Incoherent scattering function. */ s = 0.0f; for (i__ = 0; i__ < my_noscco; i__++) { register float temp = cgco_SHARED->uico[*mat + i__*MAX_MATERIALS]; if (temp < *energy) { register float aux = (*energy) * (*energy - temp) * ((float)cdt1); if ((aux>1.0e-12f)||(temp>1.0e-12f)) // !!DeBuG!! Make sure the SQRT argument is never <0, and that we never get 0/0 -> NaN when aux=temp=0 !! { pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) * rsqrtf(aux + aux + temp * temp) * 1.956951306108245e-6f; // 1.956951306108245e-6 = 1.0/510998.918f // Version using the reciprocal of sqrt in CUDA: faster and more accurate!! // ORIGINAL: pzomc = cgco_SHARED->fj0[*mat + i__*MAX_MATERIALS] * (aux - temp * 510998.918f) / (sqrtf(aux + aux + temp * temp) * 510998.918f); } else { pzomc = 0.002f; // !!DeBuG!! 
Using a rough approximation to a sample value of pzomc found using pure double precision: NOT RIGUROUS! But this code is expected to be used very seldom, only in extreme cases. //!!VERBOSE!! if (warning_flag_1<0) //!!VERBOSE!! { warning_flag_1 = +1; // Disable warning, do not show again //!!VERBOSE!! // printf(" [... Small numerical precision error detected computing \"pzomc\" in GCOa (this warning will not be repeated).]\n i__=%d, aux=%.14f, temp=%.14f, pzomc(forced)=%.14f, uico=%.14f, energy=%.7f, cgco_SHARED->fj0=%.14f, mat=%d, cdt1=%.14lf\n", (int)i__, aux, temp, pzomc, cgco_SHARED->uico[*mat+i__*MAX_MATERIALS], *energy, cgco_SHARED->fj0[*mat+i__*MAX_MATERIALS], (int)*mat, cdt1); // !!DeBuG!! //!!VERBOSE!! } } temp = pzomc * 1.4142135623731f; if (pzomc > 0.0f) temp = 0.5f - (temp + 0.70710678118654502f) * (temp + 0.70710678118654502f); // Calculate exponential argument else temp = 0.5f - (0.70710678118654502f - temp) * (0.70710678118654502f - temp); temp = 0.5f * expf(temp); // All threads will calculate the expf together if (pzomc > 0.0f) temp = 1.0f - temp; s += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * temp; rn[i__] = temp; } } } while( (ranecu(seed)*s0) > (s*(1.0f+tau*(/*ek1=*/(ek3 - ek2 - 1.0f)+tau*(ek2+tau*ek3)))/(ek3*tau*(tau*tau+1.0f))) ); // **** Rejection function *costh_Compton = 1.0 - cdt1; /* **** Target electron shell. */ for (;;) { register float temp = s*ranecu(seed); float pac = 0.0f; int ishell = my_noscco - 1; // First shell will have number 0 for (i__ = 0; i__ < (my_noscco-1); i__++) // !!DeBuG!! Iterate to (my_noscco-1) only: the last oscillator is excited in case all other fail (no point in double checking) ?? { pac += cgco_SHARED->fco[*mat + i__*MAX_MATERIALS] * rn[i__]; // !!DeBuG!! pac[] is calculated on the fly to save registers! if (pac > temp) // pac[] is calculated on the fly to save registers! { ishell = i__; break; } } /* **** Projected momentum of the target electron. */ temp = ranecu(seed) * rn[ishell]; if (temp < 0.5f) { pzomc = (0.70710678118654502f - sqrtf(0.5f - logf(temp + temp))) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f); } else { pzomc = (sqrtf(0.5f - logf(2.0f - 2.0f*temp)) - 0.70710678118654502f) / (cgco_SHARED->fj0[*mat + ishell * MAX_MATERIALS] * 1.4142135623731f); } if (pzomc < -1.0f) { continue; // re-start the loop } /* **** F(EP) rejection. */ temp = tau * (tau - (*costh_Compton) * 2.f) + 1.f; // this variable was originally called "xqc" // af = sqrt( max_value(temp,1.0e-30f) ) * (tau * (tau - *costh_Compton) / max_value(temp,1.0e-30f) + 1.f); //!!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!! if (temp>1.0e-20f) // !!DeBuG!! Make sure the SQRT argument is never <0, and that I don't divide by zero!! { af = sqrtf(temp) * (tau * (tau - ((float)(*costh_Compton))) / temp + 1.f); } else { // When using single precision, it is possible (but very uncommon) to get costh_Compton==1 and tau==1; then temp is 0 and 'af' can not be calculated (0/0 -> nan). Analysing the results obtained using double precision, we found that 'af' would be almost 0 in this situation, with an "average" about ~0.002 (this is just a rough estimation, but using af=0 the value would never be rejected below). af = 0.00200f; // !!DeBuG!! //!!VERBOSE!! if (warning_flag_2<0) //!!VERBOSE!! { warning_flag_2 = +1; // Disable warning, do not show again //!!VERBOSE!! printf(" [... 
Small numerical precision error detected computing \"af\" in GCOa (this warning will not be repeated)].\n xqc=%.14f, af(forced)=%.14f, tau=%.14f, costh_Compton=%.14lf\n", temp, af, tau, *costh_Compton); // !!DeBuG!! //!!VERBOSE!! } } if (af > 0.0f) { temp = af * 0.2f + 1.f; // this variable was originally called "fpzmax" } else { temp = 1.f - af * 0.2f; } if ( ranecu(seed)*temp < /*fpz =*/(af * max_value( min_value(pzomc,0.2f) , -0.2f ) + 1.f) ) { break; } } /* **** Energy of the scattered photon. */ { register float t, b1, b2, temp; t = pzomc * pzomc; b1 = 1.f - t * tau * tau; b2 = 1.f - t * tau * ((float)(*costh_Compton)); temp = sqrtf( fabsf(b2 * b2 - b1 * (1.0f - t)) ); if (pzomc < 0.0f) temp *= -1.0f; // !Error! energy may increase (slightly) due to inacurate calculation! !!DeBuG!! t = (tau / b1) * (b2 + temp); if (t > 1.0f) { //!!VERBOSE!! if (warning_flag_3<0) //!!VERBOSE!! { warning_flag_3 = +1; // Disable warning, do not show again //!!VERBOSE!! printf("\n [... a Compton event tried to increase the x ray energy due to precision error. Keeping initial energy. (This warning will not be repeated.)]\n scaling=%.14f, costh_Compton=%.14lf\n", t, *costh_Compton); // !!DeBuG!! //!!VERBOSE!! } t = 1.0f; // !!DeBuG!! Avoid increasing energy by hand!!! not nice!! } (*energy) *= t; // (*energy) *= (tau / b1) * (b2 + temp); // Original PENELOPE code } } // [End subroutine GCOa] //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //! Tally the depose deposited inside each material. //! This function is called whenever a particle suffers a Compton or photoelectric //! interaction. The energy released in each interaction is added and later in the //! report function the total deposited energy is divided by the total mass of the //! material in the voxelized object to get the dose. This naturally accounts for //! multiple densities for voxels with the same material (not all voxels have same mass). //! Electrons are not transported in MC-GPU and therefore we are approximating //! that the dose is equal to the KERMA (energy released by the photons alone). //! This approximation is acceptable when there is electronic equilibrium and //! when the range of the secondary electrons is shorter than the organ size. //! //! The function uses atomic functions for a thread-safe access to the GPU memory. //! We can check if this tally was disabled in the input file checking if the array //! materials_dose was allocated in the GPU (disabled if pointer = NULL). //! //! //! @param[in] Edep Energy deposited in the interaction //! @param[in] material Current material id number //! @param[out] materials_dose ulonglong2 array storing the mateials dose [in eV/g] and dose^2 (ie, uncertainty). //////////////////////////////////////////////////////////////////////////////// __device__ inline void tally_materials_dose(float* Edep, int* material, ulonglong2* materials_dose) { // Note: with many histories and few materials the materials_dose integer variables may overflow!! Using double precision floats would be better. Single precision is not good enough because adding small energies to a large counter would give problems. atomicAdd(&materials_dose[*material].x, __float2ull_rn((*Edep)*SCALE_eV) ); // Energy deposited at the material, scaled by the factor SCALE_eV and rounded. 
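  // Sketch of the report-time conversion implied by the comment above (shown only for reference: "mass_g", "num_histories" and the report
  // code are not part of this function, and the uncertainty formula is just one common estimator):
  //     double E_eV  = ((double)materials_dose[mat].x) / SCALE_eV;                  // total energy deposited in material 'mat' [eV]
  //     double E2    =  (double)materials_dose[mat].y;                              // accumulated Edep^2 [eV^2] (not scaled, see the counter below)
  //     double dose  = E_eV / mass_g;                                               // KERMA approximation of the material dose [eV/g]
  //     double sigma = sqrt(fabs(E2 - E_eV*E_eV/num_histories)) / mass_g;           // ~1 sigma statistical uncertainty of the dose [eV/g]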
atomicAdd(&materials_dose[*material].y, __float2ull_rn((*Edep)*(*Edep)) ); // Square of the dose to estimate standard deviation (not using SCALE_eV for std_dev to prevent overflow) // OLD: materials_dose[*material].x += (unsigned long long int)((*Edep)*SCALE_eV + 0.5f); return; } /* !!inputDensity!! Replacing the hardcoded density_LUT look-up table function with an array in RAM or GPU constant memory: OLD LOOK-UP TABLE USED IN VICTRE SIMULATIONS: //////////////////////////////////////////////////////////////////////////////// //! Look up table that returns the pre-defined density of the input material. //////////////////////////////////////////////////////////////////////////////// __device__ __host__ // Function will be callable from host and also from device inline float density_LUT(int material) //!!FixedDensity_DBT!! { float density; switch(material) // Assuming that first material is number 0 { case 0: // air density = 0.0012f; break; case 1: // fat density = 0.92f; break; case 3: // glandular density = 1.035f; // - Johns&Yaffe1986: 1.035 ; Nominal: 1.06; break; case 10: // Compression Paddle density = 1.06; // polystyrene dens = 1.06 ; PMMA dens = 1.19 !!DBTv1.5!! break; case 2: // skin density = 1.090f; break; case 4: // nipple density = 1.090f; // -> skin? break; // case 6: // muscle // density = 1.05f; // break; case 5: // ligament(88) density = 1.120f; // -> connective Woodard? break; // case 9: // terminal duct lobular unit(95) // density = 1.04f; // -> muscle? // break; // case 7: // duct(125) // density = 1.05f; // break; case 8: // artery(150) and vein(225) density = 1.0f; break; case 11: // Mass/Signal density = 1.06f; // - Johns&Yaffe1986: Min: 1.027, Mean: 1.044, Max: 1.058 ; Nominal: 1.06; break; case 12: // ==Microcalcification density = 1.781f; // 1.781=0.84*2.12 -> reduced density a factor 0.84 according to: Hadjipanteli et al., Phys Med Biol 62 p 858 (2017) // Nominal density Calcium_oxalate=2.12 break; case 13: // ==Tungsten edge density = 19.30f; // !!detectorModel!! break; case 14: // ==a-Se detector density = 4.50f; // !!detectorModel!! break; default: density = 1.05f; // Using the default value for materials that have the same density. } return density; } */ //////////////////////////////////////////////////////////////////////////////// //! Tally a radiographic projection image using a detector layer with the input thickness and material composition. //! This model will reproduce the geometric spreading of the point spread function and the real detector transmission. //////////////////////////////////////////////////////////////////////////////// __device__ inline void tally_image(float* energy, float3* position, float3* direction, signed char* scatter_state, unsigned long long int* image, struct source_struct* source_data_SHARED, struct detector_struct* detector_data_SHARED, int2* seed) //!!detectorModel!! { // Rotate direction to the coordinate system with the detector on XZ plane (Y=0): // !!DBTv1.4!! apply_rotation(direction, detector_data_SHARED->rot_inv); //!!DBTv1.4!! // Check the angle between the x-ray direction and the Y axis (normal of the detector); return if the particle is moving away from the detector: if (direction->y < 0.0175f) return; // Reject particle: angle towards Y axis larger than 89 deg --> particle moving parallel or away from the detector! // Translate coordinate system to have detector centered at origin: // !!DBTv1.4!! 
position->x -= detector_data_SHARED->center.x; position->y -= detector_data_SHARED->center.y; position->z -= detector_data_SHARED->center.z; // Rotate coordinate system to have detector on XZ plane (Y=0): // !!DBTv1.4!! apply_rotation(position, detector_data_SHARED->rot_inv); // Sample the distance to the next interaction in the material of the detector or antiscatter grid protective covers, to determine if the particle will be absorbed in the covers: !!DBTv1.5!! // ASSUMPTIONS: neglecting scattering and fluorescence in the covers; using MFP at average energy spectrum, not the real MFP at current energy. !!DeBuG!! if (detector_data_SHARED->cover_MFP>0.0f) if ( (-detector_data_SHARED->cover_MFP*logf(ranecu(seed))) < detector_data_SHARED->cover_thickness ) // !!DBTv1.5!! return; // Do not tally particle lost in the cover !!DBTv1.5!! // Distance from the particle position to the detector at plane XZ (Y=0): float dist_detector = -position->y/direction->y; // Sample and add the extra distance the particle needs to travel to reach the first interaction inside the scintillator (particle not detected if interaction behind thickness): !!detectorModel!! dist_detector += -detector_data_SHARED->scintillator_MFP*logf(ranecu(seed)); // Add distance to next interaction inside the detector material to the detector distance //!!detectorModel!! // *** Translate the particle to the detector plane: position->x = position->x + dist_detector*direction->x; position->y = position->y + dist_detector*direction->y; position->z = position->z + dist_detector*direction->z; if (position->y > detector_data_SHARED->scintillator_thickness) return; // Do not tally energy if particle does not interact inside the detector layer. // !!detectorModel!! !!DBTv1.4!! // *** Find if particle interacted inside the detector bbox, and compute pixel number (taking into account a possible offset of the detector far from the default centered with the source): int pixel_coord_x = __float2int_rd((position->x - detector_data_SHARED->offset.x + 0.5f*detector_data_SHARED->width_X) * detector_data_SHARED->inv_pixel_size_X); // CUDA intrinsic function converts float to integer rounding down (to minus inf) if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x)) { int pixel_coord_z = __float2int_rd((position->z - detector_data_SHARED->offset.y + 0.5f*detector_data_SHARED->height_Z) * detector_data_SHARED->inv_pixel_size_Z); if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y)) { // --Sample if the particle is absorbed in the antiscatter grid (scatter or fluorescence in the grid not simulated): if (detector_data_SHARED->grid_freq>0.0f) { if (ranecu(seed) > antiscatter_grid_transmission_prob(position, direction, detector_data_SHARED)) //!!DBTv1.5!! return; } // --Sample if all the energy is deposited in the pixel or if a fluorescence x-ray was generated and was able to escape detection: // (k-edge energies available at: http://www.esrf.eu/UsersAndScience/Experiments/StructMaterials/ID11/ID11UserGuide/ID11Edges) int flag_fluorescence = 0; float edep = *energy; if (*energy > detector_data_SHARED->kedge_energy) { if (ranecu(seed) < detector_data_SHARED->fluorescence_yield) { edep -= detector_data_SHARED->fluorescence_energy; // !!DBTv1.4!! Subtract the input average K fluorescence energy from the deposited energy. The fluorescence photon is simulated afterwards. flag_fluorescence = 1; // !!TrackFluorescence!! } } // -- Particle enters the detector! 
Tally the particle energy in the corresponding pixel (in integer SCALE_eV fractions of eV): // Using a CUDA atomic function (not available for global floats yet) to read and increase the pixel value in a single instruction, blocking interferences from other threads. // The offset for the primaries or scatter images are calculated considering that: // scatter_state=0 for non-scattered, =1 for Compton, =2 for Rayleigh, and =3 for multiple scatter. // - Do not count the energy deposited inside the top or bottom blocking (dead) layers of the detector (fluorescence still generated). !!BLOCKING_LAYER!! // After rotation, the detector top layer starts at Y=0 and grows towards positive Y (bottom), with radiation expected from negative Y, moving towards positive Y. if ((position->y > BLOCKING_LAYER_TOP) && (position->y < (detector_data_SHARED->scintillator_thickness-BLOCKING_LAYER_BOTTOM))) // !!BLOCKING_LAYER!! atomicAdd(( image + // Pointer to beginning of image array (int)(*scatter_state) * detector_data_SHARED->total_num_pixels + // Offset to corresponding scatter image (pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), // Offset to the corresponding pixel __float2ull_rn(edep*SCALE_eV) ); // Energy arriving at the pixel, scaled by the factor SCALE_eV and rounded. // The maximum unsigned long long int value is ~1.8e19: // *** Track Fluorescence inside detector: !!TrackFluorescence!! if (flag_fluorescence==1) { // -- Sample direction of emission of fluorescence photon isotropically: direction->z = 1.0f - 2.0*ranecu(seed); float sintheta = sqrtf(1.0f - direction->z*direction->z); float phi = (2.0f*PI)*ranecu(seed); float cos_phi, sin_phi; sincos(phi, &sin_phi, &cos_phi); direction->y = sintheta*sin_phi; direction->x = sintheta*cos_phi; // -- Sample distance to next fluorescence interaction inside scintillator, using the input MFP at the fluorescence energy: dist_detector = -detector_data_SHARED->fluorescence_MFP*logf(ranecu(seed)); // -- Tally fluorescence energy in the corresponding pixel, unless escaped: position->y = position->y + dist_detector*direction->y; if ((position->y > BLOCKING_LAYER_TOP) && (position->y < (detector_data_SHARED->scintillator_thickness-BLOCKING_LAYER_BOTTOM))) // !!BLOCKING_LAYER!! { position->x = position->x + dist_detector*direction->x; pixel_coord_x = __float2int_rd((position->x - detector_data_SHARED->offset.x + 0.5f*detector_data_SHARED->width_X) * detector_data_SHARED->inv_pixel_size_X); // CUDA intrinsic function converts float to integer rounding down (to minus inf) if ((pixel_coord_x>-1)&&(pixel_coord_x<detector_data_SHARED->num_pixels.x)) { position->z = position->z + dist_detector*direction->z; pixel_coord_z = __float2int_rd((position->z - detector_data_SHARED->offset.y + 0.5f*detector_data_SHARED->height_Z) * detector_data_SHARED->inv_pixel_size_Z); if ((pixel_coord_z>-1)&&(pixel_coord_z<detector_data_SHARED->num_pixels.y)) atomicAdd(( image + (int)(*scatter_state) * detector_data_SHARED->total_num_pixels + (pixel_coord_x + pixel_coord_z*(detector_data_SHARED->num_pixels.x)) ), __float2ull_rn(detector_data_SHARED->fluorescence_energy*SCALE_eV) ); // !!TrackFluorescence!! } } } } } } //////////////////////////////////////////////////////////////////////////////// //! Sample two random values with a Gaussian PDF. //! Uses the polar method to avoid expensive trigonometric calls implied by the alternative Box-Muller method. //! 
(**Code adapted from penEasyv20140609/penaux.F**) //////////////////////////////////////////////////////////////////////////////// __device__ inline void gausspdf(float *g1, float *g2, int2 *seed) { float x,y,u; do { x = 1.0f-2.0f*ranecu(seed); y = 1.0f-2.0f*ranecu(seed); u = x*x+y*y; } while ((u>=1.0f)||(u<1.0e-10f)); // Reject point and repeat float s = sqrtf(-2.0f*logf(u)/u); *g1 = x*s; // First Gaussian-distributed random variable *g2 = y*s; // Second independent Gaussian-distributed random variable } inline void gausspdf_double_CPU(double *g1, double *g2, int2 *seed) { double x,y,u; do { x = 1.0-2.0*ranecu_double_CPU(seed); y = 1.0-2.0*ranecu_double_CPU(seed); u = x*x+y*y; } while ((u>=1.0)||(u<1.0e-10)); // Reject point and repeat double s = sqrt(-2.0*log(u)/u); *g1 = x*s; // First Gaussian-distributed random variable *g2 = y*s; // Second independent Gaussian-distributed random variable } // //////////////////////////////////////////////////////////////////////////////// // //! Return a random value with a Gaussian PDF. // //! Uses the polar method to avoid expensive trigonometric calls implied by the alternative Box-Muller method. // // (**Code adapted from penEasyv20140609/penaux.F**) // //////////////////////////////////////////////////////////////////////////////// // __device__ inline float sample_gausspdf(int2 *seed) // { // float x,y,u; // do // { // x = 1.0f-2.0f*ranecu(seed); // y = 1.0f-2.0f*ranecu(seed); // u = x*x+y*y; // } while ((u>=1.0f)||(u<1.0e-10f)); // Reject point and repeat // return (x*sqrtf(-2.0f*logf(u)/u)); // Return Gaussian-distributed random value // } //////////////////////////////////////////////////////////////////////////////// //! Return a random value with a Gaussian PDF, with the distribution cropped at 2 sigma. //! Uses the polar method to avoid expensive trigonometric calls implied by the alternative Box-Muller method. // (**Code adapted from penEasyv20140609/penaux.F**) // // In a Gaussian distribution, 4.55% of sampled points are farther than 2*sigma; FWHM/2 = sqrt(2*ln(2))*sigma = 1.1774*sigma. // Cropping the Gaussian at 2 sigma we prevent generating photons unrealistically far from the focal spot center. // Experimental focal spot measurements show that the spot is quite sharp [A Burgess, "Focal spots: I. MTF separability", Invest Radiol 12, p. 
36-43 (1977)] // //////////////////////////////////////////////////////////////////////////////// __device__ inline float sample_gausspdf_below2sigma(int2 *seed) { float g; do // Iterate function until we get a value under 2*sigma { float x,y,u; do { x = 1.0f-2.0f*ranecu(seed); y = 1.0f-2.0f*ranecu(seed); u = x*x+y*y; } while ((u>=1.0f)||(u<1.0e-10f)); // Reject point and repeat float s = sqrtf(-2.0f*logf(u)/u); g = x*s; // First Gaussian-distributed random variable if (fabsf(g)<2.0f) break; // exit loop and return g = y*s; // Second independent Gaussian-distributed random variable } while (fabsf(g)>2.0f); return g; // Return Gaussian-distributed random value under 2*sigma } //!* Rotate input vector (x,y,z) around the input rotation axis (wx,wy,wz) for the input angle, using Rodrigues' formula to compute the rotation matrix (http://mathworld.wolfram.com/RodriguesRotationFormula.html) : __device__ __host__ inline void rotate_around_axis_Rodrigues(float *angle, float3 *w, float3 *p) { if (fabs(*angle)>1.0e-8f) // Apply rotation only if input angle is not 0 { float s, c; sincos(*angle, &s,&c); // Precompute sinus and cosinus of input angle float x0 = p->x; // Temporary copies float y0 = p->y; // Construct and apply rotation matrix using Rodrigues' formula: float m1 = c+(w->x)*(w->x)*(1-c); // m1 float m2 = (w->z)*s+(w->x)*(w->y)*(1-c); // m4 float m3 =-(w->y)*s+(w->x)*(w->z)*(1-c); // m7 p->x = x0*m1 + y0*m2 +(p->z)*m3; // x=x0*m1+y0*m4+z0*m7 m1 =-(w->z)*s+(w->x)*(w->y)*(1-c); // m2 m2 = c+(w->y)*(w->y)*(1-c); // m5 m3 = (w->x)*s+(w->y)*(w->z)*(1-c); // m8 p->y = x0*m1 + y0*m2 + (p->z)*m3; // y=x0*m2+y0*m5+z0*m8 m1 = (w->y)*s+(w->x)*(w->z)*(1-c); // m3 m2 =-(w->x)*s+(w->y)*(w->z)*(1-c); // m6 m3 = c+(w->z)*(w->z)*(1-c); // m9 p->z = x0*m1 + y0*m2 + (p->z)*m3; // z=x0*m3+y0*m6+z0*m9 } } //!* Rotate the TWO input vectors (x,y,z) around the input rotation axis (wx,wy,wz) for the input angle, using Rodrigues' formula to compute the rotation matrix (http://mathworld.wolfram.com/RodriguesRotationFormula.html) : //!* Rotating the two vectors together I can re-use the rotation matrix computed on the fly __device__ __host__ inline void rotate_2vectors_around_axis_Rodrigues(float *angle, float3 *w, float3 *p, float3 *v) { if (fabs(*angle)>1.0e-8f) // Apply rotation only if input angle is not 0 { float s, c; sincos(*angle, &s,&c); // Precompute sinus and cosinus of input angle float x0 = p->x, y0 = p->y; // Temporary copies float v0 = v->x, w0 = v->y; // Construct and apply rotation matrix using Rodrigues' formula: float m1 = c+(w->x)*(w->x)*(1-c); // m1 float m2 = (w->z)*s+(w->x)*(w->y)*(1-c); // m4 float m3 =-(w->y)*s+(w->x)*(w->z)*(1-c); // m7 p->x = x0*m1 + y0*m2 +(p->z)*m3; // x=x0*m1+y0*m4+z0*m7 v->x = v0*m1 + w0*m2 +(v->z)*m3; m1 =-(w->z)*s+(w->x)*(w->y)*(1-c); // m2 m2 = c+(w->y)*(w->y)*(1-c); // m5 m3 = (w->x)*s+(w->y)*(w->z)*(1-c); // m8 p->y = x0*m1 + y0*m2 + (p->z)*m3; // y=x0*m2+y0*m5+z0*m8 v->y = v0*m1 + w0*m2 + (v->z)*m3; m1 = (w->y)*s+(w->x)*(w->z)*(1-c); // m3 m2 =-(w->x)*s+(w->y)*(w->z)*(1-c); // m6 m3 = c+(w->z)*(w->z)*(1-c); // m9 p->z = x0*m1 + y0*m2 + (p->z)*m3; // z=x0*m3+y0*m6+z0*m9 v->z = v0*m1 + w0*m2 + (v->z)*m3; } } //!* Rotate the input vector (float3) multiplying by the input rotation matrix (float m[9]). 
__device__ __host__ inline void apply_rotation(float3 *v, float *m) { float tmp_x = v->x, tmp_y = v->y; v->x = tmp_x*m[0] + tmp_y*m[1] + v->z*m[2]; v->y = tmp_x*m[3] + tmp_y*m[4] + v->z*m[5]; v->z = tmp_x*m[6] + tmp_y*m[7] + v->z*m[8]; } //////////////////////////////////////////////////////////////////////////////// //! Analytical model of a 1D focused antiscatter grid based on the work of Day and Dance [Phys Med Biol 28, p. 1429-1433 (1983)]. //! The model returns the probability of transmission through the grid for the current x-ray direction. //! The position of the particle in the default reference frame with the detector centered at the origin and laying on the XZ plane is used to compute the focused grid angle. //! //! ASSUMPTIONS: //! - Currently the x-ray energy is not used: the attenuation at the average energy is assumed for every x-ray. !!DeBuG!! //! - Assuming that the focal length of the grid is always identical to the input source-to-detector distance (sdd). !!DeBuG!! //! - The Day and Dance equations are for an uniform oblique grid and the change in angle for consecutive strips is not modeled. As they explain, this is unlikely to be relevant because //! the prob of x-rays traversing many strips is extremely low, and consecutive strips have very similar angulation. //! //! - Using double precision for variables that have to be inverted to avoid inaccuracy for collimated rays (u2 close to 0). Using exclusively double takes 4 times more than exclusively floats! //////////////////////////////////////////////////////////////////////////////// __device__ inline float antiscatter_grid_transmission_prob(float3* position, float3* direction, struct detector_struct* detector_data_SHARED) //!!DBTv1.5!! { // -- Compute grid angle at the current location on the detector: // The default MC-GPU detector orientation is on the XZ plane, perpendicular to Y axis, pointing towards Y. I have to transform to Day&Dance 1983 reference on XY plane, perpendicular to Z axis. // The position is already shifted to have the origin at the center of the detector: I can use the position as is to compute the incidence angle -> grid angle for focused grid. double grid_angle, u, w; if (detector_data_SHARED->grid_ratio<0.0f) { // <0 --> input orientation == 0 ==> 1D collimated grid with strips perpendicular to lateral direction X (mammo style), as in Day&Dance1983. grid_angle = (0.5*PI) - atan2(position->x, detector_data_SHARED->sdd); // A 0 deg angle between the incident beam and the strips corresponds to a grid angle (sigma) of 90 deg = PI/2 u = direction->x; w = direction->y; } else { // >0 --> input orientation == 1 ==> 1D collimated grid with strips parallel to lateral direction X and perpendicular to Z direction (DBT style): switch Z and X axis grid_angle = (0.5*PI) - atan2(position->z, detector_data_SHARED->sdd); u = direction->z; w = direction->y; } float C = 1.0f/detector_data_SHARED->grid_freq; float d2 = detector_data_SHARED->grid_strip_thickness/sinf(grid_angle); // Strip thickness in grid reference system (eq. page 1429, Day&Dance1983) float D2 = C - d2; // Distance between consecutive grid strips float h = fabsf(detector_data_SHARED->grid_ratio) * D2; // Compute the eight of the grid strips, according to the input grid ratio. Using absolute value bc sign encodes grid orientation in my implementation. double u2 = fabs(u - w/tan(grid_angle)); // (eq. 1, Day&Dance1983) Note: u2 is the direction RATIO in the oblique referrence system, not the direction COSINE. if (u2<1.0e-9) u2 = 1.0e-8; // !!DeBuG!! 
Perfectly collimated particles going parallel to strips will have u2=alpha=0. This might gives NaN computing A, but only for few angles (21 deg)??? Add arbitrary epsilon to prevent 0/0. double P = (h/w)*u2; // (eq. 4, Day&Dance1983) double n = floor(P*detector_data_SHARED->grid_freq); // grid_freq = 1/C float q = P - n*C; double alpha = u2/(detector_data_SHARED->grid_strip_mu-detector_data_SHARED->grid_interspace_mu); // (eq. 8, Day&Dance1983) double inv_alpha = 1.0/alpha; // Grid transmission: probability of a photon passing through the grid without interaction: float A = expf(-detector_data_SHARED->grid_interspace_mu*h/w - d2*n*inv_alpha); // (eq. 9, Day&Dance1983) float H = 0.0f; // Step function if (q>=D2) H = 1.0f; float B = (fabsf(q-D2)+2.0f*(float)alpha) * expf((H*(D2-q))*inv_alpha) + (fabsf(d2-q)-2.0f*(float)alpha) * expf((-0.5f*(d2+q-fabsf(d2-q)))*inv_alpha); // (eq. 12, Day&Dance1983) return (A*B*detector_data_SHARED->grid_freq); // (eq. 10, Day&Dance1983) ; grid_freq = 1/C } //////////////////////////////////////////////////////////////////////////////// //! Find the material number at the current location searching the binary tree structure. //! //! @param[in] position Particle position //! @param[in] bitree Array with the binary trees for every non-uniform coarse voxel //! @param[in] bitree_root_index Index of the root node of the current coarse voxel within bitree array //! @param[in] voxel_coord Voxel coordinates, needed to determine the location of the lower wall of the current coarse voxel //! @param[in] voxel_data_CONST.voxel_size Global variable with the size of the low resolution coarse voxels [cm] //! @param[in] voxel_data_CONST.voxel_size_HiRes Global variable with the size of the original high resolution voxels [cm] //! @param[in] voxel_data_CONST.num_voxels_coarse Global variable with the number of sub-voxels in a coarse voxel //! @param[in] voxel_data_CONST.offset Global variable with the location of the lower walls of the complete voxelized geometry //! @return Material number found at the input position (composition of the tree leaf in that point) //////////////////////////////////////////////////////////////////////////////// __device__ int find_material_bitree(const float3* position, char* bitree, const int bitree_root_index, short3* voxel_coord) // !!bitree!! v1.5b { // -- Define variable used during the tree traversal: int bitree_node=0, node=0; // Binary tree node index for the current coarse voxel int3 node_width; node_width.x = voxel_data_CONST.num_voxels_coarse.x; node_width.y = voxel_data_CONST.num_voxels_coarse.y; node_width.z = voxel_data_CONST.num_voxels_coarse.z; float3 node_lower_wall; node_lower_wall.x = voxel_coord->x*voxel_data_CONST.voxel_size.x + voxel_data_CONST.offset.x; // Init lower bound walls in x,y,z node_lower_wall.y = voxel_coord->y*voxel_data_CONST.voxel_size.y + voxel_data_CONST.offset.y; node_lower_wall.z = voxel_coord->z*voxel_data_CONST.voxel_size.z + voxel_data_CONST.offset.z; // -- Recursively traverse the tree (from the root node) until we get a final node (positive bitree value). // The X, Y, Z axes are divided in half sequentially in each iteration of the loop. for(;;) { bitree_node = (int)bitree[node+bitree_root_index]; // Every acces to the bitree array has to be offset by the root node index "bitree_root_index" if (bitree_node>-1) // Check if we are already in a final node (empty geometry or final node found in previous Z division): break; // We reached a final node! Exit infinite loop. 
// Negative node value: we need to continue traversing down the tree // -- Check X axis: if (node_width.x > 1) // Never split a dimension that is just one voxel wide. Skip splitting and advance to the next dimension. !!DeBuG!! { int width_2nd = node_width.x/2; node_width.x = node_width.x - width_2nd; // Integer length of the first node: +1 longer than the second if distance is odd float splitting_plane = node_lower_wall.x + node_width.x*voxel_data_CONST.voxel_size_HiRes.x; // Using the original high res voxel size to determine the location of the splitting plane // Check in which side of the middle plane the current position is located: if (position->x < splitting_plane) { // -Point below (left) the middle plane: move to the following element of the bitree array. node++; // Move to the first child: following node } else { // -Point above (right) the middle plane: skip the following subnodes (first half of the node) and move directly to the second half node node = -bitree_node; // Advance to the location of the 2nd half (stored as a negative value) node_lower_wall.x = splitting_plane; // The splitting plane is now the lower plane of the subnode node_width.x = width_2nd; // Update length of 2nd half subnode } } bitree_node = (int)bitree[node+bitree_root_index]; if (bitree_node>-1) break; // We reached a final node! Exit infinite loop. // -- Check Y axis: if (node_width.y > 1) { int width_2nd = node_width.y/2; node_width.y = node_width.y - width_2nd; float splitting_plane = node_lower_wall.y + node_width.y*voxel_data_CONST.voxel_size_HiRes.y; if (position->y < splitting_plane) { node++; } else { node = -bitree_node; node_lower_wall.y = splitting_plane; node_width.y = width_2nd; } } bitree_node = (int)bitree[node+bitree_root_index]; if (bitree_node>-1) break; // We reached a final node! Exit infinite loop. // -- Check Z axis: if (node_width.z > 1) { int width_2nd = node_width.z/2; node_width.z = node_width.z - width_2nd; float splitting_plane = node_lower_wall.z + node_width.z*voxel_data_CONST.voxel_size_HiRes.z; if (position->z < splitting_plane) { node++; } else { node = -bitree_node; node_lower_wall.z = splitting_plane; node_width.z = width_2nd; } } } // -- We reached a final node: return the material number in the current location return (bitree_node); }
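// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the original MC-GPU source] The device function
// find_material_bitree() above relies on an implicit tree encoding: a
// non-negative entry of "bitree" is a leaf holding a material number, while a
// negative entry marks an internal node whose first child is stored at the next
// array index and whose second child is stored at index "-value". The
// hypothetical host function below repeats that traversal along a single axis
// purely to make the encoding easier to follow; the name, the 1D restriction
// and the assumption of a well-formed tree (internal nodes are never one voxel
// wide) are editorial assumptions, not part of the original code.
static int find_material_bitree_sketch_1D(const char* bitree, int bitree_root_index,
                                          int node_width, float node_lower_wall,
                                          float voxel_size_HiRes, float x)
{
  int node = 0;
  for (;;)
  {
    int bitree_node = (int)bitree[node + bitree_root_index];
    if (bitree_node > -1)
      return bitree_node;                        // Leaf reached: material number at position x

    int width_2nd = node_width / 2;
    node_width = node_width - width_2nd;         // First half keeps the extra voxel when the width is odd
    float splitting_plane = node_lower_wall + node_width * voxel_size_HiRes;

    if (x < splitting_plane)
    {
      node++;                                    // First child is stored right after its parent
    }
    else
    {
      node = -bitree_node;                       // Second child is stored at the negated parent value
      node_lower_wall = splitting_plane;
      node_width = width_2nd;
    }
  }
}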
14becb47b55faabe6328aa2d0d600983e71b8b7f.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/common_layers.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype> void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int_tp i = 0; i < top.size(); ++i) { top[i]->ShareData(*bottom[0]); } } template<typename Dtype> void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM if (top.size() == 1) { caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); return; } caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), bottom[0]->mutable_gpu_diff()); // Add remaining top blob diffs. for (int_tp i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); } #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); if (top.size() == 1) { greentea_copy<Dtype>(count_, (cl_mem) (top[0]->gpu_diff()), 0, (cl_mem) (bottom[0]->mutable_gpu_diff()), 0, &ctx); return; } greentea_gpu_add<Dtype>(this->device_->id(), count_, (cl_mem) (top[0]->gpu_diff()), 0, (cl_mem) (top[1]->gpu_diff()), 0, (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); // Add remaining top blob diffs. for (int_tp i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); greentea_gpu_axpy<Dtype>(this->device_->id(), count_, Dtype(1.), (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); } #endif // USE_GREENTEA } } INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer); } // namespace caffe
14becb47b55faabe6328aa2d0d600983e71b8b7f.cu
#include <vector> #include "caffe/common_layers.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype> void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int_tp i = 0; i < top.size(); ++i) { top[i]->ShareData(*bottom[0]); } } template<typename Dtype> void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA if (top.size() == 1) { caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); return; } caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), bottom[0]->mutable_gpu_diff()); // Add remaining top blob diffs. for (int_tp i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); } #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); if (top.size() == 1) { greentea_copy<Dtype>(count_, (cl_mem) (top[0]->gpu_diff()), 0, (cl_mem) (bottom[0]->mutable_gpu_diff()), 0, &ctx); return; } greentea_gpu_add<Dtype>(this->device_->id(), count_, (cl_mem) (top[0]->gpu_diff()), 0, (cl_mem) (top[1]->gpu_diff()), 0, (cl_mem) (bottom[0]->mutable_gpu_diff()), 0); // Add remaining top blob diffs. for (int_tp i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); greentea_gpu_axpy<Dtype>(this->device_->id(), count_, Dtype(1.), (cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0); } #endif // USE_GREENTEA } } INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer); } // namespace caffe
3ca5dc0e56b5e7ce7c65d50c3829f9a1c022873b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<iostream> #include<stdlib.h> #include<stdio.h> #include <cusolverDn.h> #include <hip/hip_runtime_api.h> #include "Utilities.cuh" #include "TimingGPU.cuh" /********/ /* MAIN */ /********/ int main(){ int M = 1000; int N = 1000; TimingGPU timerGPU; float elapsedTime; // --- Setting the host matrix float *h_A = (float *)malloc(M * N * sizeof(float)); for (unsigned int i = 0; i < M; i++){ for (unsigned int j = 0; j < N; j++){ h_A[j*M + i] = (i + j) * (i + j); } } // --- Setting the device matrix and moving the host matrix to the device float *d_A; gpuErrchk(hipMalloc(&d_A, M * N * sizeof(float))); gpuErrchk(hipMemcpy(d_A, h_A, M * N * sizeof(float), hipMemcpyHostToDevice)); // --- host side SVD results space float *h_U = (float *)malloc(M * M * sizeof(float)); float *h_V = (float *)malloc(N * N * sizeof(float)); float *h_S = (float *)malloc(N * sizeof(float)); // --- device side SVD workspace and matrices int work_size = 0; int *devInfo; gpuErrchk(hipMalloc(&devInfo, sizeof(int))); float *d_U; gpuErrchk(hipMalloc(&d_U, M * M * sizeof(float))); float *d_V; gpuErrchk(hipMalloc(&d_V, N * N * sizeof(float))); float *d_S; gpuErrchk(hipMalloc(&d_S, N * sizeof(float))); cusolverStatus_t stat; // --- CUDA solver initialization hipsolverDnHandle_t solver_handle; cusolveSafeCall(hipsolverDnCreate(&solver_handle)); cusolveSafeCall(hipsolverDnSgesvd_bufferSize(solver_handle, M, N, &work_size)); float *work; gpuErrchk(hipMalloc(&work, work_size * sizeof(float))); // --- CUDA SVD execution - Singular values only timerGPU.StartCounter(); cusolveSafeCall(hipsolverDnSgesvd(solver_handle, 'N', 'N', M, N, d_A, M, d_S, NULL, M, NULL, N, work, work_size, NULL, devInfo)); elapsedTime = timerGPU.GetCounter(); int devInfo_h = 0; gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost)); if (devInfo_h == 0) printf("SVD successfull for the singular values calculation only\n\n"); else if (devInfo_h < 0) printf("SVD unsuccessfull for the singular values calculation only. Parameter %i is wrong\n", -devInfo_h); else printf("SVD unsuccessfull for the singular values calculation only. A number of %i superdiagonals of an intermediate bidiagonal form did not converge to zero\n", devInfo_h); printf("Calculation of the singular values only: %f ms\n\n", elapsedTime); // --- Moving the results from device to host //gpuErrchk(hipMemcpy(h_S, d_S, N * sizeof(float), hipMemcpyDeviceToHost)); //for (int i = 0; i < N; i++) std::cout << "d_S[" << i << "] = " << h_S[i] << std::endl; // --- CUDA SVD execution - Full SVD timerGPU.StartCounter(); cusolveSafeCall(hipsolverDnSgesvd(solver_handle, 'A', 'A', M, N, d_A, M, d_S, d_U, M, d_V, N, work, work_size, NULL, devInfo)); elapsedTime = timerGPU.GetCounter(); devInfo_h = 0; gpuErrchk(hipMemcpy(&devInfo_h, devInfo, sizeof(int), hipMemcpyDeviceToHost)); if (devInfo_h == 0) printf("SVD successfull for the full SVD calculation\n\n"); else if (devInfo_h < 0) printf("SVD unsuccessfull for the full SVD calculation. Parameter %i is wrong\n", -devInfo_h); else printf("SVD unsuccessfull for the full SVD calculation. A number of %i superdiagonals of an intermediate bidiagonal form did not converge to zero\n", devInfo_h); printf("Calculation of the full SVD calculation: %f ms\n\n", elapsedTime); cusolveSafeCall(hipsolverDnDestroy(solver_handle)); return 0; }
3ca5dc0e56b5e7ce7c65d50c3829f9a1c022873b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<iostream> #include<stdlib.h> #include<stdio.h> #include <cusolverDn.h> #include <cuda_runtime_api.h> #include "Utilities.cuh" #include "TimingGPU.cuh" /********/ /* MAIN */ /********/ int main(){ int M = 1000; int N = 1000; TimingGPU timerGPU; float elapsedTime; // --- Setting the host matrix float *h_A = (float *)malloc(M * N * sizeof(float)); for (unsigned int i = 0; i < M; i++){ for (unsigned int j = 0; j < N; j++){ h_A[j*M + i] = (i + j) * (i + j); } } // --- Setting the device matrix and moving the host matrix to the device float *d_A; gpuErrchk(cudaMalloc(&d_A, M * N * sizeof(float))); gpuErrchk(cudaMemcpy(d_A, h_A, M * N * sizeof(float), cudaMemcpyHostToDevice)); // --- host side SVD results space float *h_U = (float *)malloc(M * M * sizeof(float)); float *h_V = (float *)malloc(N * N * sizeof(float)); float *h_S = (float *)malloc(N * sizeof(float)); // --- device side SVD workspace and matrices int work_size = 0; int *devInfo; gpuErrchk(cudaMalloc(&devInfo, sizeof(int))); float *d_U; gpuErrchk(cudaMalloc(&d_U, M * M * sizeof(float))); float *d_V; gpuErrchk(cudaMalloc(&d_V, N * N * sizeof(float))); float *d_S; gpuErrchk(cudaMalloc(&d_S, N * sizeof(float))); cusolverStatus_t stat; // --- CUDA solver initialization cusolverDnHandle_t solver_handle; cusolveSafeCall(cusolverDnCreate(&solver_handle)); cusolveSafeCall(cusolverDnSgesvd_bufferSize(solver_handle, M, N, &work_size)); float *work; gpuErrchk(cudaMalloc(&work, work_size * sizeof(float))); // --- CUDA SVD execution - Singular values only timerGPU.StartCounter(); cusolveSafeCall(cusolverDnSgesvd(solver_handle, 'N', 'N', M, N, d_A, M, d_S, NULL, M, NULL, N, work, work_size, NULL, devInfo)); elapsedTime = timerGPU.GetCounter(); int devInfo_h = 0; gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost)); if (devInfo_h == 0) printf("SVD successfull for the singular values calculation only\n\n"); else if (devInfo_h < 0) printf("SVD unsuccessfull for the singular values calculation only. Parameter %i is wrong\n", -devInfo_h); else printf("SVD unsuccessfull for the singular values calculation only. A number of %i superdiagonals of an intermediate bidiagonal form did not converge to zero\n", devInfo_h); printf("Calculation of the singular values only: %f ms\n\n", elapsedTime); // --- Moving the results from device to host //gpuErrchk(cudaMemcpy(h_S, d_S, N * sizeof(float), cudaMemcpyDeviceToHost)); //for (int i = 0; i < N; i++) std::cout << "d_S[" << i << "] = " << h_S[i] << std::endl; // --- CUDA SVD execution - Full SVD timerGPU.StartCounter(); cusolveSafeCall(cusolverDnSgesvd(solver_handle, 'A', 'A', M, N, d_A, M, d_S, d_U, M, d_V, N, work, work_size, NULL, devInfo)); elapsedTime = timerGPU.GetCounter(); devInfo_h = 0; gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost)); if (devInfo_h == 0) printf("SVD successfull for the full SVD calculation\n\n"); else if (devInfo_h < 0) printf("SVD unsuccessfull for the full SVD calculation. Parameter %i is wrong\n", -devInfo_h); else printf("SVD unsuccessfull for the full SVD calculation. A number of %i superdiagonals of an intermediate bidiagonal form did not converge to zero\n", devInfo_h); printf("Calculation of the full SVD calculation: %f ms\n\n", elapsedTime); cusolveSafeCall(cusolverDnDestroy(solver_handle)); return 0; }
c8ebde6e0f1c8e110c5ebed4874463862362a856.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THHUNN/generic/ClassNLLCriterion.hip" #else void THNN_(ClassNLLCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimension)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(sizeLegacyNoScalars)(state, input, n_dims - 1); if (weights) { THCUNN_assertSameGPU( state, 5, input, target, weights, output, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, output, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights); THError("weight tensor should be defined either for all %d classes or no classes" " but got weight tensor of shape: %s", n_classes, s1.str); } if (reduction == at::Reduction::None && n_dims == 2) { THCTensor_(resize1d)(state, output, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } hipLaunchKernelGGL(( ClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t>) , dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, toDeviceTensor<scalar_t, 2>(state, input), toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, output), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); if (weights) { THCTensor_(free)(state, weights); } return; } THCTensor_(resize0d)(state, output); THCTensor_(resize0d)(state, total_weight); input = THCTensor_(newContiguous)(state, input); weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *weights_data = weights ? 
THCTensor_(data)(state, weights) : NULL; THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *output_data = THCTensor_(data)(state, output); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel1<scalar_t>) , dim3(1), dim3(1), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateOutput_kernel<scalar_t, accreal>) , dim3(1), dim3(NTHREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); C10_HIP_KERNEL_LAUNCH_CHECK(); } if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); THCTensor_(free)(state, input); } void THNN_(ClassNLLCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(size)(state, input, n_dims - 1); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous"); if (weights) { THCUNN_assertSameGPU( state, 5, weights, input, target, gradInput, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, gradInput, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } if (reduction == at::Reduction::None && n_dims == 2) { THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } hipLaunchKernelGGL(( ClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t>) , dim3(GET_BLOCKS(batch_size)), dim3(CUDA_NUM_THREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), batch_size, toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, gradOutput), toDeviceTensor<scalar_t, 2>(state, gradInput), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); C10_HIP_KERNEL_LAUNCH_CHECK(); if (weights) { THCTensor_(free)(state, weights); } return; } weights = weights ? 
THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); scalar_t *weights_data = weights ? THCTensor_(data)(state, weights) : NULL; scalar_t *gradInput_data = THCTensor_(data)(state, gradInput); THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel1<scalar_t>) , dim3(1), dim3(1), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, weights_data, target_data, total_weight_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( cunn_ClassNLLCriterion_updateGradInput_kernel<scalar_t>) , dim3(1), dim3(NTHREADS), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, target_data, weights_data, total_weight_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); C10_HIP_KERNEL_LAUNCH_CHECK(); } if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); } #endif
c8ebde6e0f1c8e110c5ebed4874463862362a856.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THCUNN/generic/ClassNLLCriterion.cu" #else void THNN_(ClassNLLCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimension)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(sizeLegacyNoScalars)(state, input, n_dims - 1); if (weights) { THCUNN_assertSameGPU( state, 5, input, target, weights, output, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, output, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THCDescBuff s1 = THCTensor_(sizeDesc)(state, weights); THError("weight tensor should be defined either for all %d classes or no classes" " but got weight tensor of shape: %s", n_classes, s1.str); } if (reduction == at::Reduction::None && n_dims == 2) { THCTensor_(resize1d)(state, output, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } ClassNLLCriterion_updateOutput_no_reduce_kernel<scalar_t> <<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( batch_size, toDeviceTensor<scalar_t, 2>(state, input), toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, output), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); if (weights) { THCTensor_(free)(state, weights); } return; } THCTensor_(resize0d)(state, output); THCTensor_(resize0d)(state, total_weight); input = THCTensor_(newContiguous)(state, input); weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); scalar_t *input_data = THCTensor_(data)(state, input); scalar_t *weights_data = weights ? 
THCTensor_(data)(state, weights) : NULL; THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *output_data = THCTensor_(data)(state, output); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { cunn_ClassNLLCriterion_updateOutput_kernel1<scalar_t> <<<1, 1, 0, c10::cuda::getCurrentCUDAStream()>>>( output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 2) { cunn_ClassNLLCriterion_updateOutput_kernel<scalar_t, accreal> <<<1, NTHREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( output_data, total_weight_data, input_data, target_data, weights_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); THCTensor_(free)(state, input); } void THNN_(ClassNLLCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, int64_t reduction, THCTensor *weights, THCTensor *total_weight, int64_t ignore_index) { if (THCIndexTensor_(nDimensionLegacyNoScalars)(state, target) > 1) { THError("multi-target not supported"); } int n_dims = THCTensor_(nDimensionLegacyNoScalars)(state, input); int n_classes = THCTensor_(size)(state, input, n_dims - 1); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); THArgCheck(THCTensor_(isContiguous)(state, gradInput), 4, "gradInput must be contiguous"); if (weights) { THCUNN_assertSameGPU( state, 5, weights, input, target, gradInput, total_weight ); } else { THCUNN_assertSameGPU( state, 4, input, target, gradInput, total_weight ); } if (n_dims != 1 && n_dims != 2) { THError("input tensor should be 1D or 2D"); } int64_t batch_size = n_dims == 1 ? 1 : THCTensor_(size)(state, input, 0); int64_t num_targets = THCudaLongTensor_sizeLegacyNoScalars(state, target, 0); THArgCheck(batch_size == num_targets, 2, "mismatch between the batch size of input (%ld) and that of target (%ld)", batch_size, num_targets); if (weights && THCTensor_(nElement)(state, weights) != n_classes) { THError("weight tensor should be defined either for all or no classes"); } if (reduction == at::Reduction::None && n_dims == 2) { THCUNN_check_dim_size(state, gradOutput, 1, 0, batch_size); if (batch_size == 0) { // This guards from unnecessary operations and launching CUDA kernel with 0 blocks. return; } if (weights) { weights = THCTensor_(newContiguous)(state, weights); } ClassNLLCriterion_updateGradInput_no_reduce_kernel<scalar_t> <<<GET_BLOCKS(batch_size), CUDA_NUM_THREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( batch_size, toDeviceTensor<THCIndex_t, 1>(state, target), toDeviceTensor<scalar_t, 1>(state, gradOutput), toDeviceTensor<scalar_t, 2>(state, gradInput), weights ? THCTensor_(data)(state, weights) : NULL, n_classes, ignore_index); C10_CUDA_KERNEL_LAUNCH_CHECK(); if (weights) { THCTensor_(free)(state, weights); } return; } weights = weights ? THCTensor_(newContiguous)(state, weights) : NULL; target = THCIndexTensor_(newContiguous)(state, target); THCUNN_check_dim_size(state, gradOutput, 1, 0, 1); scalar_t *gradOutput_data = THCTensor_(data)(state, gradOutput); scalar_t *weights_data = weights ? 
THCTensor_(data)(state, weights) : NULL; scalar_t *gradInput_data = THCTensor_(data)(state, gradInput); THCIndex_t *target_data = THCIndexTensor_(data)(state, target); scalar_t *total_weight_data = THCTensor_(data)(state, total_weight); if (THCTensor_(nDimensionLegacyNoScalars)(state, input) == 1) { cunn_ClassNLLCriterion_updateGradInput_kernel1<scalar_t> <<<1, 1, 0, c10::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, weights_data, target_data, total_weight_data, reduction == at::Reduction::Mean, n_classes, ignore_index ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { cunn_ClassNLLCriterion_updateGradInput_kernel<scalar_t> <<<1, NTHREADS, 0, c10::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, target_data, weights_data, total_weight_data, reduction == at::Reduction::Mean, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), n_classes, ignore_index ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } if (weights) { THCTensor_(free)(state, weights); } THCIndexTensor_(free)(state, target); } #endif
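// ---------------------------------------------------------------------------
// [Editorial sketch, not part of THCUNN] Plain-C reference of the quantity the
// forward kernels above accumulate, convenient when checking the CUDA path on a
// small batch. "input" holds log-probabilities (batch_size x n_classes,
// row-major); per-class weights, ignore_index and the mean reduction follow the
// same semantics as the kernels. The function name and signature are
// illustrative only.
static inline float nll_forward_reference(const float *input, const long *target,
                                          const float *weights, long batch_size,
                                          long n_classes, long ignore_index,
                                          int mean_reduction, float *total_weight_out)
{
  float loss = 0.f, total_weight = 0.f;
  for (long i = 0; i < batch_size; i++) {
    long t = target[i];
    if (t == ignore_index) continue;                 // ignored targets contribute neither loss nor weight
    float w = weights ? weights[t] : 1.f;
    loss += -w * input[i * n_classes + t];           // negative log-likelihood of the true class
    total_weight += w;
  }
  if (mean_reduction && total_weight > 0.f) loss /= total_weight;  // at::Reduction::Mean divides by the total weight
  if (total_weight_out) *total_weight_out = total_weight;
  return loss;
}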
c83960ed3fae4efe760aef4b251f33f59a64c82b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cassert> #include <cstring> #include <vector> #include "paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // constants for approximating the normal cdf constexpr float A = 1.41421356237309504; // sqrt(2) GeluPlugin* CreateGeluPluginDeserialize(const void* buffer, size_t length) { return new GeluPlugin(buffer, length); } REGISTER_TRT_PLUGIN("gelu plugin", CreateGeluPluginDeserialize); nvinfer1::Dims GeluPlugin::getOutputDimensions(int index, const nvinfer1::Dims* in_dims, int nb_inputs) { assert(nb_inputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const& input_dims = in_dims[0]; nvinfer1::Dims output_dims = input_dims; return output_dims; } template <typename T, unsigned TPB> __global__ void geluKernel(const T a, int n, const T* input, T* output) { const int idx = blockIdx.x * TPB + threadIdx.x; if (idx < n) { const T in = input[idx]; const T cdf = 0.5 * (1.0 + erf(in * 0.5 * a)); output[idx] = in * cdf; } } int computeGelu(hipStream_t stream, int n, const float* input, float* output) { constexpr int blockSize = 256; const int gridSize = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( geluKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A, n, input, output); hipError_t error = hipGetLastError(); if (error != hipSuccess) LOG(ERROR) << hipGetErrorString(error); return 0; } int GeluPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void*, hipStream_t stream) { int status = -1; const float* input = static_cast<const float*>(inputs[0]); float* output = static_cast<float*>(outputs[0]); status = computeGelu(stream, input_volume_ * batchSize, input, output); return status; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
c83960ed3fae4efe760aef4b251f33f59a64c82b.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cassert> #include <cstring> #include <vector> #include "paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // constants for approximating the normal cdf constexpr float A = 1.41421356237309504; // sqrt(2) GeluPlugin* CreateGeluPluginDeserialize(const void* buffer, size_t length) { return new GeluPlugin(buffer, length); } REGISTER_TRT_PLUGIN("gelu plugin", CreateGeluPluginDeserialize); nvinfer1::Dims GeluPlugin::getOutputDimensions(int index, const nvinfer1::Dims* in_dims, int nb_inputs) { assert(nb_inputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const& input_dims = in_dims[0]; nvinfer1::Dims output_dims = input_dims; return output_dims; } template <typename T, unsigned TPB> __global__ void geluKernel(const T a, int n, const T* input, T* output) { const int idx = blockIdx.x * TPB + threadIdx.x; if (idx < n) { const T in = input[idx]; const T cdf = 0.5 * (1.0 + erf(in * 0.5 * a)); output[idx] = in * cdf; } } int computeGelu(cudaStream_t stream, int n, const float* input, float* output) { constexpr int blockSize = 256; const int gridSize = (n + blockSize - 1) / blockSize; geluKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(A, n, input, output); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) LOG(ERROR) << cudaGetErrorString(error); return 0; } int GeluPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void*, cudaStream_t stream) { int status = -1; const float* input = static_cast<const float*>(inputs[0]); float* output = static_cast<float*>(outputs[0]); status = computeGelu(stream, input_volume_ * batchSize, input, output); return status; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
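// ---------------------------------------------------------------------------
// [Editorial sketch, not part of the plugin] Host-side reference of the exact
// (erf-based) GELU evaluated by geluKernel above, handy for spot-checking the
// TensorRT plugin output on a few values. The function name is illustrative and
// erff() is assumed to be available on the host (e.g. via <cmath>).
// gelu(x) = x * Phi(x) = x * 0.5 * (1 + erf(x / sqrt(2)))
static inline float gelu_reference(float x) {
  return x * 0.5f * (1.0f + erff(x * 0.70710678f));  // 0.70710678f ~= 1/sqrt(2)
}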
b2554ef22e32c9d60cb1ee54f2de339586638bba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * given a velocity field (vx,vy,vz) and a scalar field lsf * calculate v*grad(lsf) with WENO scheme ******************************************************************************/ #include "shared_utilities.cuh" #include "shared_utilities.cup" // calculate forward and back weno derivative in the direction specified by step[i] // i is 0 for x, 1 for y, 2 for z __device__ inline void weno_derivative(double & df, double & db, double const * lsf, int const step[3], int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges, double ds) { int stencil[7]; // index of the 7 point stencil for(int i=0; i<7; i++){ int row_shift = row_idx + (i - 3) * step[0]; int col_shift = col_idx + (i - 3) * step[1]; int pge_shift = pge_idx + (i - 3) * step[2]; stencil[i] = sub2ind(row_shift, col_shift, pge_shift, rows, cols, pges); } double v[6]; // cell average of derivatives for(int i=0; i<6; i++){ v[i] = (lsf[ stencil[i+1] ] - lsf[ stencil[i] ]) / ds; } db = weno_onesided_derivative(v[0],v[1],v[2],v[3],v[4]); df = weno_onesided_derivative(v[5],v[4],v[3],v[2],v[1]); } __global__ void advection_step(double * astep, double const * vx, double const * vy, double const * vz, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int compass[3][3] = {{0,1,0},{1,0,0},{0,0,1}}; double xL,xR,yB,yF,zD,zU; weno_derivative(xR,xL,lsf,compass[0],row_idx,col_idx,pge_idx,rows,cols,pges,dx); weno_derivative(yF,yB,lsf,compass[1],row_idx,col_idx,pge_idx,rows,cols,pges,dy); weno_derivative(zU,zD,lsf,compass[2],row_idx,col_idx,pge_idx,rows,cols,pges,dz); astep[ind] = (min2(0.0,vx[ind]) * xR + max2(0.0,vx[ind]) * xL + min2(0.0,vy[ind]) * yF + max2(0.0,vy[ind]) * yB + min2(0.0,vz[ind]) * zU + max2(0.0,vz[ind]) * zD ); }
b2554ef22e32c9d60cb1ee54f2de339586638bba.cu
/******************************************************************************* * given a velocity field (vx,vy,vz) and a scalar field lsf * calculate v*grad(lsf) with WENO scheme ******************************************************************************/ #include "shared_utilities.cuh" #include "shared_utilities.cup" // calculate forward and back weno derivative in the direction specified by step[i] // i is 0 for x, 1 for y, 2 for z __device__ inline void weno_derivative(double & df, double & db, double const * lsf, int const step[3], int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges, double ds) { int stencil[7]; // index of the 7 point stencil for(int i=0; i<7; i++){ int row_shift = row_idx + (i - 3) * step[0]; int col_shift = col_idx + (i - 3) * step[1]; int pge_shift = pge_idx + (i - 3) * step[2]; stencil[i] = sub2ind(row_shift, col_shift, pge_shift, rows, cols, pges); } double v[6]; // cell average of derivatives for(int i=0; i<6; i++){ v[i] = (lsf[ stencil[i+1] ] - lsf[ stencil[i] ]) / ds; } db = weno_onesided_derivative(v[0],v[1],v[2],v[3],v[4]); df = weno_onesided_derivative(v[5],v[4],v[3],v[2],v[1]); } __global__ void advection_step(double * astep, double const * vx, double const * vy, double const * vz, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int compass[3][3] = {{0,1,0},{1,0,0},{0,0,1}}; double xL,xR,yB,yF,zD,zU; weno_derivative(xR,xL,lsf,compass[0],row_idx,col_idx,pge_idx,rows,cols,pges,dx); weno_derivative(yF,yB,lsf,compass[1],row_idx,col_idx,pge_idx,rows,cols,pges,dy); weno_derivative(zU,zD,lsf,compass[2],row_idx,col_idx,pge_idx,rows,cols,pges,dz); astep[ind] = (min2(0.0,vx[ind]) * xR + max2(0.0,vx[ind]) * xL + min2(0.0,vy[ind]) * yF + max2(0.0,vy[ind]) * yB + min2(0.0,vz[ind]) * zU + max2(0.0,vz[ind]) * zD ); }
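// ---------------------------------------------------------------------------
// [Editorial sketch, not part of this file] weno_onesided_derivative() is defined
// in shared_utilities.cup, which is not shown here. A standard Jiang-Shu WENO5
// reconstruction matching the five-argument call convention used above would
// look like the sketch below; the actual implementation in shared_utilities may
// differ in details such as the epsilon value, so this is kept only to make the
// stencil convention in advection_step() easier to read.
__device__ inline double weno_onesided_derivative_sketch(double v1, double v2, double v3, double v4, double v5)
{
	// smoothness indicators of the three candidate substencils
	double d1 = v1 - 2.0*v2 + v3, e1 = v1 - 4.0*v2 + 3.0*v3;
	double d2 = v2 - 2.0*v3 + v4, e2 = v2 - v4;
	double d3 = v3 - 2.0*v4 + v5, e3 = 3.0*v3 - 4.0*v4 + v5;
	double s1 = 13.0/12.0 * d1*d1 + 0.25 * e1*e1;
	double s2 = 13.0/12.0 * d2*d2 + 0.25 * e2*e2;
	double s3 = 13.0/12.0 * d3*d3 + 0.25 * e3*e3;
	// nonlinear weights biased away from non-smooth substencils
	double eps = 1.0e-6;
	double a1 = 0.1 / ((s1 + eps) * (s1 + eps));
	double a2 = 0.6 / ((s2 + eps) * (s2 + eps));
	double a3 = 0.3 / ((s3 + eps) * (s3 + eps));
	// convex combination of the three third-order approximations
	return ( a1 * (2.0*v1 - 7.0*v2 + 11.0*v3) + a2 * (-v2 + 5.0*v3 + 2.0*v4) + a3 * (2.0*v3 + 5.0*v4 - v5) ) / (6.0 * (a1 + a2 + a3));
}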
77b7197341477462a762d9b74089c6d4c79c0757.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2010, Florian Kummer, Technische Universitaet Darmstadt, Fachgebiet fuer Stroemungsmechanik * * Use, modification and distribution is subject to the Boost Software * License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * Authors: Christoph Busold * */ extern __shared__ char smem[]; extern "C" __global__ void sparseMultiply(double* values, int* colIdx, int* rowStart, double* result, double* x, double alpha, double beta, int size) { // Dynamically allocated shared memory, should be BlockDim.x + 1 ints (see hipFuncSetSharedSize host code) int* sharedRowStart = (int*)smem; // Indices int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; double rowacc = 0.0; // Each thread loads one element of rowStart if(idx < size) { sharedRowStart[tid] = rowStart[idx]; } // The first thread loads additionally the next element, needed by the last thread if(tid == 0) { int loadIdx = min((blockIdx.x + 1) * blockDim.x, size); int storIdx = size % blockDim.x > 0 && idx + blockDim.x >= size ? size % blockDim.x : blockDim.x; sharedRowStart[storIdx] = rowStart[loadIdx]; } __syncthreads(); if(idx < size) { // Multiply and sum up data of this row for(int i = sharedRowStart[tid]; i < sharedRowStart[tid + 1]; i++) { rowacc += values[i] * x[colIdx[i]]; } result[idx] = result[idx] * beta + rowacc * alpha; } } extern "C" __global__ void accumulateExternal(double* data, int* indices, double* rcvBuffer, double alpha, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { data[indices[idx]] += rcvBuffer[idx] * alpha; } } // In this kernel each block computes multiple cell rows // IMPORTANT: All cell rows must have the same number of cells! // Otherwise sync in kernel might fail, causing crash or incorrect behaviour! 
extern "C" __global__ void blockMultiply2(double* cellData, double* xData, int* cellColIdx, double* result, double alpha, double beta, int cellsize, int cellrowsperblock, int cellsperrow, int stride, int size) { // Dynamically allocated shared memory, should be blockDim.x doubles for xData double* sharedData = (double*)smem; // Start cell index of this thread int* start = (int*)&sharedData[blockDim.x]; // Column of this thread's cell int* colIdx = (int*)&start[cellrowsperblock]; // Global index int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; // Number of the cell this thread is in int cellid = tid / cellsize; // Thread index inside this cell int cid = tid % cellsize; double rowacc = 0.0; double value; // Load start index for every cell row in this block if(tid < cellrowsperblock) { start[tid] = (cellrowsperblock * blockIdx.x + tid) * cellsperrow; } __syncthreads(); // Loop over all cells, discard overlapping threads inside because of sync for(int i = 0; i < cellsperrow; i++) { // Load column index for every cell if(tid < cellrowsperblock) { colIdx[tid] = cellColIdx[start[tid] + i]; } __syncthreads(); // No overlapping threads if(idx < size) { // Load x at colIdx location into shared memory // colIdx * cellsize is the start index at this column // cid is the row index of this thread sharedData[tid] = xData[colIdx[cellid] * cellsize + cid]; } __syncthreads(); // No overlapping threads if(idx < size) { // Loop over all columns of this cell for(int col = 0; col < cellsize; col++) { // Load value of this column // cell * cellsize * cellsize is the start index of the current cell // col * cellsize is the start index of the current column // cid is the row index of this thread value = cellData[(start[cellid] + i) * stride + col * cellsize + cid]; // Multiply value with x from sharedMemory // cellid * cellsize is the offset for the cell this thread is in // col is the column index of this loop cycle rowacc += value * sharedData[cellid * cellsize + col]; } } __syncthreads(); } // No overlapping threads if(idx < size) { // Write back result result[idx] = result[idx] * beta + rowacc * alpha; } } // In this kernel each block computes one cell row (block size equals cell size) extern "C" __global__ void blockMultiply(double* cellData, double* xData, int* cellColIdx, int* cellRowStart, double* result, double dia, int cellsize, int size) { double* sharedData = (double*)smem; __shared__ int colIdx; __shared__ int start; __shared__ int end; int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; double rowacc = 0.0; double value; if(tid == 0) { start = cellRowStart[blockIdx.x ]; end = cellRowStart[blockIdx.x + 1]; } __syncthreads(); for(int cell = start; cell < end; cell++) { if(tid == 0) { colIdx = cellColIdx[cell]; } __syncthreads(); if(idx < size) { sharedData[tid] = xData[colIdx * cellsize + tid]; } __syncthreads(); if(idx < size) { for(int col = 0; col < cellsize; col++) { value = cellData[cell * cellsize * cellsize + col * cellsize + tid]; rowacc += value * sharedData[col]; } } } if(idx < size) { rowacc += dia * xData[idx]; result[idx] += rowacc; } } // ELLPACKmod format extern "C" __global__ void ellMultiply(double* valData, int* colIdxData, double* xData, double* result, double alpha, double beta, int size, int colCount, int valStride, int colStride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // Add offsets to the start of this block's value and column data pointers valData += blockIdx.x * colCount * valStride; colIdxData += blockIdx.x * colCount * 
colStride; int valIdx; int colIdx; // No sync in this kernel, therefore overlapping threads are discarded here if(idx < size) { double rowacc = 0.0; // Loop over all columns for(int col = 0; col < colCount; col++) { // Index of the value and column index to load valIdx = col * valStride + threadIdx.x; colIdx = col * colStride + threadIdx.x; // Load value and multiply with x at column of this value rowacc += valData[valIdx] * xData[colIdxData[colIdx]]; } // Write result back result[idx] = result[idx] * beta + rowacc * alpha; } } // ManualCacheELLPACK format extern "C" __global__ void mcellMultiply(double* valData, unsigned short* colIdxData, int* xSubStart, int* blockSubVector, double* xData, double* result, double alpha, double beta, int size, int colCount, int valStride, int colStride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; // Add offsets to the start of this block's value and column data pointers valData += blockIdx.x * colCount * valStride; colIdxData += blockIdx.x * colCount * colStride; double* xSub = (double*)smem; __shared__ int xStart; __shared__ int xLength; int valIdx; unsigned short colIdx; if(tid == 0) { xStart = xSubStart[blockIdx.x ]; xLength = xSubStart[blockIdx.x + 1] - xStart; } __syncthreads(); blockSubVector += xStart; int ldIdx = tid; while(ldIdx < xLength) { xSub[ldIdx] = xData[blockSubVector[ldIdx]]; ldIdx += blockDim.x; } __syncthreads(); // No sync inside this loop, therefore overlapping threads are discarded here if(idx < size) { double rowacc = 0.0; // Loop over all columns for(int col = 0; col < colCount; col++) { // Index of the value and column index to load valIdx = col * valStride + tid; colIdx = col * colStride + tid; // Load value and multiply with x at column of this value rowacc += valData[valIdx] * xSub[colIdxData[colIdx]]; } // Write result back result[idx] = result[idx] * beta + rowacc * alpha; } }
77b7197341477462a762d9b74089c6d4c79c0757.cu
/* * Copyright (C) 2010, Florian Kummer, Technische Universitaet Darmstadt, Fachgebiet fuer Stroemungsmechanik * * Use, modification and distribution is subject to the Boost Software * License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at * http://www.boost.org/LICENSE_1_0.txt) * * Authors: Christoph Busold * */ extern __shared__ char smem[]; extern "C" __global__ void sparseMultiply(double* values, int* colIdx, int* rowStart, double* result, double* x, double alpha, double beta, int size) { // Dynamically allocated shared memory, should be BlockDim.x + 1 ints (see cuFuncSetSharedSize host code) int* sharedRowStart = (int*)smem; // Indices int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; double rowacc = 0.0; // Each thread loads one element of rowStart if(idx < size) { sharedRowStart[tid] = rowStart[idx]; } // The first thread loads additionally the next element, needed by the last thread if(tid == 0) { int loadIdx = min((blockIdx.x + 1) * blockDim.x, size); int storIdx = size % blockDim.x > 0 && idx + blockDim.x >= size ? size % blockDim.x : blockDim.x; sharedRowStart[storIdx] = rowStart[loadIdx]; } __syncthreads(); if(idx < size) { // Multiply and sum up data of this row for(int i = sharedRowStart[tid]; i < sharedRowStart[tid + 1]; i++) { rowacc += values[i] * x[colIdx[i]]; } result[idx] = result[idx] * beta + rowacc * alpha; } } extern "C" __global__ void accumulateExternal(double* data, int* indices, double* rcvBuffer, double alpha, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { data[indices[idx]] += rcvBuffer[idx] * alpha; } } // In this kernel each block computes multiple cell rows // IMPORTANT: All cell rows must have the same number of cells! // Otherwise sync in kernel might fail, causing crash or incorrect behaviour! 
extern "C" __global__ void blockMultiply2(double* cellData, double* xData, int* cellColIdx, double* result, double alpha, double beta, int cellsize, int cellrowsperblock, int cellsperrow, int stride, int size) { // Dynamically allocated shared memory, should be blockDim.x doubles for xData double* sharedData = (double*)smem; // Start cell index of this thread int* start = (int*)&sharedData[blockDim.x]; // Column of this thread's cell int* colIdx = (int*)&start[cellrowsperblock]; // Global index int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; // Number of the cell this thread is in int cellid = tid / cellsize; // Thread index inside this cell int cid = tid % cellsize; double rowacc = 0.0; double value; // Load start index for every cell row in this block if(tid < cellrowsperblock) { start[tid] = (cellrowsperblock * blockIdx.x + tid) * cellsperrow; } __syncthreads(); // Loop over all cells, discard overlapping threads inside because of sync for(int i = 0; i < cellsperrow; i++) { // Load column index for every cell if(tid < cellrowsperblock) { colIdx[tid] = cellColIdx[start[tid] + i]; } __syncthreads(); // No overlapping threads if(idx < size) { // Load x at colIdx location into shared memory // colIdx * cellsize is the start index at this column // cid is the row index of this thread sharedData[tid] = xData[colIdx[cellid] * cellsize + cid]; } __syncthreads(); // No overlapping threads if(idx < size) { // Loop over all columns of this cell for(int col = 0; col < cellsize; col++) { // Load value of this column // cell * cellsize * cellsize is the start index of the current cell // col * cellsize is the start index of the current column // cid is the row index of this thread value = cellData[(start[cellid] + i) * stride + col * cellsize + cid]; // Multiply value with x from sharedMemory // cellid * cellsize is the offset for the cell this thread is in // col is the column index of this loop cycle rowacc += value * sharedData[cellid * cellsize + col]; } } __syncthreads(); } // No overlapping threads if(idx < size) { // Write back result result[idx] = result[idx] * beta + rowacc * alpha; } } // In this kernel each block computes one cell row (block size equals cell size) extern "C" __global__ void blockMultiply(double* cellData, double* xData, int* cellColIdx, int* cellRowStart, double* result, double dia, int cellsize, int size) { double* sharedData = (double*)smem; __shared__ int colIdx; __shared__ int start; __shared__ int end; int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; double rowacc = 0.0; double value; if(tid == 0) { start = cellRowStart[blockIdx.x ]; end = cellRowStart[blockIdx.x + 1]; } __syncthreads(); for(int cell = start; cell < end; cell++) { if(tid == 0) { colIdx = cellColIdx[cell]; } __syncthreads(); if(idx < size) { sharedData[tid] = xData[colIdx * cellsize + tid]; } __syncthreads(); if(idx < size) { for(int col = 0; col < cellsize; col++) { value = cellData[cell * cellsize * cellsize + col * cellsize + tid]; rowacc += value * sharedData[col]; } } } if(idx < size) { rowacc += dia * xData[idx]; result[idx] += rowacc; } } // ELLPACKmod format extern "C" __global__ void ellMultiply(double* valData, int* colIdxData, double* xData, double* result, double alpha, double beta, int size, int colCount, int valStride, int colStride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // Add offsets to the start of this block's value and column data pointers valData += blockIdx.x * colCount * valStride; colIdxData += blockIdx.x * colCount * 
colStride; int valIdx; int colIdx; // No sync in this kernel, therefore overlapping threads are discarded here if(idx < size) { double rowacc = 0.0; // Loop over all columns for(int col = 0; col < colCount; col++) { // Index of the value and column index to load valIdx = col * valStride + threadIdx.x; colIdx = col * colStride + threadIdx.x; // Load value and multiply with x at column of this value rowacc += valData[valIdx] * xData[colIdxData[colIdx]]; } // Write result back result[idx] = result[idx] * beta + rowacc * alpha; } } // ManualCacheELLPACK format extern "C" __global__ void mcellMultiply(double* valData, unsigned short* colIdxData, int* xSubStart, int* blockSubVector, double* xData, double* result, double alpha, double beta, int size, int colCount, int valStride, int colStride) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int tid = threadIdx.x; // Add offsets to the start of this block's value and column data pointers valData += blockIdx.x * colCount * valStride; colIdxData += blockIdx.x * colCount * colStride; double* xSub = (double*)smem; __shared__ int xStart; __shared__ int xLength; int valIdx; unsigned short colIdx; if(tid == 0) { xStart = xSubStart[blockIdx.x ]; xLength = xSubStart[blockIdx.x + 1] - xStart; } __syncthreads(); blockSubVector += xStart; int ldIdx = tid; while(ldIdx < xLength) { xSub[ldIdx] = xData[blockSubVector[ldIdx]]; ldIdx += blockDim.x; } __syncthreads(); // No sync inside this loop, therefore overlapping threads are discarded here if(idx < size) { double rowacc = 0.0; // Loop over all columns for(int col = 0; col < colCount; col++) { // Index of the value and column index to load valIdx = col * valStride + tid; colIdx = col * colStride + tid; // Load value and multiply with x at column of this value rowacc += valData[valIdx] * xSub[colIdxData[colIdx]]; } // Write result back result[idx] = result[idx] * beta + rowacc * alpha; } }
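The CSR kernel sparseMultiply expects its dynamic shared memory to hold blockDim.x + 1 ints for the cached rowStart entries, as its opening comment notes. A hedged host-side wrapper sketch, with the block size of 256 as an illustrative choice:

// Host-side wrapper sketch for the CSR kernel above; the dynamic shared memory
// size follows the kernel's own requirement of (blockDim.x + 1) ints.
#include <cuda_runtime.h>

extern "C" __global__ void sparseMultiply(double*, int*, int*, double*, double*,
                                          double, double, int);

void csr_spmv(double* d_values, int* d_colIdx, int* d_rowStart,
              double* d_result, double* d_x,
              double alpha, double beta, int numRows)
{
    const int block = 256;                              // illustrative block size
    const int grid  = (numRows + block - 1) / block;    // one thread per row
    const size_t shmem = (block + 1) * sizeof(int);     // cached rowStart entries
    sparseMultiply<<<grid, block, shmem>>>(d_values, d_colIdx, d_rowStart,
                                           d_result, d_x, alpha, beta, numRows);
}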
6e71d338dc1e7d7a3e0c7814d2c4f3e69e77f5b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if defined(TARGET_GPU) int exec_parallel_gpu(INTEGRATION_MEM *mem, solver_props *props, simengine_output *outputs){ int ret = SUCCESS; unsigned int num_gpu_threads; unsigned int num_gpu_blocks; num_gpu_threads = GPU_BLOCK_SIZE < NUM_MODELS ? GPU_BLOCK_SIZE : NUM_MODELS; num_gpu_blocks = (NUM_MODELS + GPU_BLOCK_SIZE - 1) / GPU_BLOCK_SIZE; // Initialize omp with one thread per processor core omp_set_num_threads(omp_get_num_procs()); // Log outputs using parallel host threads #pragma omp parallel { int status; unsigned int modelid; unsigned int thread_num = omp_get_thread_num(); unsigned int num_threads = omp_get_num_threads(); unsigned int models_per_thread = NUM_MODELS/num_threads; unsigned int extra_models = NUM_MODELS%num_threads; while(SUCCESS == ret && ((output_buffer*)props->ob)->active_models){ // Only Host thread 0 can interact with the GPU if(0 == thread_num){ // Execute models on the GPU hipLaunchKernelGGL(( exec_kernel_gpu), dim3(num_gpu_blocks), dim3(num_gpu_threads), 0, 0, mem); // Copy data back to the host cutilSafeCall(hipMemcpy(props->ob, props->gpu.ob, props->ob_size, hipMemcpyDeviceToHost)); } // Make sure all threads wait for GPU to produce data #pragma omp barrier // Copy data in parallel to external api interface for(modelid = thread_num*models_per_thread; modelid < (thread_num+1)*models_per_thread; modelid++){ status = log_outputs((output_buffer*)props->ob, outputs, modelid); if(SUCCESS != status){ ret = ERRMEM; ret = 9; break; } } // If the number of models is not an even multiple of the number of cores // there will be one additional batch of models, with fewer threads than // the number of cores if(thread_num < extra_models){ for (modelid = 0; modelid < semeta.num_models; ++modelid) { status = log_outputs((output_buffer*)props->ob, outputs, modelid); if (SUCCESS != status){ ret = ERRMEM; ret = 20; break; } } } } // Host threads implicitly join here } // Copy final times ans states from GPU cutilSafeCall(hipMemcpy(props->time, props->gpu.time, props->num_models*sizeof(CDATAFORMAT), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(props->model_states, props->gpu.model_states, props->statesize*props->num_models*sizeof(CDATAFORMAT), hipMemcpyDeviceToHost)); return ret; } #endif // defined(TARGET_GPU)
6e71d338dc1e7d7a3e0c7814d2c4f3e69e77f5b0.cu
#if defined(TARGET_GPU) int exec_parallel_gpu(INTEGRATION_MEM *mem, solver_props *props, simengine_output *outputs){ int ret = SUCCESS; unsigned int num_gpu_threads; unsigned int num_gpu_blocks; num_gpu_threads = GPU_BLOCK_SIZE < NUM_MODELS ? GPU_BLOCK_SIZE : NUM_MODELS; num_gpu_blocks = (NUM_MODELS + GPU_BLOCK_SIZE - 1) / GPU_BLOCK_SIZE; // Initialize omp with one thread per processor core omp_set_num_threads(omp_get_num_procs()); // Log outputs using parallel host threads #pragma omp parallel { int status; unsigned int modelid; unsigned int thread_num = omp_get_thread_num(); unsigned int num_threads = omp_get_num_threads(); unsigned int models_per_thread = NUM_MODELS/num_threads; unsigned int extra_models = NUM_MODELS%num_threads; while(SUCCESS == ret && ((output_buffer*)props->ob)->active_models){ // Only Host thread 0 can interact with the GPU if(0 == thread_num){ // Execute models on the GPU exec_kernel_gpu<<<num_gpu_blocks, num_gpu_threads>>>(mem); // Copy data back to the host cutilSafeCall(cudaMemcpy(props->ob, props->gpu.ob, props->ob_size, cudaMemcpyDeviceToHost)); } // Make sure all threads wait for GPU to produce data #pragma omp barrier // Copy data in parallel to external api interface for(modelid = thread_num*models_per_thread; modelid < (thread_num+1)*models_per_thread; modelid++){ status = log_outputs((output_buffer*)props->ob, outputs, modelid); if(SUCCESS != status){ ret = ERRMEM; ret = 9; break; } } // If the number of models is not an even multiple of the number of cores // there will be one additional batch of models, with fewer threads than // the number of cores if(thread_num < extra_models){ for (modelid = 0; modelid < semeta.num_models; ++modelid) { status = log_outputs((output_buffer*)props->ob, outputs, modelid); if (SUCCESS != status){ ret = ERRMEM; ret = 20; break; } } } } // Host threads implicitly join here } // Copy final times ans states from GPU cutilSafeCall(cudaMemcpy(props->time, props->gpu.time, props->num_models*sizeof(CDATAFORMAT), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(props->model_states, props->gpu.model_states, props->statesize*props->num_models*sizeof(CDATAFORMAT), cudaMemcpyDeviceToHost)); return ret; } #endif // defined(TARGET_GPU)
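Each host thread logs an equal slice of NUM_MODELS, and when the model count is not an even multiple of the thread count the remainder forms one extra partial batch, as the comment in the code describes. The sketch below shows one common way to index that leftover batch; the function and variable names are illustrative assumptions and are not taken from the source above.

// Sketch of one common way to hand out model indices across host threads,
// including the leftover batch when num_models % num_threads != 0.
#include <stdio.h>

static void log_thread_share(unsigned int num_models, unsigned int num_threads,
                             unsigned int thread_num)
{
    unsigned int models_per_thread = num_models / num_threads;
    unsigned int extra_models      = num_models % num_threads;
    unsigned int modelid;

    // Full batches: thread t handles [t*models_per_thread, (t+1)*models_per_thread).
    for (modelid = thread_num * models_per_thread;
         modelid < (thread_num + 1) * models_per_thread; ++modelid)
        printf("thread %u -> model %u\n", thread_num, modelid);

    // Leftover batch: only the first extra_models threads take one more model.
    if (thread_num < extra_models) {
        modelid = num_threads * models_per_thread + thread_num;
        printf("thread %u -> model %u (leftover)\n", thread_num, modelid);
    }
}

int main(void)
{
    // 10 models on 3 threads: shares of 3, 3, 3 plus one leftover for thread 0.
    for (unsigned int t = 0; t < 3; ++t)
        log_thread_share(10, 3, t);
    return 0;
}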
c229c56e2593afefcc736d2c44f54dbbe663b005.hip
// !!! This is a file automatically generated by hipify!!! /*https://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#hetr-cuda-prog-cuda-streams*/ #include <stdio.h> #include <time.h> #include <hip/hip_runtime.h> #define BLOCKSIZE 256 #define SIZEOFARRAY 1048576*4 #define KENERL_LOOP 400 // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } __global__ void arrayAddition(int *device_a, int *device_b, int *device_result, const int offset) { int threadId = threadIdx.x + blockIdx.x * blockDim.x ; int index = threadId + offset; if (threadId < SIZEOFARRAY) for (int i =0; i < KENERL_LOOP; i++) device_result[index]= device_a[index]+device_b[index]; } __host__ void generate_rand_data(unsigned int * host_data_ptr) { for(unsigned int i=0; i < SIZEOFARRAY; i++) { host_data_ptr[i] = (unsigned int) rand(); } } __host__ void start_measure(hipEvent_t * start, hipEvent_t *stop){ hipEventCreate(start,0); hipEventCreate(stop,0); hipEventRecord(*start, 0); } __host__ void stop_measure(hipEvent_t* start, hipEvent_t * stop, float &time) { hipEventRecord(*stop, 0); hipEventSynchronize(*stop); hipEventElapsedTime(&time, *start, *stop); } /* Check for safe return of all calls to the device */ int main ( int argc, char **argv ) { // Get cuda properties hipDeviceProp_t prop; hipSetDevice(0); hipGetDeviceProperties( &prop, 0); printf("maxThreadsPerBlock is %d \n", prop.maxThreadsPerBlock); // Allocate device and host memory const int num_streams = 4; const int stream_size = SIZEOFARRAY / num_streams; const int stream_bytes = stream_size * sizeof(int); const int num_bytes = SIZEOFARRAY * sizeof(int); int *host_a, *host_b, *host_result; int *device_a, *device_b, *device_result; checkCuda(hipMalloc( ( void**)& device_a, num_bytes)); checkCuda(hipMalloc( ( void**)& device_b, num_bytes )); checkCuda(hipMalloc( ( void**)& device_result, num_bytes)); checkCuda(hipHostMalloc((void **)&host_a, num_bytes, hipHostMallocDefault)); checkCuda(hipHostMalloc((void **)&host_b, num_bytes, hipHostMallocDefault)); checkCuda(hipHostMalloc((void **)&host_result, num_bytes, hipHostMallocDefault)); // Instantiate cuda events and streams hipEvent_t start, stop, start2, stop2; float elapsedTime, elapsedTime2; // Create Streams hipStream_t orig; hipStream_t stream[num_streams]; checkCuda( hipStreamCreate(&orig)); for (int i = 0; i < num_streams; ++i) checkCuda( hipStreamCreate(&stream[i]) ); // Instantiate host values for(int index = 0; index < SIZEOFARRAY; index++) { host_a[index] = index; host_b[index] = SIZEOFARRAY - index; } // Run sequential version start_measure(&start, &stop); checkCuda(hipMemcpyAsync(device_a, host_a, num_bytes, hipMemcpyHostToDevice, orig)); checkCuda(hipMemcpyAsync(device_b, host_b, num_bytes, hipMemcpyHostToDevice, orig)); hipLaunchKernelGGL(( arrayAddition), dim3(SIZEOFARRAY/BLOCKSIZE), dim3(BLOCKSIZE), 0, 0, device_a, device_b, device_result, 0); checkCuda(hipMemcpyAsync(host_result, device_result, num_bytes, hipMemcpyDeviceToHost, orig)); stop_measure(&start, &stop, elapsedTime); // Run overlapped stream processing // each stream processes portions of the data start_measure(&start2, &stop2); for (int i = 0; i < num_streams; ++i) { int offset = i * stream_size; 
checkCuda(hipMemcpyAsync(&device_a[offset], &host_a[offset], stream_bytes, hipMemcpyHostToDevice, stream[i])); checkCuda(hipMemcpyAsync(&device_b[offset], &host_b[offset], stream_bytes, hipMemcpyHostToDevice, stream[i])); } for (int i = 0; i < num_streams; ++i) { int offset = i * stream_size; hipLaunchKernelGGL(( arrayAddition), dim3(stream_size/BLOCKSIZE), dim3(BLOCKSIZE), 0, stream[i], device_a, device_b, device_result, offset); } for (int i = 0; i < num_streams; ++i) { int offset = i * stream_size; checkCuda(hipMemcpyAsync(&host_result[offset], &device_result[offset], stream_bytes, hipMemcpyDeviceToHost, stream[i])); } stop_measure(&start2, &stop2, elapsedTime2); printf("\n Block size: %d \n", BLOCKSIZE); printf("\n Kernal loop size: %d \n", KENERL_LOOP); printf("\n Size of array : %d \n", SIZEOFARRAY); printf("\n Sequential Time taken: %3.1f ms \n", elapsedTime); printf("\n Streams Time taken: %3.1f ms \n", elapsedTime2); hipHostFree(host_a); hipHostFree(host_b); hipHostFree(host_result); hipFree(device_a); hipFree(device_b); hipFree(device_result); return 0; }
c229c56e2593afefcc736d2c44f54dbbe663b005.cu
/*https://cdac.in/index.aspx?id=ev_hpc_gpu-comp-nvidia-cuda-streams#hetr-cuda-prog-cuda-streams*/ #include <stdio.h> #include <time.h> #include <cuda.h> #define BLOCKSIZE 256 #define SIZEOFARRAY 1048576*4 #define KENERL_LOOP 400 // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } __global__ void arrayAddition(int *device_a, int *device_b, int *device_result, const int offset) { int threadId = threadIdx.x + blockIdx.x * blockDim.x ; int index = threadId + offset; if (threadId < SIZEOFARRAY) for (int i =0; i < KENERL_LOOP; i++) device_result[index]= device_a[index]+device_b[index]; } __host__ void generate_rand_data(unsigned int * host_data_ptr) { for(unsigned int i=0; i < SIZEOFARRAY; i++) { host_data_ptr[i] = (unsigned int) rand(); } } __host__ void start_measure(cudaEvent_t * start, cudaEvent_t *stop){ cudaEventCreate(start,0); cudaEventCreate(stop,0); cudaEventRecord(*start, 0); } __host__ void stop_measure(cudaEvent_t* start, cudaEvent_t * stop, float &time) { cudaEventRecord(*stop, 0); cudaEventSynchronize(*stop); cudaEventElapsedTime(&time, *start, *stop); } /* Check for safe return of all calls to the device */ int main ( int argc, char **argv ) { // Get cuda properties cudaDeviceProp prop; cudaSetDevice(0); cudaGetDeviceProperties( &prop, 0); printf("maxThreadsPerBlock is %d \n", prop.maxThreadsPerBlock); // Allocate device and host memory const int num_streams = 4; const int stream_size = SIZEOFARRAY / num_streams; const int stream_bytes = stream_size * sizeof(int); const int num_bytes = SIZEOFARRAY * sizeof(int); int *host_a, *host_b, *host_result; int *device_a, *device_b, *device_result; checkCuda(cudaMalloc( ( void**)& device_a, num_bytes)); checkCuda(cudaMalloc( ( void**)& device_b, num_bytes )); checkCuda(cudaMalloc( ( void**)& device_result, num_bytes)); checkCuda(cudaHostAlloc((void **)&host_a, num_bytes, cudaHostAllocDefault)); checkCuda(cudaHostAlloc((void **)&host_b, num_bytes, cudaHostAllocDefault)); checkCuda(cudaHostAlloc((void **)&host_result, num_bytes, cudaHostAllocDefault)); // Instantiate cuda events and streams cudaEvent_t start, stop, start2, stop2; float elapsedTime, elapsedTime2; // Create Streams cudaStream_t orig; cudaStream_t stream[num_streams]; checkCuda( cudaStreamCreate(&orig)); for (int i = 0; i < num_streams; ++i) checkCuda( cudaStreamCreate(&stream[i]) ); // Instantiate host values for(int index = 0; index < SIZEOFARRAY; index++) { host_a[index] = index; host_b[index] = SIZEOFARRAY - index; } // Run sequential version start_measure(&start, &stop); checkCuda(cudaMemcpyAsync(device_a, host_a, num_bytes, cudaMemcpyHostToDevice, orig)); checkCuda(cudaMemcpyAsync(device_b, host_b, num_bytes, cudaMemcpyHostToDevice, orig)); arrayAddition<<<SIZEOFARRAY/BLOCKSIZE, BLOCKSIZE>>>(device_a, device_b, device_result, 0); checkCuda(cudaMemcpyAsync(host_result, device_result, num_bytes, cudaMemcpyDeviceToHost, orig)); stop_measure(&start, &stop, elapsedTime); // Run overlapped stream processing // each stream processes portions of the data start_measure(&start2, &stop2); for (int i = 0; i < num_streams; ++i) { int offset = i * stream_size; checkCuda(cudaMemcpyAsync(&device_a[offset], &host_a[offset], stream_bytes, cudaMemcpyHostToDevice, 
stream[i])); checkCuda(cudaMemcpyAsync(&device_b[offset], &host_b[offset], stream_bytes, cudaMemcpyHostToDevice, stream[i])); } for (int i = 0; i < num_streams; ++i) { int offset = i * stream_size; arrayAddition<<<stream_size/BLOCKSIZE, BLOCKSIZE, 0, stream[i]>>>(device_a, device_b, device_result, offset); } for (int i = 0; i < num_streams; ++i) { int offset = i * stream_size; checkCuda(cudaMemcpyAsync(&host_result[offset], &device_result[offset], stream_bytes, cudaMemcpyDeviceToHost, stream[i])); } stop_measure(&start2, &stop2, elapsedTime2); printf("\n Block size: %d \n", BLOCKSIZE); printf("\n Kernal loop size: %d \n", KENERL_LOOP); printf("\n Size of array : %d \n", SIZEOFARRAY); printf("\n Sequential Time taken: %3.1f ms \n", elapsedTime); printf("\n Streams Time taken: %3.1f ms \n", elapsedTime2); cudaFreeHost(host_a); cudaFreeHost(host_b); cudaFreeHost(host_result); cudaFree(device_a); cudaFree(device_b); cudaFree(device_result); return 0; }
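Both timing paths compute the same element-wise sum, and with host_a[i] = i and host_b[i] = SIZEOFARRAY - i every output element should equal SIZEOFARRAY. A small check of that invariant could be appended after the second stop_measure call, assuming the host buffers declared in main() above are still in scope:

// Correctness check that could follow stop_measure(&start2, &stop2, elapsedTime2),
// using host_result and SIZEOFARRAY from the program above.
cudaDeviceSynchronize();                 // make sure every stream has finished
int mismatches = 0;
for (int i = 0; i < SIZEOFARRAY; ++i) {
    // host_a[i] + host_b[i] == i + (SIZEOFARRAY - i) == SIZEOFARRAY for every i
    if (host_result[i] != SIZEOFARRAY) ++mismatches;
}
printf("\n Mismatching elements: %d \n", mismatches);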
f4450271e8b2644ef9aeb11b087c8ec385268f69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/adagrad_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { namespace { template <typename T, int block_size> __global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows, T* grad_merge, const int64_t* grad_merge_rows, size_t grad_merge_rows_size, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ size_t grad_merge_idx; if (tid == 0) { for (size_t i = 0; i < grad_merge_rows_size; i++) { if (grad_rows[ty] == grad_merge_rows[i]) { grad_merge_idx = i; } } } __syncthreads(); grad += ty * row_numel; grad_merge += grad_merge_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]); } } template <typename T, int block_size> __global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows, const T* learning_rate, T* param, T* moment, int64_t row_numel, T epsilon) { const int ty = blockIdx.y; int tid = threadIdx.x; grad += ty * row_numel; param += rows[ty] * row_numel; moment += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(param + index, -1.0 * learning_rate[0] * grad[index] / (sqrt(moment[index]) + epsilon)); } } } // namespace template <typename T> struct SparseAdagradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) auto grad_width = grad.value().dims()[1]; math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data<T>(); framework::Vector<int64_t> merge_rows(grad_merge.rows()); // 2. m += g_m * g_m math::scatter::Mul<platform::CUDADeviceContext, T> sqare_func; auto grad_square = sqare_func(context, grad_merge, grad_merge); math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor; functor(context, grad_square, moment); // 3. 
update parameter auto* lr = learning_rate.data<T>(); auto* param_data = param->data<T>(); auto* moment_data = moment->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid2(1, merge_rows.size()); hipLaunchKernelGGL(( SparseAdagradFunctorKernel< T, 256>), dim3(grid2), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream(), grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr, param_data, moment_data, grad_width, epsilon); } }; template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>; template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
f4450271e8b2644ef9aeb11b087c8ec385268f69.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU #include "paddle/fluid/operators/adagrad_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { namespace { template <typename T, int block_size> __global__ void MergeGradKernel(const T* grad, const int64_t* grad_rows, T* grad_merge, const int64_t* grad_merge_rows, size_t grad_merge_rows_size, int64_t row_numel) { const int ty = blockIdx.y; int tid = threadIdx.x; __shared__ size_t grad_merge_idx; if (tid == 0) { for (size_t i = 0; i < grad_merge_rows_size; i++) { if (grad_rows[ty] == grad_merge_rows[i]) { grad_merge_idx = i; } } } __syncthreads(); grad += ty * row_numel; grad_merge += grad_merge_idx * row_numel; for (int index = tid; index < row_numel; index += block_size) { paddle::platform::CudaAtomicAdd(grad_merge + index, grad[index]); } } template <typename T, int block_size> __global__ void SparseAdagradFunctorKernel(const T* grad, const int64_t* rows, const T* learning_rate, T* param, T* moment, int64_t row_numel, T epsilon) { const int ty = blockIdx.y; int tid = threadIdx.x; grad += ty * row_numel; param += rows[ty] * row_numel; moment += rows[ty] * row_numel; for (int index = tid; index < row_numel; index += block_size) { // Since index in rows of SelectedRows can be duplicate, we have to use // Atomic Operation to avoid concurrent write error. paddle::platform::CudaAtomicAdd(param + index, -1.0 * learning_rate[0] * grad[index] / (sqrt(moment[index]) + epsilon)); } } } // namespace template <typename T> struct SparseAdagradFunctor<platform::CUDADeviceContext, T> { void operator()(const platform::CUDADeviceContext& context, const framework::SelectedRows& grad, const framework::Tensor& learning_rate, T epsilon, framework::Tensor* moment, framework::Tensor* param) { // 1. g_m.rows = set(g.rows) auto grad_width = grad.value().dims()[1]; math::scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; auto grad_merge = merge_func(context, grad); auto* grad_merge_data = grad_merge.mutable_value()->template data<T>(); framework::Vector<int64_t> merge_rows(grad_merge.rows()); // 2. m += g_m * g_m math::scatter::Mul<platform::CUDADeviceContext, T> sqare_func; auto grad_square = sqare_func(context, grad_merge, grad_merge); math::SelectedRowsAddToTensor<platform::CUDADeviceContext, T> functor; functor(context, grad_square, moment); // 3. 
update parameter auto* lr = learning_rate.data<T>(); auto* param_data = param->data<T>(); auto* moment_data = moment->data<T>(); const int block_size = 256; dim3 threads(block_size, 1); dim3 grid2(1, merge_rows.size()); SparseAdagradFunctorKernel< T, 256><<<grid2, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(context) .stream()>>>( grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr, param_data, moment_data, grad_width, epsilon); } }; template struct SparseAdagradFunctor<platform::CUDADeviceContext, float>; template struct SparseAdagradFunctor<platform::CUDADeviceContext, double>; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( adagrad, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::AdagradOpKernel<paddle::platform::CUDADeviceContext, double>);
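Per element, the sparse functor first accumulates the squared gradient into the moment and then scales the gradient by the learning rate over sqrt(moment) + epsilon. A dense, standalone sketch of that same update, for clarity only (it is not part of the PaddlePaddle operator):

// Dense sketch of the per-element Adagrad update applied by
// SparseAdagradFunctorKernel above (illustrative only).
#include <cuda_runtime.h>
#include <math.h>

__global__ void dense_adagrad(float* param, float* moment, const float* grad,
                              const float* lr, float epsilon, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        moment[i] += grad[i] * grad[i];                               // m += g * g
        param[i]  -= lr[0] * grad[i] / (sqrtf(moment[i]) + epsilon);  // p -= lr*g/(sqrt(m)+eps)
    }
}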
742653d8feafc81ede93facf5df275b887d0f7dc.hip
// !!! This is a file automatically generated by hipify!!! /** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek) * @date 2012-2016 * @copyright University of Pennsylvania & STUDENT */ #include <cmath> #include <cstdio> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_ptr.h> #include <thrust/random.h> #include <thrust/remove.h> #include <util/checkCUDAError.h> #include <util/tiny_gltf_loader.h> #include "rasterizeTools.h" #include "rasterize.h" #include <glm/gtc/quaternion.hpp> #include <glm/gtc/matrix_transform.hpp> #include <iostream> #include <vector> #define TEXTURE_MAP 1 #define PERSPECTIVE_CORRECT 1 #define BILINEAR_INTERPOLATION 1 #define BACKFACE_CULL 1 #define NORMAL_INTERPOLATE 1 #define CEL_SHADE 4 #define SOBEL_GRID 8 #define USE_SHARED_SOBEL 1 namespace rasterizer { typedef unsigned short VertexIndex; typedef glm::vec3 VertexAttributePosition; typedef glm::vec3 VertexAttributeNormal; typedef glm::vec2 VertexAttributeTexcoord; typedef unsigned char TextureData; typedef unsigned char BufferByte; enum PrimitiveType{ Point = 1, Line = 2, Triangle = 3 }; struct VertexOut { glm::vec4 pos; // TODO: add new attributes to your VertexOut // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; // eye space normal used for shading, cuz normal will go wrong after perspective transformation // glm::vec3 col; #if TEXTURE_MAP == 1 glm::vec2 texcoord0; TextureData* dev_diffuseTex = NULL; int texWidth, texHeight, texComp; #endif // ... }; struct Primitive { PrimitiveType primitiveType = Triangle; // C++ 11 init VertexOut v[3]; }; struct Fragment { glm::vec3 color; // TODO: add new attributes to your Fragment // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; float z; float sobelx; float sobely; TextureData * diffuseTex; int texWidth; int texHeight; int texComp; glm::vec2 texcoord0; }; struct FragmentMutex { int mutex; }; struct PrimitiveDevBufPointers { int primitiveMode; //from tinygltfloader macro PrimitiveType primitiveType; int numPrimitives; int numIndices; int numVertices; // Vertex In, const after loaded VertexIndex* dev_indices; VertexAttributePosition* dev_position; VertexAttributeNormal* dev_normal; VertexAttributeTexcoord* dev_texcoord0; // Materials, add more attributes when needed #if TEXTURE_MAP == 1 TextureData* dev_diffuseTex; int texWidth; int texHeight; int texComp; #endif // TextureData* dev_specularTex; // TextureData* dev_normalTex; // ... 
// Vertex Out, vertex used for rasterization, this is changing every frame VertexOut* dev_verticesOut; // TODO: add more attributes when needed }; } using namespace rasterizer; struct Light { glm::vec4 worldPos; glm::vec3 eyePos; float emittance; Light(glm::vec4 worldPos, float emittance) { this->worldPos = worldPos; this->emittance = emittance; } }; static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap; static int width = 0; static int height = 0; #define AMBIENT_LIGHT 0.2f std::vector<Light> lights = { Light(glm::vec4(0.0f, 10.0f, 4.0f, 1.0f), 1.0f) }; static int totalNumPrimitives = 0; static Primitive *dev_primitives = NULL; static Fragment *dev_fragmentBuffer = NULL; static glm::vec3 *dev_framebuffer = NULL; static Light *dev_lights = NULL; static FragmentMutex *dev_fragmentMutexes = NULL; /** * Kernel that writes the image to the OpenGL PBO directly. */ __global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } /** * Called once at the beginning of the program to allocate memory. */ void rasterizeInit(int w, int h) { width = w; height = h; hipFree(dev_fragmentBuffer); hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment)); hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); hipFree(dev_framebuffer); hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3)); hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); hipFree(dev_lights); hipMalloc(&dev_lights, lights.size() * sizeof(Light)); hipFree(dev_fragmentMutexes); hipMalloc(&dev_fragmentMutexes, width * height * sizeof(FragmentMutex)); checkCUDAError("rasterizeInit"); } __global__ void initMutexes(int w, int h, FragmentMutex * mutexes, Fragment * fragments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); mutexes[index].mutex = 0; fragments[index].z = FLT_MAX; } } /** * kern function with support for stride to sometimes replace hipMemcpy * One thread is responsible for copying one component */ __global__ void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) { // Attribute (vec3 position) // component (3 * float) // byte (4 * byte) // id of component int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < N) { int count = i / n; int offset = i - count * n; // which component of the attribute for (int j = 0; j < componentTypeByteSize; j++) { dev_dst[count * componentTypeByteSize * n + offset * componentTypeByteSize + j] = dev_src[byteOffset + count * (byteStride == 0 ? 
componentTypeByteSize * n : byteStride) + offset * componentTypeByteSize + j]; } } } __global__ void _nodeMatrixTransform( int numVertices, VertexAttributePosition* position, VertexAttributeNormal* normal, glm::mat4 MV, glm::mat3 MV_normal) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f)); normal[vid] = glm::normalize(MV_normal * normal[vid]); } } glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) { glm::mat4 curMatrix(1.0); const std::vector<double> &m = n.matrix; if (m.size() > 0) { // matrix, copy it for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { curMatrix[i][j] = (float)m.at(4 * i + j); } } } else { // no matrix, use rotation, scale, translation if (n.translation.size() > 0) { curMatrix[3][0] = n.translation[0]; curMatrix[3][1] = n.translation[1]; curMatrix[3][2] = n.translation[2]; } if (n.rotation.size() > 0) { glm::mat4 R; glm::quat q; q[0] = n.rotation[0]; q[1] = n.rotation[1]; q[2] = n.rotation[2]; R = glm::mat4_cast(q); curMatrix = curMatrix * R; } if (n.scale.size() > 0) { curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2])); } } return curMatrix; } void traverseNode( std::map<std::string, glm::mat4> & n2m, const tinygltf::Scene & scene, const std::string & nodeString, const glm::mat4 & parentMatrix ) { const tinygltf::Node & n = scene.nodes.at(nodeString); glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n); n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M)); auto it = n.children.begin(); auto itEnd = n.children.end(); for (; it != itEnd; ++it) { traverseNode(n2m, scene, *it, M); } } void rasterizeSetBuffers(const tinygltf::Scene & scene) { totalNumPrimitives = 0; std::map<std::string, BufferByte*> bufferViewDevPointers; // 1. copy all `bufferViews` to device memory { std::map<std::string, tinygltf::BufferView>::const_iterator it( scene.bufferViews.begin()); std::map<std::string, tinygltf::BufferView>::const_iterator itEnd( scene.bufferViews.end()); for (; it != itEnd; it++) { const std::string key = it->first; const tinygltf::BufferView &bufferView = it->second; if (bufferView.target == 0) { continue; // Unsupported bufferView. } const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer); BufferByte* dev_bufferView; hipMalloc(&dev_bufferView, bufferView.byteLength); hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice); checkCUDAError("Set BufferView Device Mem"); bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView)); } } // 2. 
for each mesh: // for each primitive: // build device buffer of indices, materail, and each attributes // and store these pointers in a map { std::map<std::string, glm::mat4> nodeString2Matrix; auto rootNodeNamesList = scene.scenes.at(scene.defaultScene); { auto it = rootNodeNamesList.begin(); auto itEnd = rootNodeNamesList.end(); for (; it != itEnd; ++it) { traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f)); } } // parse through node to access mesh auto itNode = nodeString2Matrix.begin(); auto itEndNode = nodeString2Matrix.end(); for (; itNode != itEndNode; ++itNode) { const tinygltf::Node & N = scene.nodes.at(itNode->first); const glm::mat4 & matrix = itNode->second; const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix))); auto itMeshName = N.meshes.begin(); auto itEndMeshName = N.meshes.end(); for (; itMeshName != itEndMeshName; ++itMeshName) { const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName); auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>())); std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second; // for each primitive for (size_t i = 0; i < mesh.primitives.size(); i++) { const tinygltf::Primitive &primitive = mesh.primitives[i]; if (primitive.indices.empty()) return; VertexIndex* dev_indices; VertexAttributePosition* dev_position; VertexAttributeNormal* dev_normal; VertexAttributeTexcoord* dev_texcoord0; // ----------Indices------------- const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices); const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView); BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView); // assume type is SCALAR for indices int n = 1; int numIndices = indexAccessor.count; int componentTypeByteSize = sizeof(VertexIndex); int byteLength = numIndices * n * componentTypeByteSize; dim3 numThreadsPerBlock(128); dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); hipMalloc(&dev_indices, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( numIndices, (BufferByte*)dev_indices, dev_bufferView, n, indexAccessor.byteStride, indexAccessor.byteOffset, componentTypeByteSize); checkCUDAError("Set Index Buffer"); // ---------Primitive Info------- // Warning: LINE_STRIP is not supported in tinygltfloader int numPrimitives; PrimitiveType primitiveType; switch (primitive.mode) { case TINYGLTF_MODE_TRIANGLES: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices / 3; break; case TINYGLTF_MODE_TRIANGLE_STRIP: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_TRIANGLE_FAN: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_LINE: primitiveType = PrimitiveType::Line; numPrimitives = numIndices / 2; break; case TINYGLTF_MODE_LINE_LOOP: primitiveType = PrimitiveType::Line; numPrimitives = numIndices + 1; break; case TINYGLTF_MODE_POINTS: primitiveType = PrimitiveType::Point; numPrimitives = numIndices; break; default: // output error break; }; // ----------Attributes------------- auto it(primitive.attributes.begin()); auto itEnd(primitive.attributes.end()); int numVertices = 0; // for each attribute for (; it != itEnd; it++) { const tinygltf::Accessor &accessor = scene.accessors.at(it->second); const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView); int n = 1; if 
(accessor.type == TINYGLTF_TYPE_SCALAR) { n = 1; } else if (accessor.type == TINYGLTF_TYPE_VEC2) { n = 2; } else if (accessor.type == TINYGLTF_TYPE_VEC3) { n = 3; } else if (accessor.type == TINYGLTF_TYPE_VEC4) { n = 4; } BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView); BufferByte ** dev_attribute = NULL; numVertices = accessor.count; int componentTypeByteSize; // Note: since the type of our attribute array (dev_position) is static (float32) // We assume the glTF model attribute type are 5126(FLOAT) here if (it->first.compare("POSITION") == 0) { componentTypeByteSize = sizeof(VertexAttributePosition) / n; dev_attribute = (BufferByte**)&dev_position; } else if (it->first.compare("NORMAL") == 0) { componentTypeByteSize = sizeof(VertexAttributeNormal) / n; dev_attribute = (BufferByte**)&dev_normal; } else if (it->first.compare("TEXCOORD_0") == 0) { componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n; dev_attribute = (BufferByte**)&dev_texcoord0; } std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n'; dim3 numThreadsPerBlock(128); dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); int byteLength = numVertices * n * componentTypeByteSize; hipMalloc(dev_attribute, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( n * numVertices, *dev_attribute, dev_bufferView, n, accessor.byteStride, accessor.byteOffset, componentTypeByteSize); std::string msg = "Set Attribute Buffer: " + it->first; checkCUDAError(msg.c_str()); } // malloc for VertexOut VertexOut* dev_vertexOut; hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut)); checkCUDAError("Malloc VertexOut Buffer"); // ----------Materials------------- // You can only worry about this part once you started to // implement textures for your rasterizer TextureData* dev_diffuseTex = NULL; #if TEXTURE_MAP == 1 int texWidth = 0; int texHeight = 0; int texComp = 0; #endif if (!primitive.material.empty()) { const tinygltf::Material &mat = scene.materials.at(primitive.material); printf("material.name = %s\n", mat.name.c_str()); if (mat.values.find("diffuse") != mat.values.end()) { std::string diffuseTexName = mat.values.at("diffuse").string_value; if (scene.textures.find(diffuseTexName) != scene.textures.end()) { const tinygltf::Texture &tex = scene.textures.at(diffuseTexName); if (scene.images.find(tex.source) != scene.images.end()) { const tinygltf::Image &image = scene.images.at(tex.source); size_t s = image.image.size() * sizeof(TextureData); hipMalloc(&dev_diffuseTex, s); hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice); #if TEXTURE_MAP == 1 texWidth = image.width; texHeight = image.height; texComp = image.component; #endif checkCUDAError("Set Texture Image data"); } } } // TODO: write your code for other materails // You may have to take a look at tinygltfloader // You can also use the above code loading diffuse material as a start point } // ---------Node hierarchy transform-------- hipDeviceSynchronize(); dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); _nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > ( numVertices, dev_position, dev_normal, matrix, matrixNormal); checkCUDAError("Node hierarchy transformation"); // at the end of the for loop of primitive // push dev pointers to map primitiveVector.push_back(PrimitiveDevBufPointers{ primitive.mode, primitiveType, numPrimitives, numIndices, numVertices, dev_indices, dev_position, 
dev_normal, dev_texcoord0, #if TEXTURE_MAP == 1 dev_diffuseTex, texWidth, texHeight, texComp, #endif dev_vertexOut //VertexOut }); totalNumPrimitives += numPrimitives; } // for each primitive } // for each mesh } // for each node } // 3. Malloc for dev_primitives { hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive)); } // Finally, hipFree raw dev_bufferViews { std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin()); std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end()); //bufferViewDevPointers for (; it != itEnd; it++) { hipFree(it->second); } checkCUDAError("Free BufferView Device Mem"); } } __global__ void _vertexTransformAndAssembly( int numVertices, PrimitiveDevBufPointers primitive, glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal, int width, int height) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { VertexOut & vout = primitive.dev_verticesOut[vid]; VertexAttributePosition & vpos = primitive.dev_position[vid]; // Multiply the MVP matrix for each vertex position, this will transform everything into clipping space // Then divide the pos by its w element to transform into NDC space // Finally transform x and y to viewport space vout.pos = MVP * glm::vec4(vpos, 1.0f); if (fabs(vout.pos.w) > EPSILON) vout.pos /= vout.pos.w; vout.pos.x = 0.5f * (float)width * (vout.pos.x + 1.0f); vout.pos.y = 0.5f * (float)height * (vout.pos.y + 1.0f); // Assemble all attribute arraies into the primitive array VertexAttributeNormal & vnorm = primitive.dev_normal[vid]; glm::vec4 eyePos = MV * glm::vec4(vpos, 1.0f); if (fabs(eyePos.w) > EPSILON) vout.eyePos = glm::vec3(eyePos / eyePos.w); vout.eyeNor = glm::normalize(MV_normal * vnorm); #if TEXTURE_MAP == 1 //Textures if (primitive.dev_diffuseTex != NULL) { vout.texcoord0 = primitive.dev_texcoord0[vid]; } vout.dev_diffuseTex = primitive.dev_diffuseTex; vout.texWidth = primitive.texWidth; vout.texHeight = primitive.texHeight; vout.texComp = primitive.texComp; #endif } } static int curPrimitiveBeginId = 0; __global__ void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) { // index id int iid = (blockIdx.x * blockDim.x) + threadIdx.x; if (iid < numIndices) { // This is primitive assembly for triangles int pid; // id for cur primitives vector if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) { pid = iid / (int)primitive.primitiveType; dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType] = primitive.dev_verticesOut[primitive.dev_indices[iid]]; } // TODO: other primitive types (point, line) } } __device__ __host__ int clamp_int(int mn, int x, int mx) { if (x > mx) return mx; if (x < mn) return mn; return x; } __device__ __host__ glm::vec3 getPixel(int x, int y, int width, int height, int components, TextureData * tex) { if (x >= width || y >= height || x < 0 || y < 0) { return glm::vec3(0, 0, 0); } int texIdx = y * width + x; return (1.0f / 255.0f) * glm::vec3(tex[components * texIdx], tex[components * texIdx + 1], tex[components * texIdx + 2]); } __global__ void kernRasterize(int numPrimitives, Primitive* dev_primitives, int width, int height, Fragment* fragmentBuffer, FragmentMutex* mutexes) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < numPrimitives) { Primitive & p = dev_primitives[index]; VertexOut & firstVertex = p.v[0]; glm::vec3 triangle[3] = { glm::vec3(p.v[0].pos), glm::vec3(p.v[1].pos), glm::vec3(p.v[2].pos) }; 
AABB boundingBox = getAABBForTriangle(triangle); int minxpix = clamp_int(0, boundingBox.min.x, width - 1); int minypix = clamp_int(0, boundingBox.min.y, height - 1); int maxxpix = clamp_int(0, boundingBox.max.x, width - 1); int maxypix = clamp_int(0, boundingBox.max.y, height - 1); for (int y = minypix; y <= maxypix; y++) { for (int x = minxpix; x <= maxxpix; x++) { int fragIdx = (height - 1 - y) * width + (width - 1 - x); Fragment & fragment = fragmentBuffer[fragIdx]; glm::vec3 baryCoords = calculateBarycentricCoordinate(triangle, glm::vec2(x, y)); if (isBarycentricCoordInBounds(baryCoords)) { float pos = glm::dot(baryCoords, glm::vec3(p.v[0].pos.z, p.v[1].pos.z, p.v[2].pos.z)); bool isSet; do { isSet = atomicCAS(&mutexes[fragIdx].mutex, 0, 1) == 0; if (isSet) { if (pos < fragment.z) { fragment.z = pos; #if TEXTURE_MAP == 1 if (p.v[0].dev_diffuseTex == NULL) { fragment.color = glm::vec3(1.0f, 1.0f, 1.0f); // white fragment.diffuseTex = NULL; } else { #if PERSPECTIVE_CORRECT == 1 glm::vec3 perspectiveBaryCoords = glm::vec3(baryCoords.x / p.v[0].eyePos.z, baryCoords.y / p.v[1].eyePos.z, baryCoords.z / p.v[2].eyePos.z); float scaleFactor = (1.0f / (perspectiveBaryCoords.x + perspectiveBaryCoords.y + perspectiveBaryCoords.z)); fragment.texcoord0 = glm::mat3x2(p.v[0].texcoord0, p.v[1].texcoord0, p.v[2].texcoord0) * perspectiveBaryCoords * scaleFactor; #else fragment.texcoord0 = glm::mat3x2(p.v[0].texcoord0, p.v[1].texcoord0, p.v[2].texcoord0) * baryCoords; #endif fragment.texWidth = firstVertex.texWidth; fragment.texHeight = firstVertex.texHeight; fragment.texComp = firstVertex.texComp; fragment.diffuseTex = firstVertex.dev_diffuseTex; } #else fragment.color = glm::vec3(1.0f, 1.0f, 1.0f); // white #endif fragment.eyePos = glm::mat3(p.v[0].eyePos, p.v[1].eyePos, p.v[2].eyePos) * baryCoords; #if NORMAL_INTERPOLATE == 1 fragment.eyeNor = glm::mat3(p.v[0].eyeNor, p.v[1].eyeNor, p.v[2].eyeNor) * baryCoords; #else fragment.eyeNor = glm::normalize(glm::cross( glm::vec3(p.v[1].eyeNor - p.v[0].eyeNor), glm::vec3(p.v[2].eyeNor - p.v[0].eyeNor) )); #endif } } if (isSet) { mutexes[fragIdx].mutex = 0; } } while (pos < fragment.z && !isSet); } } } } } __global__ void kernTextureShader(int width, int height, Fragment* fragmentBuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * width); if (x < width && y < height) { Fragment & fragment = fragmentBuffer[index]; if (fragment.diffuseTex != NULL) { float texx = 0.5f + fragment.texcoord0.x * (fragment.texWidth - 1); float texy = 0.5f + fragment.texcoord0.y * (fragment.texHeight - 1); #if BILINEAR_INTERPOLATION == 1 float x1 = glm::floor(texx); float y1 = glm::floor(texy); glm::vec3 c11 = getPixel(x1, y1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 c12 = getPixel(x1, y1 + 1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 c21 = getPixel(x1 + 1, y1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 c22 = getPixel(x1 + 1, y1 + 1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 r1 = (texx - x1) * c21 + (1.0f + x1 - texx) * c11; glm::vec3 r2 = (texx - x1) * c22 + (1.0f + x1 - texx) * c12; fragment.color = (texy - y1) * r2 + (1.0f + y1 - texy) * r1; #else fragment.color = getPixel(texx, texy, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); #endif } } } struct IsBackfacing { __host__ __device__ bool 
operator () (const Primitive & p) { glm::vec3 normal = glm::normalize(glm::cross( glm::vec3(p.v[1].pos - p.v[0].pos), glm::vec3(p.v[2].pos - p.v[0].pos))); return normal.z < -0; } }; __global__ void calculateSobel(int w, int h, Fragment * fragmentBuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); float sobelKernel[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; if (x < w && y < h) { Fragment & fragment = fragmentBuffer[index]; for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { if (x + i < w && x + i >= 0 && y + j < h && y + j >= 0) { int sobelIdx = x + i + ((y + j) * w); float dist = (fragmentBuffer[sobelIdx].z > 1e12) ? 1e12 : glm::length(fragmentBuffer[sobelIdx].eyePos); fragment.sobelx += sobelKernel[i + 1][j + 1] * dist; fragment.sobely += sobelKernel[j + 1][i + 1] * dist; } } } } } __global__ void calculateSobelWithShared(int w, int h, Fragment * fragmentBuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); __shared__ float tile[SOBEL_GRID][SOBEL_GRID]; __shared__ float sobelx[SOBEL_GRID][SOBEL_GRID]; __shared__ float sobely[SOBEL_GRID][SOBEL_GRID]; float sobelKernel[3][3] = { { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } }; if (x < w && y < h) { int bx = threadIdx.x; int by = threadIdx.y; Fragment & fragment = fragmentBuffer[index]; tile[bx][by] = (fragment.z > 1e12) ? 1e12 : glm::length(fragment.eyePos); sobelx[bx][by] = 0; sobely[bx][by] = 0; __syncthreads(); for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { if (bx + i < SOBEL_GRID && bx + i >= 0 && by + j < SOBEL_GRID && by + j >= 0) { sobelx[bx][by] += sobelKernel[i + 1][j + 1] * tile[bx + i][by + j]; sobely[bx][by] += sobelKernel[j + 1][i + 1] * tile[bx + i][by + j]; } else { if (x + i < w && x + i >= 0 && y + j < h && y + j >= 0) { int sobelIdx = x + i + ((y + j) * w); float dist = (fragmentBuffer[sobelIdx].z > 1e12) ? 1e12 : glm::length(fragmentBuffer[sobelIdx].eyePos); sobelx[bx][by] += sobelKernel[i + 1][j + 1] * dist; sobely[bx][by] += sobelKernel[j + 1][i + 1] * dist; } } } } fragment.sobelx = sobelx[bx][by]; fragment.sobely = sobely[bx][by]; } } /** * Writes fragment colors to the framebuffer */ __global__ void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer, int numLights, Light *lights) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { Fragment & fragment = fragmentBuffer[index]; if (fragment.z < 1e12) { float totalLight = AMBIENT_LIGHT; // Lambert shading for (int i = 0; i < numLights; i++) { Light & light = lights[i]; totalLight += light.emittance * glm::max(0.0f, glm::dot(fragment.eyeNor, glm::normalize(light.eyePos - fragment.eyePos))); } framebuffer[index] = totalLight * fragment.color; #if CEL_SHADE > 0 framebuffer[index] = glm::ceil(framebuffer[index] * (float)CEL_SHADE) / (float)CEL_SHADE; float sobel = glm::sqrt(fragment.sobelx * fragment.sobelx + fragment.sobely * fragment.sobely); if (sobel > 15.0f) framebuffer[index] = glm::vec3(0.0f, 0.0f, 0.0f); #endif } else { framebuffer[index] = glm::vec3(0.5f, 0.8f, 1.0f); } } } /** * Perform rasterization. 
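 * Stages: vertex transform and assembly, primitive assembly, optional backface culling,
 * bounding-box rasterization with a mutex-guarded depth test, optional texture and Sobel
 * edge passes, then per-fragment lighting and copy-out to the PBO.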
*/ void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockCount2d((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); // Execute your rasterization pipeline here // (See README for rasterization pipeline outline.) // Vertex Process & primitive assembly { curPrimitiveBeginId = 0; dim3 numThreadsPerBlock(128); auto it = mesh2PrimitivesMap.begin(); auto itEnd = mesh2PrimitivesMap.end(); for (; it != itEnd; ++it) { auto p = (it->second).begin(); // each primitive auto pEnd = (it->second).end(); for (; p != pEnd; ++p) { dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); _vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height); checkCUDAError("Vertex Processing"); hipDeviceSynchronize(); _primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> > (p->numIndices, curPrimitiveBeginId, dev_primitives, *p); checkCUDAError("Primitive Assembly"); curPrimitiveBeginId += p->numPrimitives; } } checkCUDAError("Vertex Processing and Primitive Assembly"); } hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); initMutexes << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentMutexes, dev_fragmentBuffer); checkCUDAError("init mutexes"); int numPrimitives = totalNumPrimitives; // Backface culling #if BACKFACE_CULL == 1 thrust::device_ptr<Primitive> dev_thrust_primitives(dev_primitives); thrust::device_ptr<Primitive> dev_thrust_primitivesEnd = thrust::remove_if(dev_thrust_primitives, dev_thrust_primitives + numPrimitives, IsBackfacing()); numPrimitives = dev_thrust_primitivesEnd - dev_thrust_primitives; printf("%d triangles\n", numPrimitives); checkCUDAError("backface culling"); #endif // Rasterization dim3 numThreadsPerBlock(64); dim3 numBlocksForPrimitives((numPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); kernRasterize << < numBlocksForPrimitives, numThreadsPerBlock >> >( numPrimitives, dev_primitives, width, height, dev_fragmentBuffer, dev_fragmentMutexes); checkCUDAError("rasterizer"); // Filling texture colors #if TEXTURE_MAP == 1 kernTextureShader << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer); checkCUDAError("textureShader"); #endif // Offline light transformation, since there aren't many lights for (Light & light : lights) { glm::vec4 eyePos = MV * light.worldPos; light.eyePos = glm::vec3(eyePos / eyePos.w); } hipMemcpy(dev_lights, lights.data(), lights.size() * sizeof(Light), hipMemcpyHostToDevice); #if CEL_SHADE > 0 dim3 sobelBlockSize2d(SOBEL_GRID, SOBEL_GRID); dim3 sobelBlockCount2d((width - 1) / sobelBlockSize2d.x + 1, (height - 1) / sobelBlockSize2d.y + 1); #if USE_SHARED_SOBEL == 1 calculateSobelWithShared<< <sobelBlockCount2d, sobelBlockSize2d >> >(width, height, dev_fragmentBuffer); #else calculateSobel<< <sobelBlockCount2d, sobelBlockSize2d >> >(width, height, dev_fragmentBuffer); #endif checkCUDAError("Sobel"); #endif // Copy depthbuffer colors into framebuffer render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer, lights.size(), dev_lights); checkCUDAError("fragment shader"); // Copy framebuffer into OpenGL buffer for OpenGL previewing sendImageToPBO << <blockCount2d, blockSize2d >> >(pbo, width, height, dev_framebuffer); 
checkCUDAError("copy render result to pbo"); } /** * Called once at the end of the program to free CUDA memory. */ void rasterizeFree() { // deconstruct primitives attribute/indices device buffer auto it(mesh2PrimitivesMap.begin()); auto itEnd(mesh2PrimitivesMap.end()); for (; it != itEnd; ++it) { for (auto p = it->second.begin(); p != it->second.end(); ++p) { hipFree(p->dev_indices); hipFree(p->dev_position); hipFree(p->dev_normal); hipFree(p->dev_texcoord0); #if TEXTURE_MAP == 1 hipFree(p->dev_diffuseTex); #endif hipFree(p->dev_verticesOut); } } //////////// hipFree(dev_primitives); dev_primitives = NULL; hipFree(dev_fragmentBuffer); dev_fragmentBuffer = NULL; hipFree(dev_framebuffer); dev_framebuffer = NULL; hipFree(dev_lights); dev_lights = NULL; checkCUDAError("rasterize Free"); }
742653d8feafc81ede93facf5df275b887d0f7dc.cu
/** * @file rasterize.cu * @brief CUDA-accelerated rasterization pipeline. * @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek) * @date 2012-2016 * @copyright University of Pennsylvania & STUDENT */ #include <cmath> #include <cstdio> #include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_ptr.h> #include <thrust/random.h> #include <thrust/remove.h> #include <util/checkCUDAError.h> #include <util/tiny_gltf_loader.h> #include "rasterizeTools.h" #include "rasterize.h" #include <glm/gtc/quaternion.hpp> #include <glm/gtc/matrix_transform.hpp> #include <iostream> #include <vector> #define TEXTURE_MAP 1 #define PERSPECTIVE_CORRECT 1 #define BILINEAR_INTERPOLATION 1 #define BACKFACE_CULL 1 #define NORMAL_INTERPOLATE 1 #define CEL_SHADE 4 #define SOBEL_GRID 8 #define USE_SHARED_SOBEL 1 namespace rasterizer { typedef unsigned short VertexIndex; typedef glm::vec3 VertexAttributePosition; typedef glm::vec3 VertexAttributeNormal; typedef glm::vec2 VertexAttributeTexcoord; typedef unsigned char TextureData; typedef unsigned char BufferByte; enum PrimitiveType{ Point = 1, Line = 2, Triangle = 3 }; struct VertexOut { glm::vec4 pos; // TODO: add new attributes to your VertexOut // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; // eye space normal used for shading, cuz normal will go wrong after perspective transformation // glm::vec3 col; #if TEXTURE_MAP == 1 glm::vec2 texcoord0; TextureData* dev_diffuseTex = NULL; int texWidth, texHeight, texComp; #endif // ... }; struct Primitive { PrimitiveType primitiveType = Triangle; // C++ 11 init VertexOut v[3]; }; struct Fragment { glm::vec3 color; // TODO: add new attributes to your Fragment // The attributes listed below might be useful, // but always feel free to modify on your own glm::vec3 eyePos; // eye space position used for shading glm::vec3 eyeNor; float z; float sobelx; float sobely; TextureData * diffuseTex; int texWidth; int texHeight; int texComp; glm::vec2 texcoord0; }; struct FragmentMutex { int mutex; }; struct PrimitiveDevBufPointers { int primitiveMode; //from tinygltfloader macro PrimitiveType primitiveType; int numPrimitives; int numIndices; int numVertices; // Vertex In, const after loaded VertexIndex* dev_indices; VertexAttributePosition* dev_position; VertexAttributeNormal* dev_normal; VertexAttributeTexcoord* dev_texcoord0; // Materials, add more attributes when needed #if TEXTURE_MAP == 1 TextureData* dev_diffuseTex; int texWidth; int texHeight; int texComp; #endif // TextureData* dev_specularTex; // TextureData* dev_normalTex; // ... 
// Vertex Out, vertex used for rasterization, this is changing every frame VertexOut* dev_verticesOut; // TODO: add more attributes when needed }; } using namespace rasterizer; struct Light { glm::vec4 worldPos; glm::vec3 eyePos; float emittance; Light(glm::vec4 worldPos, float emittance) { this->worldPos = worldPos; this->emittance = emittance; } }; static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap; static int width = 0; static int height = 0; #define AMBIENT_LIGHT 0.2f std::vector<Light> lights = { Light(glm::vec4(0.0f, 10.0f, 4.0f, 1.0f), 1.0f) }; static int totalNumPrimitives = 0; static Primitive *dev_primitives = NULL; static Fragment *dev_fragmentBuffer = NULL; static glm::vec3 *dev_framebuffer = NULL; static Light *dev_lights = NULL; static FragmentMutex *dev_fragmentMutexes = NULL; /** * Kernel that writes the image to the OpenGL PBO directly. */ __global__ void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { glm::vec3 color; color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0; color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0; color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0; // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } /** * Called once at the beginning of the program to allocate memory. */ void rasterizeInit(int w, int h) { width = w; height = h; cudaFree(dev_fragmentBuffer); cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment)); cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); cudaFree(dev_framebuffer); cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3)); cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3)); cudaFree(dev_lights); cudaMalloc(&dev_lights, lights.size() * sizeof(Light)); cudaFree(dev_fragmentMutexes); cudaMalloc(&dev_fragmentMutexes, width * height * sizeof(FragmentMutex)); checkCUDAError("rasterizeInit"); } __global__ void initMutexes(int w, int h, FragmentMutex * mutexes, Fragment * fragments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < w && y < h) { int index = x + (y * w); mutexes[index].mutex = 0; fragments[index].z = FLT_MAX; } } /** * kern function with support for stride to sometimes replace cudaMemcpy * One thread is responsible for copying one component */ __global__ void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) { // Attribute (vec3 position) // component (3 * float) // byte (4 * byte) // id of component int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < N) { int count = i / n; int offset = i - count * n; // which component of the attribute for (int j = 0; j < componentTypeByteSize; j++) { dev_dst[count * componentTypeByteSize * n + offset * componentTypeByteSize + j] = dev_src[byteOffset + count * (byteStride == 0 ? 
componentTypeByteSize * n : byteStride) + offset * componentTypeByteSize + j]; } } } __global__ void _nodeMatrixTransform( int numVertices, VertexAttributePosition* position, VertexAttributeNormal* normal, glm::mat4 MV, glm::mat3 MV_normal) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f)); normal[vid] = glm::normalize(MV_normal * normal[vid]); } } glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) { glm::mat4 curMatrix(1.0); const std::vector<double> &m = n.matrix; if (m.size() > 0) { // matrix, copy it for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { curMatrix[i][j] = (float)m.at(4 * i + j); } } } else { // no matrix, use rotation, scale, translation if (n.translation.size() > 0) { curMatrix[3][0] = n.translation[0]; curMatrix[3][1] = n.translation[1]; curMatrix[3][2] = n.translation[2]; } if (n.rotation.size() > 0) { glm::mat4 R; glm::quat q; q[0] = n.rotation[0]; q[1] = n.rotation[1]; q[2] = n.rotation[2]; R = glm::mat4_cast(q); curMatrix = curMatrix * R; } if (n.scale.size() > 0) { curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2])); } } return curMatrix; } void traverseNode( std::map<std::string, glm::mat4> & n2m, const tinygltf::Scene & scene, const std::string & nodeString, const glm::mat4 & parentMatrix ) { const tinygltf::Node & n = scene.nodes.at(nodeString); glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n); n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M)); auto it = n.children.begin(); auto itEnd = n.children.end(); for (; it != itEnd; ++it) { traverseNode(n2m, scene, *it, M); } } void rasterizeSetBuffers(const tinygltf::Scene & scene) { totalNumPrimitives = 0; std::map<std::string, BufferByte*> bufferViewDevPointers; // 1. copy all `bufferViews` to device memory { std::map<std::string, tinygltf::BufferView>::const_iterator it( scene.bufferViews.begin()); std::map<std::string, tinygltf::BufferView>::const_iterator itEnd( scene.bufferViews.end()); for (; it != itEnd; it++) { const std::string key = it->first; const tinygltf::BufferView &bufferView = it->second; if (bufferView.target == 0) { continue; // Unsupported bufferView. } const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer); BufferByte* dev_bufferView; cudaMalloc(&dev_bufferView, bufferView.byteLength); cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice); checkCUDAError("Set BufferView Device Mem"); bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView)); } } // 2. 
for each mesh: // for each primitive: // build device buffer of indices, materail, and each attributes // and store these pointers in a map { std::map<std::string, glm::mat4> nodeString2Matrix; auto rootNodeNamesList = scene.scenes.at(scene.defaultScene); { auto it = rootNodeNamesList.begin(); auto itEnd = rootNodeNamesList.end(); for (; it != itEnd; ++it) { traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f)); } } // parse through node to access mesh auto itNode = nodeString2Matrix.begin(); auto itEndNode = nodeString2Matrix.end(); for (; itNode != itEndNode; ++itNode) { const tinygltf::Node & N = scene.nodes.at(itNode->first); const glm::mat4 & matrix = itNode->second; const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix))); auto itMeshName = N.meshes.begin(); auto itEndMeshName = N.meshes.end(); for (; itMeshName != itEndMeshName; ++itMeshName) { const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName); auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>())); std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second; // for each primitive for (size_t i = 0; i < mesh.primitives.size(); i++) { const tinygltf::Primitive &primitive = mesh.primitives[i]; if (primitive.indices.empty()) return; VertexIndex* dev_indices; VertexAttributePosition* dev_position; VertexAttributeNormal* dev_normal; VertexAttributeTexcoord* dev_texcoord0; // ----------Indices------------- const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices); const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView); BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView); // assume type is SCALAR for indices int n = 1; int numIndices = indexAccessor.count; int componentTypeByteSize = sizeof(VertexIndex); int byteLength = numIndices * n * componentTypeByteSize; dim3 numThreadsPerBlock(128); dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); cudaMalloc(&dev_indices, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( numIndices, (BufferByte*)dev_indices, dev_bufferView, n, indexAccessor.byteStride, indexAccessor.byteOffset, componentTypeByteSize); checkCUDAError("Set Index Buffer"); // ---------Primitive Info------- // Warning: LINE_STRIP is not supported in tinygltfloader int numPrimitives; PrimitiveType primitiveType; switch (primitive.mode) { case TINYGLTF_MODE_TRIANGLES: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices / 3; break; case TINYGLTF_MODE_TRIANGLE_STRIP: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_TRIANGLE_FAN: primitiveType = PrimitiveType::Triangle; numPrimitives = numIndices - 2; break; case TINYGLTF_MODE_LINE: primitiveType = PrimitiveType::Line; numPrimitives = numIndices / 2; break; case TINYGLTF_MODE_LINE_LOOP: primitiveType = PrimitiveType::Line; numPrimitives = numIndices + 1; break; case TINYGLTF_MODE_POINTS: primitiveType = PrimitiveType::Point; numPrimitives = numIndices; break; default: // output error break; }; // ----------Attributes------------- auto it(primitive.attributes.begin()); auto itEnd(primitive.attributes.end()); int numVertices = 0; // for each attribute for (; it != itEnd; it++) { const tinygltf::Accessor &accessor = scene.accessors.at(it->second); const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView); int n = 1; if 
(accessor.type == TINYGLTF_TYPE_SCALAR) { n = 1; } else if (accessor.type == TINYGLTF_TYPE_VEC2) { n = 2; } else if (accessor.type == TINYGLTF_TYPE_VEC3) { n = 3; } else if (accessor.type == TINYGLTF_TYPE_VEC4) { n = 4; } BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView); BufferByte ** dev_attribute = NULL; numVertices = accessor.count; int componentTypeByteSize; // Note: since the type of our attribute array (dev_position) is static (float32) // We assume the glTF model attribute type are 5126(FLOAT) here if (it->first.compare("POSITION") == 0) { componentTypeByteSize = sizeof(VertexAttributePosition) / n; dev_attribute = (BufferByte**)&dev_position; } else if (it->first.compare("NORMAL") == 0) { componentTypeByteSize = sizeof(VertexAttributeNormal) / n; dev_attribute = (BufferByte**)&dev_normal; } else if (it->first.compare("TEXCOORD_0") == 0) { componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n; dev_attribute = (BufferByte**)&dev_texcoord0; } std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n'; dim3 numThreadsPerBlock(128); dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); int byteLength = numVertices * n * componentTypeByteSize; cudaMalloc(dev_attribute, byteLength); _deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > ( n * numVertices, *dev_attribute, dev_bufferView, n, accessor.byteStride, accessor.byteOffset, componentTypeByteSize); std::string msg = "Set Attribute Buffer: " + it->first; checkCUDAError(msg.c_str()); } // malloc for VertexOut VertexOut* dev_vertexOut; cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut)); checkCUDAError("Malloc VertexOut Buffer"); // ----------Materials------------- // You can only worry about this part once you started to // implement textures for your rasterizer TextureData* dev_diffuseTex = NULL; #if TEXTURE_MAP == 1 int texWidth = 0; int texHeight = 0; int texComp = 0; #endif if (!primitive.material.empty()) { const tinygltf::Material &mat = scene.materials.at(primitive.material); printf("material.name = %s\n", mat.name.c_str()); if (mat.values.find("diffuse") != mat.values.end()) { std::string diffuseTexName = mat.values.at("diffuse").string_value; if (scene.textures.find(diffuseTexName) != scene.textures.end()) { const tinygltf::Texture &tex = scene.textures.at(diffuseTexName); if (scene.images.find(tex.source) != scene.images.end()) { const tinygltf::Image &image = scene.images.at(tex.source); size_t s = image.image.size() * sizeof(TextureData); cudaMalloc(&dev_diffuseTex, s); cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice); #if TEXTURE_MAP == 1 texWidth = image.width; texHeight = image.height; texComp = image.component; #endif checkCUDAError("Set Texture Image data"); } } } // TODO: write your code for other materails // You may have to take a look at tinygltfloader // You can also use the above code loading diffuse material as a start point } // ---------Node hierarchy transform-------- cudaDeviceSynchronize(); dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); _nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > ( numVertices, dev_position, dev_normal, matrix, matrixNormal); checkCUDAError("Node hierarchy transformation"); // at the end of the for loop of primitive // push dev pointers to map primitiveVector.push_back(PrimitiveDevBufPointers{ primitive.mode, primitiveType, numPrimitives, numIndices, numVertices, dev_indices, dev_position, 
dev_normal, dev_texcoord0, #if TEXTURE_MAP == 1 dev_diffuseTex, texWidth, texHeight, texComp, #endif dev_vertexOut //VertexOut }); totalNumPrimitives += numPrimitives; } // for each primitive } // for each mesh } // for each node } // 3. Malloc for dev_primitives { cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive)); } // Finally, cudaFree raw dev_bufferViews { std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin()); std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end()); //bufferViewDevPointers for (; it != itEnd; it++) { cudaFree(it->second); } checkCUDAError("Free BufferView Device Mem"); } } __global__ void _vertexTransformAndAssembly( int numVertices, PrimitiveDevBufPointers primitive, glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal, int width, int height) { // vertex id int vid = (blockIdx.x * blockDim.x) + threadIdx.x; if (vid < numVertices) { VertexOut & vout = primitive.dev_verticesOut[vid]; VertexAttributePosition & vpos = primitive.dev_position[vid]; // Multiply the MVP matrix for each vertex position, this will transform everything into clipping space // Then divide the pos by its w element to transform into NDC space // Finally transform x and y to viewport space vout.pos = MVP * glm::vec4(vpos, 1.0f); if (fabs(vout.pos.w) > EPSILON) vout.pos /= vout.pos.w; vout.pos.x = 0.5f * (float)width * (vout.pos.x + 1.0f); vout.pos.y = 0.5f * (float)height * (vout.pos.y + 1.0f); // Assemble all attribute arraies into the primitive array VertexAttributeNormal & vnorm = primitive.dev_normal[vid]; glm::vec4 eyePos = MV * glm::vec4(vpos, 1.0f); if (fabs(eyePos.w) > EPSILON) vout.eyePos = glm::vec3(eyePos / eyePos.w); vout.eyeNor = glm::normalize(MV_normal * vnorm); #if TEXTURE_MAP == 1 //Textures if (primitive.dev_diffuseTex != NULL) { vout.texcoord0 = primitive.dev_texcoord0[vid]; } vout.dev_diffuseTex = primitive.dev_diffuseTex; vout.texWidth = primitive.texWidth; vout.texHeight = primitive.texHeight; vout.texComp = primitive.texComp; #endif } } static int curPrimitiveBeginId = 0; __global__ void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) { // index id int iid = (blockIdx.x * blockDim.x) + threadIdx.x; if (iid < numIndices) { // This is primitive assembly for triangles int pid; // id for cur primitives vector if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) { pid = iid / (int)primitive.primitiveType; dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType] = primitive.dev_verticesOut[primitive.dev_indices[iid]]; } // TODO: other primitive types (point, line) } } __device__ __host__ int clamp_int(int mn, int x, int mx) { if (x > mx) return mx; if (x < mn) return mn; return x; } __device__ __host__ glm::vec3 getPixel(int x, int y, int width, int height, int components, TextureData * tex) { if (x >= width || y >= height || x < 0 || y < 0) { return glm::vec3(0, 0, 0); } int texIdx = y * width + x; return (1.0f / 255.0f) * glm::vec3(tex[components * texIdx], tex[components * texIdx + 1], tex[components * texIdx + 2]); } __global__ void kernRasterize(int numPrimitives, Primitive* dev_primitives, int width, int height, Fragment* fragmentBuffer, FragmentMutex* mutexes) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < numPrimitives) { Primitive & p = dev_primitives[index]; VertexOut & firstVertex = p.v[0]; glm::vec3 triangle[3] = { glm::vec3(p.v[0].pos), glm::vec3(p.v[1].pos), glm::vec3(p.v[2].pos) 
}; AABB boundingBox = getAABBForTriangle(triangle); int minxpix = clamp_int(0, boundingBox.min.x, width - 1); int minypix = clamp_int(0, boundingBox.min.y, height - 1); int maxxpix = clamp_int(0, boundingBox.max.x, width - 1); int maxypix = clamp_int(0, boundingBox.max.y, height - 1); for (int y = minypix; y <= maxypix; y++) { for (int x = minxpix; x <= maxxpix; x++) { int fragIdx = (height - 1 - y) * width + (width - 1 - x); Fragment & fragment = fragmentBuffer[fragIdx]; glm::vec3 baryCoords = calculateBarycentricCoordinate(triangle, glm::vec2(x, y)); if (isBarycentricCoordInBounds(baryCoords)) { float pos = glm::dot(baryCoords, glm::vec3(p.v[0].pos.z, p.v[1].pos.z, p.v[2].pos.z)); bool isSet; do { isSet = atomicCAS(&mutexes[fragIdx].mutex, 0, 1) == 0; if (isSet) { if (pos < fragment.z) { fragment.z = pos; #if TEXTURE_MAP == 1 if (p.v[0].dev_diffuseTex == NULL) { fragment.color = glm::vec3(1.0f, 1.0f, 1.0f); // white fragment.diffuseTex = NULL; } else { #if PERSPECTIVE_CORRECT == 1 glm::vec3 perspectiveBaryCoords = glm::vec3(baryCoords.x / p.v[0].eyePos.z, baryCoords.y / p.v[1].eyePos.z, baryCoords.z / p.v[2].eyePos.z); float scaleFactor = (1.0f / (perspectiveBaryCoords.x + perspectiveBaryCoords.y + perspectiveBaryCoords.z)); fragment.texcoord0 = glm::mat3x2(p.v[0].texcoord0, p.v[1].texcoord0, p.v[2].texcoord0) * perspectiveBaryCoords * scaleFactor; #else fragment.texcoord0 = glm::mat3x2(p.v[0].texcoord0, p.v[1].texcoord0, p.v[2].texcoord0) * baryCoords; #endif fragment.texWidth = firstVertex.texWidth; fragment.texHeight = firstVertex.texHeight; fragment.texComp = firstVertex.texComp; fragment.diffuseTex = firstVertex.dev_diffuseTex; } #else fragment.color = glm::vec3(1.0f, 1.0f, 1.0f); // white #endif fragment.eyePos = glm::mat3(p.v[0].eyePos, p.v[1].eyePos, p.v[2].eyePos) * baryCoords; #if NORMAL_INTERPOLATE == 1 fragment.eyeNor = glm::mat3(p.v[0].eyeNor, p.v[1].eyeNor, p.v[2].eyeNor) * baryCoords; #else fragment.eyeNor = glm::normalize(glm::cross( glm::vec3(p.v[1].eyeNor - p.v[0].eyeNor), glm::vec3(p.v[2].eyeNor - p.v[0].eyeNor) )); #endif } } if (isSet) { mutexes[fragIdx].mutex = 0; } } while (pos < fragment.z && !isSet); } } } } } __global__ void kernTextureShader(int width, int height, Fragment* fragmentBuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * width); if (x < width && y < height) { Fragment & fragment = fragmentBuffer[index]; if (fragment.diffuseTex != NULL) { float texx = 0.5f + fragment.texcoord0.x * (fragment.texWidth - 1); float texy = 0.5f + fragment.texcoord0.y * (fragment.texHeight - 1); #if BILINEAR_INTERPOLATION == 1 float x1 = glm::floor(texx); float y1 = glm::floor(texy); glm::vec3 c11 = getPixel(x1, y1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 c12 = getPixel(x1, y1 + 1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 c21 = getPixel(x1 + 1, y1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 c22 = getPixel(x1 + 1, y1 + 1, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); glm::vec3 r1 = (texx - x1) * c21 + (1.0f + x1 - texx) * c11; glm::vec3 r2 = (texx - x1) * c22 + (1.0f + x1 - texx) * c12; fragment.color = (texy - y1) * r2 + (1.0f + y1 - texy) * r1; #else fragment.color = getPixel(texx, texy, fragment.texWidth, fragment.texHeight, fragment.texComp, fragment.diffuseTex); #endif } } } struct IsBackfacing { __host__ __device__ bool 
operator () (const Primitive & p) { glm::vec3 normal = glm::normalize(glm::cross( glm::vec3(p.v[1].pos - p.v[0].pos), glm::vec3(p.v[2].pos - p.v[0].pos))); return normal.z < -0; } }; __global__ void calculateSobel(int w, int h, Fragment * fragmentBuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); float sobelKernel[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } }; if (x < w && y < h) { Fragment & fragment = fragmentBuffer[index]; for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { if (x + i < w && x + i >= 0 && y + j < h && y + j >= 0) { int sobelIdx = x + i + ((y + j) * w); float dist = (fragmentBuffer[sobelIdx].z > 1e12) ? 1e12 : glm::length(fragmentBuffer[sobelIdx].eyePos); fragment.sobelx += sobelKernel[i + 1][j + 1] * dist; fragment.sobely += sobelKernel[j + 1][i + 1] * dist; } } } } } __global__ void calculateSobelWithShared(int w, int h, Fragment * fragmentBuffer) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); __shared__ float tile[SOBEL_GRID][SOBEL_GRID]; __shared__ float sobelx[SOBEL_GRID][SOBEL_GRID]; __shared__ float sobely[SOBEL_GRID][SOBEL_GRID]; float sobelKernel[3][3] = { { 3, 0, -3 }, { 10, 0, -10 }, { 3, 0, -3 } }; if (x < w && y < h) { int bx = threadIdx.x; int by = threadIdx.y; Fragment & fragment = fragmentBuffer[index]; tile[bx][by] = (fragment.z > 1e12) ? 1e12 : glm::length(fragment.eyePos); sobelx[bx][by] = 0; sobely[bx][by] = 0; __syncthreads(); for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { if (bx + i < SOBEL_GRID && bx + i >= 0 && by + j < SOBEL_GRID && by + j >= 0) { sobelx[bx][by] += sobelKernel[i + 1][j + 1] * tile[bx + i][by + j]; sobely[bx][by] += sobelKernel[j + 1][i + 1] * tile[bx + i][by + j]; } else { if (x + i < w && x + i >= 0 && y + j < h && y + j >= 0) { int sobelIdx = x + i + ((y + j) * w); float dist = (fragmentBuffer[sobelIdx].z > 1e12) ? 1e12 : glm::length(fragmentBuffer[sobelIdx].eyePos); sobelx[bx][by] += sobelKernel[i + 1][j + 1] * dist; sobely[bx][by] += sobelKernel[j + 1][i + 1] * dist; } } } } fragment.sobelx = sobelx[bx][by]; fragment.sobely = sobely[bx][by]; } } /** * Writes fragment colors to the framebuffer */ __global__ void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer, int numLights, Light *lights) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * w); if (x < w && y < h) { Fragment & fragment = fragmentBuffer[index]; if (fragment.z < 1e12) { float totalLight = AMBIENT_LIGHT; // Lambert shading for (int i = 0; i < numLights; i++) { Light & light = lights[i]; totalLight += light.emittance * glm::max(0.0f, glm::dot(fragment.eyeNor, glm::normalize(light.eyePos - fragment.eyePos))); } framebuffer[index] = totalLight * fragment.color; #if CEL_SHADE > 0 framebuffer[index] = glm::ceil(framebuffer[index] * (float)CEL_SHADE) / (float)CEL_SHADE; float sobel = glm::sqrt(fragment.sobelx * fragment.sobelx + fragment.sobely * fragment.sobely); if (sobel > 15.0f) framebuffer[index] = glm::vec3(0.0f, 0.0f, 0.0f); #endif } else { framebuffer[index] = glm::vec3(0.5f, 0.8f, 1.0f); } } } /** * Perform rasterization. 
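 * Stages: vertex transform and assembly, primitive assembly, optional backface culling,
 * bounding-box rasterization with a mutex-guarded depth test, optional texture and Sobel
 * edge passes, then per-fragment lighting and copy-out to the PBO.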
*/ void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) { int sideLength2d = 8; dim3 blockSize2d(sideLength2d, sideLength2d); dim3 blockCount2d((width - 1) / blockSize2d.x + 1, (height - 1) / blockSize2d.y + 1); // Execute your rasterization pipeline here // (See README for rasterization pipeline outline.) // Vertex Process & primitive assembly { curPrimitiveBeginId = 0; dim3 numThreadsPerBlock(128); auto it = mesh2PrimitivesMap.begin(); auto itEnd = mesh2PrimitivesMap.end(); for (; it != itEnd; ++it) { auto p = (it->second).begin(); // each primitive auto pEnd = (it->second).end(); for (; p != pEnd; ++p) { dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); _vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height); checkCUDAError("Vertex Processing"); cudaDeviceSynchronize(); _primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> > (p->numIndices, curPrimitiveBeginId, dev_primitives, *p); checkCUDAError("Primitive Assembly"); curPrimitiveBeginId += p->numPrimitives; } } checkCUDAError("Vertex Processing and Primitive Assembly"); } cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment)); initMutexes << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentMutexes, dev_fragmentBuffer); checkCUDAError("init mutexes"); int numPrimitives = totalNumPrimitives; // Backface culling #if BACKFACE_CULL == 1 thrust::device_ptr<Primitive> dev_thrust_primitives(dev_primitives); thrust::device_ptr<Primitive> dev_thrust_primitivesEnd = thrust::remove_if(dev_thrust_primitives, dev_thrust_primitives + numPrimitives, IsBackfacing()); numPrimitives = dev_thrust_primitivesEnd - dev_thrust_primitives; printf("%d triangles\n", numPrimitives); checkCUDAError("backface culling"); #endif // Rasterization dim3 numThreadsPerBlock(64); dim3 numBlocksForPrimitives((numPrimitives + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x); kernRasterize << < numBlocksForPrimitives, numThreadsPerBlock >> >( numPrimitives, dev_primitives, width, height, dev_fragmentBuffer, dev_fragmentMutexes); checkCUDAError("rasterizer"); // Filling texture colors #if TEXTURE_MAP == 1 kernTextureShader << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer); checkCUDAError("textureShader"); #endif // Offline light transformation, since there aren't many lights for (Light & light : lights) { glm::vec4 eyePos = MV * light.worldPos; light.eyePos = glm::vec3(eyePos / eyePos.w); } cudaMemcpy(dev_lights, lights.data(), lights.size() * sizeof(Light), cudaMemcpyHostToDevice); #if CEL_SHADE > 0 dim3 sobelBlockSize2d(SOBEL_GRID, SOBEL_GRID); dim3 sobelBlockCount2d((width - 1) / sobelBlockSize2d.x + 1, (height - 1) / sobelBlockSize2d.y + 1); #if USE_SHARED_SOBEL == 1 calculateSobelWithShared<< <sobelBlockCount2d, sobelBlockSize2d >> >(width, height, dev_fragmentBuffer); #else calculateSobel<< <sobelBlockCount2d, sobelBlockSize2d >> >(width, height, dev_fragmentBuffer); #endif checkCUDAError("Sobel"); #endif // Copy depthbuffer colors into framebuffer render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer, lights.size(), dev_lights); checkCUDAError("fragment shader"); // Copy framebuffer into OpenGL buffer for OpenGL previewing sendImageToPBO << <blockCount2d, blockSize2d >> >(pbo, width, height, dev_framebuffer); 
checkCUDAError("copy render result to pbo"); } /** * Called once at the end of the program to free CUDA memory. */ void rasterizeFree() { // deconstruct primitives attribute/indices device buffer auto it(mesh2PrimitivesMap.begin()); auto itEnd(mesh2PrimitivesMap.end()); for (; it != itEnd; ++it) { for (auto p = it->second.begin(); p != it->second.end(); ++p) { cudaFree(p->dev_indices); cudaFree(p->dev_position); cudaFree(p->dev_normal); cudaFree(p->dev_texcoord0); #if TEXTURE_MAP == 1 cudaFree(p->dev_diffuseTex); #endif cudaFree(p->dev_verticesOut); } } //////////// cudaFree(dev_primitives); dev_primitives = NULL; cudaFree(dev_fragmentBuffer); dev_fragmentBuffer = NULL; cudaFree(dev_framebuffer); dev_framebuffer = NULL; cudaFree(dev_lights); dev_lights = NULL; checkCUDAError("rasterize Free"); }
2b314eacc231f5bd35acfd9ae8f82f9af87cbc47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<stdio.h> #include<sys/time.h> #include<stdint.h> #define CHECK(call)\ {\ const hipError_t error = call;\ if (error != hipSuccess)\ {\ printf("Error: %s, %d\n", __FILE__, __LINE__);\ printf("Code: %d, reason: %s\n", error, hipGetErrorString(error));\ exit(0);\ }\ }\ void mat_sum(float *a, float *b, float *c, const int x, const int y) { float *aa = a; float *bb = b, *cc = c; for (int i=0; i< y; i++) { for(int j=0; j<x; j++) cc[j] = aa[j] + bb[j]; aa += x; bb += x; cc += x; } } __global__ void mat_sum_g(float *a, float *b, float *c, int x, int y) { unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; unsigned int j = blockIdx.y; unsigned int idx = i + j * x; if (i < x && j < y) c[idx] = a[idx] + b[idx]; } void init_data(float *inp, int n) { time_t t; srand((unsigned) time(&t)); for(int i=0; i<n; i++) inp[i] = (float)(rand() & 0xFF) / 10.f; } double cpu_sec() { struct timeval tp; gettimeofday(&tp, NULL);\ return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void check_mat(float *c, float *g, int n) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < n; i++) { if (abs(c[i] - g[i]) > epsilon) { match = 0; printf("Don't match!\n"); printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i); break; } } if (match) printf("Array match\n\n"); return; } int main() { int dev = 0; hipDeviceProp_t devp; CHECK(hipGetDeviceProperties(&devp, dev)); printf("Device %d: %s\n", dev, devp.name); CHECK(hipSetDevice(dev)); int nx = 1 << 7, ny = 1 << 7; int n = nx * ny, nbytes = n * sizeof(float); printf("Mat Size: x %d, y %d\n", nx, ny); float *ha, *hb, *cpu, *gpu; ha = (float *)malloc(nbytes); hb = (float *)malloc(nbytes); cpu = (float *)malloc(nbytes); gpu = (float *)malloc(nbytes); init_data(ha, n); init_data(hb, n); memset(cpu, 0, nbytes); memset(gpu, 0, nbytes); float *da, *db, *dc; hipMalloc((float **)&da, nbytes); hipMalloc((float **)&db, nbytes); hipMalloc((float **)&dc, nbytes); hipMemcpy(da, ha, nbytes, hipMemcpyHostToDevice); hipMemcpy(db, hb, nbytes, hipMemcpyHostToDevice); double start, duration; start = cpu_sec(); mat_sum(ha, hb, cpu, nx, ny); duration = cpu_sec() - start; printf("Mat sum cpu time cost %f ms\n", duration*1000); dim3 block(32); dim3 grid((nx+31)/32,ny); start = cpu_sec(); hipLaunchKernelGGL(( mat_sum_g), dim3(grid), dim3(block), 0, 0, da, db, dc,nx, ny); hipDeviceSynchronize(); duration = cpu_sec() - start; printf("Mat sum GPU<<<(%d,%d), (%d,%d)>>> time cost %f ms\n", grid.x, grid.y, block.x, block.y, duration*1000); hipMemcpy(gpu, dc, nbytes, hipMemcpyDeviceToHost); check_mat(cpu, gpu, n); hipFree(da); hipFree(db); hipFree(dc); free(ha); free(hb); free(cpu); free(gpu); hipDeviceReset(); int c = getchar(); return 0; }
2b314eacc231f5bd35acfd9ae8f82f9af87cbc47.cu
#include<cuda_runtime.h> #include<stdio.h> #include<sys/time.h> #include<stdint.h> #define CHECK(call)\ {\ const cudaError_t error = call;\ if (error != cudaSuccess)\ {\ printf("Error: %s, %d\n", __FILE__, __LINE__);\ printf("Code: %d, reason: %s\n", error, cudaGetErrorString(error));\ exit(0);\ }\ }\ void mat_sum(float *a, float *b, float *c, const int x, const int y) { float *aa = a; float *bb = b, *cc = c; for (int i=0; i< y; i++) { for(int j=0; j<x; j++) cc[j] = aa[j] + bb[j]; aa += x; bb += x; cc += x; } } __global__ void mat_sum_g(float *a, float *b, float *c, int x, int y) { unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; unsigned int j = blockIdx.y; unsigned int idx = i + j * x; if (i < x && j < y) c[idx] = a[idx] + b[idx]; } void init_data(float *inp, int n) { time_t t; srand((unsigned) time(&t)); for(int i=0; i<n; i++) inp[i] = (float)(rand() & 0xFF) / 10.f; } double cpu_sec() { struct timeval tp; gettimeofday(&tp, NULL);\ return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void check_mat(float *c, float *g, int n) { double epsilon = 1.0E-8; int match = 1; for (int i = 0; i < n; i++) { if (abs(c[i] - g[i]) > epsilon) { match = 0; printf("Don't match!\n"); printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i); break; } } if (match) printf("Array match\n\n"); return; } int main() { int dev = 0; cudaDeviceProp devp; CHECK(cudaGetDeviceProperties(&devp, dev)); printf("Device %d: %s\n", dev, devp.name); CHECK(cudaSetDevice(dev)); int nx = 1 << 7, ny = 1 << 7; int n = nx * ny, nbytes = n * sizeof(float); printf("Mat Size: x %d, y %d\n", nx, ny); float *ha, *hb, *cpu, *gpu; ha = (float *)malloc(nbytes); hb = (float *)malloc(nbytes); cpu = (float *)malloc(nbytes); gpu = (float *)malloc(nbytes); init_data(ha, n); init_data(hb, n); memset(cpu, 0, nbytes); memset(gpu, 0, nbytes); float *da, *db, *dc; cudaMalloc((float **)&da, nbytes); cudaMalloc((float **)&db, nbytes); cudaMalloc((float **)&dc, nbytes); cudaMemcpy(da, ha, nbytes, cudaMemcpyHostToDevice); cudaMemcpy(db, hb, nbytes, cudaMemcpyHostToDevice); double start, duration; start = cpu_sec(); mat_sum(ha, hb, cpu, nx, ny); duration = cpu_sec() - start; printf("Mat sum cpu time cost %f ms\n", duration*1000); dim3 block(32); dim3 grid((nx+31)/32,ny); start = cpu_sec(); mat_sum_g<<<grid, block>>>(da, db, dc,nx, ny); cudaDeviceSynchronize(); duration = cpu_sec() - start; printf("Mat sum GPU<<<(%d,%d), (%d,%d)>>> time cost %f ms\n", grid.x, grid.y, block.x, block.y, duration*1000); cudaMemcpy(gpu, dc, nbytes, cudaMemcpyDeviceToHost); check_mat(cpu, gpu, n); cudaFree(da); cudaFree(db); cudaFree(dc); free(ha); free(hb); free(cpu); free(gpu); cudaDeviceReset(); int c = getchar(); return 0; }
1049481c8f51aeca9ebbf5757db01dfbb56395f0.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * ************************************************************************ * Modified from Pytorch * Copyright (c) 2016-present, Facebook, Inc. * * See https://github.com/pytorch/pytorch/blob/master/LICENSE for details * ************************************************************************ * Modified from ONNX Runtime * Copyright (c) Microsoft Corporation * * See https://github.com/microsoft/onnxruntime/blob/master/LICENSE for details * ************************************************************************ */ #include <hip/hip_runtime.h> #include "hip/hip_fp16.h" #include "common/common.cuh" #include "roiAlignKernel.h" using half = __half; __device__ half floatMax(half a, half b) { #if __CUDA_ARCH__ >= 800 return __hmax(a, b); #else return __float2half(max(__half2float(a), __half2float(b))); #endif } __device__ float floatMax(float a, float b) { return max(a, b); } template <typename T> __device__ T bilinearInterpolate(T const* bottomData, int32_t const height, int32_t const width, T y, T x, int32_t const isModeAvg, int32_t const index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) { // empty return 0; } if (y <= static_cast<T>(0)) { y = 0; } if (x <= static_cast<T>(0)) { x = 0; } int32_t yLow = static_cast<int32_t>(y); int32_t xLow = static_cast<int32_t>(x); int32_t yHigh; int32_t xHigh; if (yLow >= height - 1) { yHigh = yLow = height - 1; y = static_cast<T>(yLow); } else { yHigh = yLow + 1; } if (xLow >= width - 1) { xHigh = xLow = width - 1; x = static_cast<T>(xLow); } else { xHigh = xLow + 1; } T ly = y - static_cast<T>(yLow); T lx = x - static_cast<T>(xLow); T hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) 
- lx; // do bilinear interpolation T v1 = bottomData[yLow * width + xLow]; T v2 = bottomData[yLow * width + xHigh]; T v3 = bottomData[yHigh * width + xLow]; T v4 = bottomData[yHigh * width + xHigh]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val; if (isModeAvg) { val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); // mode Avg } else { val = floatMax(floatMax(floatMax(w1 * v1, w2 * v2), w3 * v3), w4 * v4); // mode Max } return val; } template <typename T> __global__ void RoIAlignForward(int32_t const nthreads, T const* bottomData, T const spatialScale, int32_t const channels, int32_t const height, int32_t const width, int32_t const pooledHeight, int32_t const pooledWidth, int32_t const samplingRatio, T const* bottomRois, T* topData, int32_t const isModeAvg, int32_t const* batchIndicesPtr, int32_t const aligned) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { // (n, c, ph, pw) is an element in the pooled output int32_t pw = index % pooledWidth; int32_t ph = (index / pooledWidth) % pooledHeight; int32_t c = (index / pooledWidth / pooledHeight) % channels; int32_t n = index / pooledWidth / pooledHeight / channels; T const* offsetBottomRois = bottomRois + n * 4; auto const roiBatchInd = batchIndicesPtr[n]; bool continuousCoordinate = aligned; // Do not using rounding; this implementation detail is critical T roiOffset = static_cast<T>(continuousCoordinate ? 0.5 : 0); T roiStartW = offsetBottomRois[0] * spatialScale - roiOffset; T roiStartH = offsetBottomRois[1] * spatialScale - roiOffset; T roiEndW = offsetBottomRois[2] * spatialScale - roiOffset; T roiEndH = offsetBottomRois[3] * spatialScale - roiOffset; T roiWidth = roiEndW - roiStartW; T roiHeight = roiEndH - roiStartH; if (!continuousCoordinate) { // backward compatiblity // Force malformed ROIs to be 1x1 roiWidth = floatMax(roiWidth, static_cast<T>(1.)); roiHeight = floatMax(roiHeight, static_cast<T>(1.)); } T binSizeH = static_cast<T>(roiHeight) / static_cast<T>(pooledHeight); T binSizeW = static_cast<T>(roiWidth) / static_cast<T>(pooledWidth); T const* offsetBottomData = bottomData + static_cast<int32_t>((roiBatchInd * channels + c) * height * width); // We use roiBinGrid to sample the grid and mimic integral int32_t roiBinGridH; if (samplingRatio > 0) { roiBinGridH = samplingRatio; } else { roiBinGridH = ceilf(roiHeight / static_cast<T>(pooledHeight)); } int32_t roiBinGridW; if (samplingRatio > 0) { roiBinGridW = samplingRatio; } else { roiBinGridW = ceilf(roiWidth / static_cast<T>(pooledWidth)); } // We do average (integral) pooling inside a bin T const count = roiBinGridH * roiBinGridW; // e.g. 
= 4 T const yOff = roiStartH + static_cast<T>(ph) * binSizeH; T const yFac = binSizeH / static_cast<T>(roiBinGridH); T const xOff = roiStartW + static_cast<T>(pw) * binSizeW; T const xFac = binSizeW / static_cast<T>(roiBinGridW); T outputVal = 0.; bool maxFlag = false; for (int32_t iy = 0; iy < roiBinGridH; iy++) // e.g., iy = 0, 1 { T const y = yOff + static_cast<T>(iy + .5F) * yFac; // e.g., 0.5, 1.5 for (int32_t ix = 0; ix < roiBinGridW; ix++) { T const x = xOff + static_cast<T>(ix + .5F) * xFac; T val = bilinearInterpolate(offsetBottomData, height, width, y, x, isModeAvg, index); if (isModeAvg) { outputVal += val; } else { if (!maxFlag) { outputVal = val; maxFlag = true; } else { outputVal = floatMax(outputVal, val); } } } } if (isModeAvg) { outputVal = outputVal / count; } topData[index] = outputVal; } } template <typename T> hipError_t RoiAlignImpl(hipStream_t stream, int32_t const maxThreadsPerBlock, T const* bottomData, T const spatialScale, int32_t const numRois, int32_t const channels, int32_t const height, int32_t const width, int32_t const pooledHeight, int32_t const pooledWidth, int32_t const samplingRatio, T const* bottomRois, T* topData, int32_t const isModeAvg, int32_t const* batchIndicesPtr, int32_t const aligned) { PLUGIN_ASSERT(bottomData != nullptr); PLUGIN_ASSERT(bottomRois != nullptr); PLUGIN_ASSERT(batchIndicesPtr != nullptr); PLUGIN_ASSERT(topData != nullptr); PLUGIN_ASSERT(numRois >= 0); PLUGIN_ASSERT(maxThreadsPerBlock > 0); PLUGIN_ASSERT(height > 0); PLUGIN_ASSERT(width > 0); PLUGIN_ASSERT(pooledHeight > 0); PLUGIN_ASSERT(pooledWidth > 0); PLUGIN_ASSERT(samplingRatio >= 0); PLUGIN_ASSERT(isModeAvg == 0 || isModeAvg == 1); PLUGIN_ASSERT(static_cast<float>(spatialScale) > 0.0F); PLUGIN_ASSERT(aligned == 0 || aligned == 1); int32_t const outputSize = numRois * channels * pooledHeight * pooledWidth; int32_t blocksPerGrid = static_cast<int32_t>(ceil(static_cast<float>(outputSize) / maxThreadsPerBlock)); hipLaunchKernelGGL(( RoIAlignForward<T>), dim3(blocksPerGrid), dim3(maxThreadsPerBlock), 0, stream, outputSize,// nthreads bottomData, // bottomData spatialScale, // spatialScale channels, // channels height, // height width, // width pooledHeight, // pooledHeight pooledWidth, // pooledWidth samplingRatio, // samplingRatio bottomRois, // bottomRois topData, // topData isModeAvg, // isModeAvg batchIndicesPtr, // batchIndicesPtr aligned); return hipGetLastError(); } #define SPECIALIZED_IMPL(T) \ template hipError_t RoiAlignImpl<T>(hipStream_t stream, int32_t const maxThreadsPerBlock, T const* bottomData, \ T const spatialScale, int32_t const numRois, int32_t const channels, int32_t const height, \ int32_t const width, int32_t const pooledHeight, int32_t const pooledWidth, int32_t const samplingRatio, \ T const* bottomRois, T* topData, int32_t const isModeAvg, int32_t const* batchIndicesPtr, \ int32_t const aligned); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(half)
1049481c8f51aeca9ebbf5757db01dfbb56395f0.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * ************************************************************************ * Modified from Pytorch * Copyright (c) 2016-present, Facebook, Inc. * * See https://github.com/pytorch/pytorch/blob/master/LICENSE for details * ************************************************************************ * Modified from ONNX Runtime * Copyright (c) Microsoft Corporation * * See https://github.com/microsoft/onnxruntime/blob/master/LICENSE for details * ************************************************************************ */ #include <cuda.h> #include "cuda_fp16.h" #include "common/common.cuh" #include "roiAlignKernel.h" using half = __half; __device__ half floatMax(half a, half b) { #if __CUDA_ARCH__ >= 800 return __hmax(a, b); #else return __float2half(max(__half2float(a), __half2float(b))); #endif } __device__ float floatMax(float a, float b) { return max(a, b); } template <typename T> __device__ T bilinearInterpolate(T const* bottomData, int32_t const height, int32_t const width, T y, T x, int32_t const isModeAvg, int32_t const index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) { // empty return 0; } if (y <= static_cast<T>(0)) { y = 0; } if (x <= static_cast<T>(0)) { x = 0; } int32_t yLow = static_cast<int32_t>(y); int32_t xLow = static_cast<int32_t>(x); int32_t yHigh; int32_t xHigh; if (yLow >= height - 1) { yHigh = yLow = height - 1; y = static_cast<T>(yLow); } else { yHigh = yLow + 1; } if (xLow >= width - 1) { xHigh = xLow = width - 1; x = static_cast<T>(xLow); } else { xHigh = xLow + 1; } T ly = y - static_cast<T>(yLow); T lx = x - static_cast<T>(xLow); T hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) 
- lx; // do bilinear interpolation T v1 = bottomData[yLow * width + xLow]; T v2 = bottomData[yLow * width + xHigh]; T v3 = bottomData[yHigh * width + xLow]; T v4 = bottomData[yHigh * width + xHigh]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val; if (isModeAvg) { val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); // mode Avg } else { val = floatMax(floatMax(floatMax(w1 * v1, w2 * v2), w3 * v3), w4 * v4); // mode Max } return val; } template <typename T> __global__ void RoIAlignForward(int32_t const nthreads, T const* bottomData, T const spatialScale, int32_t const channels, int32_t const height, int32_t const width, int32_t const pooledHeight, int32_t const pooledWidth, int32_t const samplingRatio, T const* bottomRois, T* topData, int32_t const isModeAvg, int32_t const* batchIndicesPtr, int32_t const aligned) { for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) { // (n, c, ph, pw) is an element in the pooled output int32_t pw = index % pooledWidth; int32_t ph = (index / pooledWidth) % pooledHeight; int32_t c = (index / pooledWidth / pooledHeight) % channels; int32_t n = index / pooledWidth / pooledHeight / channels; T const* offsetBottomRois = bottomRois + n * 4; auto const roiBatchInd = batchIndicesPtr[n]; bool continuousCoordinate = aligned; // Do not using rounding; this implementation detail is critical T roiOffset = static_cast<T>(continuousCoordinate ? 0.5 : 0); T roiStartW = offsetBottomRois[0] * spatialScale - roiOffset; T roiStartH = offsetBottomRois[1] * spatialScale - roiOffset; T roiEndW = offsetBottomRois[2] * spatialScale - roiOffset; T roiEndH = offsetBottomRois[3] * spatialScale - roiOffset; T roiWidth = roiEndW - roiStartW; T roiHeight = roiEndH - roiStartH; if (!continuousCoordinate) { // backward compatiblity // Force malformed ROIs to be 1x1 roiWidth = floatMax(roiWidth, static_cast<T>(1.)); roiHeight = floatMax(roiHeight, static_cast<T>(1.)); } T binSizeH = static_cast<T>(roiHeight) / static_cast<T>(pooledHeight); T binSizeW = static_cast<T>(roiWidth) / static_cast<T>(pooledWidth); T const* offsetBottomData = bottomData + static_cast<int32_t>((roiBatchInd * channels + c) * height * width); // We use roiBinGrid to sample the grid and mimic integral int32_t roiBinGridH; if (samplingRatio > 0) { roiBinGridH = samplingRatio; } else { roiBinGridH = ceilf(roiHeight / static_cast<T>(pooledHeight)); } int32_t roiBinGridW; if (samplingRatio > 0) { roiBinGridW = samplingRatio; } else { roiBinGridW = ceilf(roiWidth / static_cast<T>(pooledWidth)); } // We do average (integral) pooling inside a bin T const count = roiBinGridH * roiBinGridW; // e.g. 
= 4 T const yOff = roiStartH + static_cast<T>(ph) * binSizeH; T const yFac = binSizeH / static_cast<T>(roiBinGridH); T const xOff = roiStartW + static_cast<T>(pw) * binSizeW; T const xFac = binSizeW / static_cast<T>(roiBinGridW); T outputVal = 0.; bool maxFlag = false; for (int32_t iy = 0; iy < roiBinGridH; iy++) // e.g., iy = 0, 1 { T const y = yOff + static_cast<T>(iy + .5F) * yFac; // e.g., 0.5, 1.5 for (int32_t ix = 0; ix < roiBinGridW; ix++) { T const x = xOff + static_cast<T>(ix + .5F) * xFac; T val = bilinearInterpolate(offsetBottomData, height, width, y, x, isModeAvg, index); if (isModeAvg) { outputVal += val; } else { if (!maxFlag) { outputVal = val; maxFlag = true; } else { outputVal = floatMax(outputVal, val); } } } } if (isModeAvg) { outputVal = outputVal / count; } topData[index] = outputVal; } } template <typename T> cudaError_t RoiAlignImpl(cudaStream_t stream, int32_t const maxThreadsPerBlock, T const* bottomData, T const spatialScale, int32_t const numRois, int32_t const channels, int32_t const height, int32_t const width, int32_t const pooledHeight, int32_t const pooledWidth, int32_t const samplingRatio, T const* bottomRois, T* topData, int32_t const isModeAvg, int32_t const* batchIndicesPtr, int32_t const aligned) { PLUGIN_ASSERT(bottomData != nullptr); PLUGIN_ASSERT(bottomRois != nullptr); PLUGIN_ASSERT(batchIndicesPtr != nullptr); PLUGIN_ASSERT(topData != nullptr); PLUGIN_ASSERT(numRois >= 0); PLUGIN_ASSERT(maxThreadsPerBlock > 0); PLUGIN_ASSERT(height > 0); PLUGIN_ASSERT(width > 0); PLUGIN_ASSERT(pooledHeight > 0); PLUGIN_ASSERT(pooledWidth > 0); PLUGIN_ASSERT(samplingRatio >= 0); PLUGIN_ASSERT(isModeAvg == 0 || isModeAvg == 1); PLUGIN_ASSERT(static_cast<float>(spatialScale) > 0.0F); PLUGIN_ASSERT(aligned == 0 || aligned == 1); int32_t const outputSize = numRois * channels * pooledHeight * pooledWidth; int32_t blocksPerGrid = static_cast<int32_t>(ceil(static_cast<float>(outputSize) / maxThreadsPerBlock)); RoIAlignForward<T><<<blocksPerGrid, maxThreadsPerBlock, 0, stream>>>(outputSize,// nthreads bottomData, // bottomData spatialScale, // spatialScale channels, // channels height, // height width, // width pooledHeight, // pooledHeight pooledWidth, // pooledWidth samplingRatio, // samplingRatio bottomRois, // bottomRois topData, // topData isModeAvg, // isModeAvg batchIndicesPtr, // batchIndicesPtr aligned); return cudaGetLastError(); } #define SPECIALIZED_IMPL(T) \ template cudaError_t RoiAlignImpl<T>(cudaStream_t stream, int32_t const maxThreadsPerBlock, T const* bottomData, \ T const spatialScale, int32_t const numRois, int32_t const channels, int32_t const height, \ int32_t const width, int32_t const pooledHeight, int32_t const pooledWidth, int32_t const samplingRatio, \ T const* bottomRois, T* topData, int32_t const isModeAvg, int32_t const* batchIndicesPtr, \ int32_t const aligned); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(half)
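// ---------------------------------------------------------------------------
// A minimal host-side usage sketch, not taken from the upstream plugin: it
// shows one plausible way to call the RoiAlignImpl<float> instantiation
// above. Every name, extent and constant below is an assumption made for
// illustration; the pointers must already refer to device memory laid out as
// commented.
static cudaError_t roiAlignExampleLaunch(cudaStream_t stream,
    const float* dFeatures,        // [N, C, H, W] feature map (device)
    const float* dRois,            // [numRois, 4] ROI boxes (device)
    const int32_t* dBatchIndices,  // [numRois] batch index per ROI (device)
    float* dOutput,                // [numRois, C, 7, 7] pooled output (device)
    int32_t numRois, int32_t channels, int32_t height, int32_t width)
{
    // Average pooling over 7x7 bins, 2 sampling points per bin axis,
    // half-pixel alignment, feature stride 16 (so spatialScale = 1/16).
    return RoiAlignImpl<float>(stream, /*maxThreadsPerBlock=*/256, dFeatures,
        /*spatialScale=*/0.0625F, numRois, channels, height, width,
        /*pooledHeight=*/7, /*pooledWidth=*/7, /*samplingRatio=*/2,
        dRois, dOutput, /*isModeAvg=*/1, dBatchIndices, /*aligned=*/1);
}
// ---------------------------------------------------------------------------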
106ff7f5b87f4524a1092b931936cc494a3ed668.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THH/THH.h> #include <THH/THHApply.cuh> #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_MaskedSpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, float *target, float *mask, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float m; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - 1; m = mask ? mask[toffset + i] : 1.0f; //assert(t >= 0 && t < n_classes); if (t >= 0 && t < n_classes) { cur_weight = weights ? weights[t] : 1.0f; cur_weight = cur_weight * m; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_MaskedSpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, float *target, float *mask, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float m; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - 1; m = mask ? mask[toffset + i] : 1.0f; //assert(t >= 0 && t < n_classes); if (t >= 0 && t < n_classes) { gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm * m; } } } extern "C" void THNN_CudaMaskedSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *mask, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (mask) THArgCheck(THCudaTensor_nDimension(state, mask) == 3, 1, "only batches of spatial targets supported (3D tensors)"); if (weights && mask) THCUNN_assertSameGPU(state, 6, input, target, mask, weights, output, total_weight); else if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else if (mask) THCUNN_assertSameGPU(state, 5, input, target, mask, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; mask = mask ? 
THCudaTensor_newContiguous(state, mask) : NULL; target = THCudaTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *mask_data = mask ? THCudaTensor_data(state, mask) : NULL; float *target_data = THCudaTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); hipLaunchKernelGGL(( cunn_MaskedSpatialClassNLLCriterion_updateOutput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, mask_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); if (mask) THCudaTensor_free(state, mask); THCudaTensor_free(state, target); THCudaTensor_free(state, input); } extern "C" void THNN_CudaMaskedSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *mask, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (mask) THArgCheck(THCudaTensor_nDimension(state, mask) == 3, 1, "only batches of spatial targets supported (3D tensors)"); if (weights && mask) THCUNN_assertSameGPU(state, 6, mask, weights, input, target, gradInput, total_weight); else if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else if (mask) THCUNN_assertSameGPU(state, 5, mask, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; mask = mask ? THCudaTensor_newContiguous(state, mask) : NULL; target = THCudaTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *mask_data = mask ? THCudaTensor_data(state, mask) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); float *target_data = THCudaTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; hipLaunchKernelGGL(( cunn_MaskedSpatialClassNLLCriterion_updateGradInput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), gradInput_data, target_data, mask_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); if (mask) THCudaTensor_free(state, mask); THCudaTensor_free(state, target); THCudaTensor_free(state, input); }
106ff7f5b87f4524a1092b931936cc494a3ed668.cu
#include <THC/THC.h> #include <THC/THCApply.cuh> #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_MaskedSpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, float *target, float *mask, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float m; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - 1; m = mask ? mask[toffset + i] : 1.0f; //assert(t >= 0 && t < n_classes); if (t >= 0 && t < n_classes) { cur_weight = weights ? weights[t] : 1.0f; cur_weight = cur_weight * m; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_MaskedSpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, float *target, float *mask, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float m; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - 1; m = mask ? mask[toffset + i] : 1.0f; //assert(t >= 0 && t < n_classes); if (t >= 0 && t < n_classes) { gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm * m; } } } extern "C" void THNN_CudaMaskedSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *mask, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (mask) THArgCheck(THCudaTensor_nDimension(state, mask) == 3, 1, "only batches of spatial targets supported (3D tensors)"); if (weights && mask) THCUNN_assertSameGPU(state, 6, input, target, mask, weights, output, total_weight); else if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else if (mask) THCUNN_assertSameGPU(state, 5, input, target, mask, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; mask = mask ? 
THCudaTensor_newContiguous(state, mask) : NULL; target = THCudaTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *mask_data = mask ? THCudaTensor_data(state, mask) : NULL; float *target_data = THCudaTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); cunn_MaskedSpatialClassNLLCriterion_updateOutput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( output_data, total_weight_data, input_data, target_data, mask_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); if (mask) THCudaTensor_free(state, mask); THCudaTensor_free(state, target); THCudaTensor_free(state, input); } extern "C" void THNN_CudaMaskedSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *mask, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (mask) THArgCheck(THCudaTensor_nDimension(state, mask) == 3, 1, "only batches of spatial targets supported (3D tensors)"); if (weights && mask) THCUNN_assertSameGPU(state, 6, mask, weights, input, target, gradInput, total_weight); else if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else if (mask) THCUNN_assertSameGPU(state, 5, mask, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; mask = mask ? THCudaTensor_newContiguous(state, mask) : NULL; target = THCudaTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *mask_data = mask ? THCudaTensor_data(state, mask) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); float *target_data = THCudaTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaTensor_size(state, target, 0); long map_nelem = THCudaTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 
1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; cunn_MaskedSpatialClassNLLCriterion_updateGradInput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( gradInput_data, target_data, mask_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); if (mask) THCudaTensor_free(state, mask); THCudaTensor_free(state, target); THCudaTensor_free(state, input); }
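// ---------------------------------------------------------------------------
// A short note on the launch geometry used by both entry points above, added
// for clarity and assuming the usual THCUNN definitions in common.h
// (CUDA_NUM_THREADS == 1024 and GET_BLOCKS(n) == ceil(n / CUDA_NUM_THREADS)):
// each batch sample is covered by `blocks_per_sample` thread blocks. For a
// 512x512 spatial target per sample:
//
//   map_nelem         = 512 * 512           = 262144
//   GET_BLOCKS(...)   = ceil(262144 / 1024) = 256
//   blocks_per_sample = 256 / 128           = 2   (clamped to at least 1)
//   total_blocks      = blocks_per_sample * batch_size
//
// Inside the kernels, blockIdx.x / blocks_per_sample selects the sample, and
// the grid-stride loop (step = blockDim.x * blocks_per_sample) walks that
// sample's map_nelem positions.
// ---------------------------------------------------------------------------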
be0f956f1e805dd9bd8772dc58e2813a935aab80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudabridge.h" __global__ void keyCheckKernel(unsigned int *xPtr, unsigned int *yPtr, int blocks, int compression); __global__ void keyFinderKernel(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chainPtr, int points); __global__ void keyFinderKernelWithDouble(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chainPtr, int points); void callKeyFinderKernel(int blocks, int threads, int points, unsigned int *xPtr, unsigned int *yPtr, unsigned int *chainPtr, bool useDouble, int compression) { int blocksMultPoints = blocks * points; hipLaunchKernelGGL(( keyCheckKernel) , dim3(blocksMultPoints), dim3(threads), 0, 0, xPtr, yPtr, blocks, compression); checkKernelLaunch(); if(useDouble) { hipLaunchKernelGGL(( keyFinderKernelWithDouble) , dim3(blocks), dim3(threads) , 0, 0, xPtr, yPtr, chainPtr, points); } else { hipLaunchKernelGGL(( keyFinderKernel) , dim3(blocks), dim3(threads), 0, 0, xPtr, yPtr, chainPtr, points); } waitForKernel(); } void checkKernelLaunch() { // Check for kernel launch error hipError_t err = hipGetLastError(); if(err != hipSuccess) { throw cuda::CudaException(err); } } void waitForKernel() { // Check for kernel launch error hipError_t err = hipGetLastError(); if(err != hipSuccess) { throw cuda::CudaException(err); } // Wait for kernel to complete err = hipDeviceSynchronize(); fflush(stdout); if(err != hipSuccess) { throw cuda::CudaException(err); } }
be0f956f1e805dd9bd8772dc58e2813a935aab80.cu
#include "cudabridge.h" __global__ void keyCheckKernel(unsigned int *xPtr, unsigned int *yPtr, int blocks, int compression); __global__ void keyFinderKernel(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chainPtr, int points); __global__ void keyFinderKernelWithDouble(unsigned int *xPtr, unsigned int *yPtr, unsigned int *chainPtr, int points); void callKeyFinderKernel(int blocks, int threads, int points, unsigned int *xPtr, unsigned int *yPtr, unsigned int *chainPtr, bool useDouble, int compression) { int blocksMultPoints = blocks * points; keyCheckKernel <<<blocksMultPoints, threads>>> (xPtr, yPtr, blocks, compression); checkKernelLaunch(); if(useDouble) { keyFinderKernelWithDouble <<<blocks, threads >>> (xPtr, yPtr, chainPtr, points); } else { keyFinderKernel <<<blocks, threads>>> (xPtr, yPtr, chainPtr, points); } waitForKernel(); } void checkKernelLaunch() { // Check for kernel launch error cudaError_t err = cudaGetLastError(); if(err != cudaSuccess) { throw cuda::CudaException(err); } } void waitForKernel() { // Check for kernel launch error cudaError_t err = cudaGetLastError(); if(err != cudaSuccess) { throw cuda::CudaException(err); } // Wait for kernel to complete err = cudaDeviceSynchronize(); fflush(stdout); if(err != cudaSuccess) { throw cuda::CudaException(err); } }
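// ---------------------------------------------------------------------------
// The .hip / .cu pair above illustrates the mechanical translation that
// hipify applies to this bridge file. A minimal sketch of the same pattern,
// using a hypothetical exampleKernel and pointer ptr:
//
//   CUDA:  exampleKernel<<<blocks, threads>>>(ptr, n);
//          cudaError_t err = cudaDeviceSynchronize();
//   HIP :  hipLaunchKernelGGL((exampleKernel), dim3(blocks), dim3(threads),
//                             0, 0, ptr, n);
//          hipError_t err = hipDeviceSynchronize();
//
// The extra `0, 0` arguments in the HIP launch are the dynamic shared-memory
// size and the stream, which the triple-chevron form left at their defaults.
// ---------------------------------------------------------------------------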
d0431a421f63d91bf1fd20c3a0f56c44169a7eef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zaxpycp.cu, normal z -> s, Tue Aug 30 09:38:27 2016 */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void saxpycp_kernel( int m, float *r, float *x, const float *b) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_S_ADD( x[i], r[i] ); r[i] = b[i]; } } /***************************************************************************//** adds x += r --and-- copies r = b *******************************************************************************/ extern "C" void magmablas_saxpycp_q( magma_int_t m, magmaFloat_ptr r, magmaFloat_ptr x, magmaFloat_const_ptr b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); hipLaunchKernelGGL(( saxpycp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, r, x, b ); }
d0431a421f63d91bf1fd20c3a0f56c44169a7eef.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zaxpycp.cu, normal z -> s, Tue Aug 30 09:38:27 2016 */ #include "magma_internal.h" #define NB 64 /******************************************************************************/ // adds x += r --and-- // copies r = b // each thread does one index, x[i] and r[i] __global__ void saxpycp_kernel( int m, float *r, float *x, const float *b) { const int i = threadIdx.x + blockIdx.x*NB; if ( i < m ) { x[i] = MAGMA_S_ADD( x[i], r[i] ); r[i] = b[i]; } } /***************************************************************************//** adds x += r --and-- copies r = b *******************************************************************************/ extern "C" void magmablas_saxpycp_q( magma_int_t m, magmaFloat_ptr r, magmaFloat_ptr x, magmaFloat_const_ptr b, magma_queue_t queue ) { dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); saxpycp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, r, x, b ); }
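// ---------------------------------------------------------------------------
// Launch-geometry note for saxpycp_kernel above (one thread per element):
// with m = 1000 and NB = 64, magma_ceildiv(1000, 64) = 16 blocks of 64
// threads are launched (1024 threads in total); the trailing 24 threads fail
// the `i < m` guard and simply do nothing. The example values are arbitrary.
// ---------------------------------------------------------------------------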
385fe7c21390bc7d77145d7b997a45ffec8ca1a1.hip
// !!! This is a file automatically generated by hipify!!! // SparseMatrixGpu.cpp #include "lsm/gpu/SparseMatrixGpu.hpp" #include <iostream> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include "gpu/helper_cuda.h" namespace lsm { namespace { /// /// \brief The CudaEnvironment struct /// struct CudaEnvironment { CudaEnvironment( ) { if ( findCudaDevice( 0, 0, false ) < 0 ) { throw std::runtime_error( "No CUDA capable devices found" ); } #ifdef VERBOSE_PRINT std::cout << "CUDA device initialized" << std::endl; #endif } ~CudaEnvironment( ) { // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset( ); #ifdef VERBOSE_PRINT std::cout << "CUDA device reset" << std::endl; #endif } }; /// /// \brief cudaEnvironment /// const CudaEnvironment cudaEnvironment; } // namespace template< typename T > class SparseMatrixGpu< T >::SparseMatrixGpuImpl { public: explicit SparseMatrixGpuImpl(); private: thrust::device_vector< T > dataPlaceholder_; }; template< typename T > SparseMatrixGpu< T >::SparseMatrixGpuImpl::SparseMatrixGpuImpl() : dataPlaceholder_( 10 ) // temp init size {} //////////////////////////////////////////////////////// //////////////////////////////////////////////////////// template< typename T > SparseMatrixGpu< T >::SparseMatrixGpu() : upImpl_( new SparseMatrixGpuImpl() ) {} template< typename T > SparseMatrixGpu< T >::~SparseMatrixGpu() {} } // namespace lsm template class lsm::SparseMatrixGpu< float >; template class lsm::SparseMatrixGpu< double >;
385fe7c21390bc7d77145d7b997a45ffec8ca1a1.cu
// SparseMatrixGpu.cpp #include "lsm/gpu/SparseMatrixGpu.hpp" #include <iostream> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include "gpu/helper_cuda.h" namespace lsm { namespace { /// /// \brief The CudaEnvironment struct /// struct CudaEnvironment { CudaEnvironment( ) { if ( findCudaDevice( 0, 0, false ) < 0 ) { throw std::runtime_error( "No CUDA capable devices found" ); } #ifdef VERBOSE_PRINT std::cout << "CUDA device initialized" << std::endl; #endif } ~CudaEnvironment( ) { // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset( ); #ifdef VERBOSE_PRINT std::cout << "CUDA device reset" << std::endl; #endif } }; /// /// \brief cudaEnvironment /// const CudaEnvironment cudaEnvironment; } // namespace template< typename T > class SparseMatrixGpu< T >::SparseMatrixGpuImpl { public: explicit SparseMatrixGpuImpl(); private: thrust::device_vector< T > dataPlaceholder_; }; template< typename T > SparseMatrixGpu< T >::SparseMatrixGpuImpl::SparseMatrixGpuImpl() : dataPlaceholder_( 10 ) // temp init size {} //////////////////////////////////////////////////////// //////////////////////////////////////////////////////// template< typename T > SparseMatrixGpu< T >::SparseMatrixGpu() : upImpl_( new SparseMatrixGpuImpl() ) {} template< typename T > SparseMatrixGpu< T >::~SparseMatrixGpu() {} } // namespace lsm template class lsm::SparseMatrixGpu< float >; template class lsm::SparseMatrixGpu< double >;
f6f270c77609a4e1c39c695f64e6c4560fa655e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include "internal.hpp" #include "pcl/gpu/utils/device/funcattrib.hpp" #include "pcl/gpu/utils/device/warp.hpp" #include "pcl/gpu/utils/device/block.hpp" #include "pcl/gpu/utils/safe_call.hpp" #include "pcl/gpu/features/device/pair_features.hpp" #ifndef M_PI #define M_PI 3.14159265358979323846f #endif namespace pcl { namespace device { __device__ unsigned int count = 0; struct VfhDevice { enum { CTA_SIZE = 256, bins1 = 45, bins2 = 45, bins3 = 45, bins4 = 45, bins_vp = 128, FSize = bins1 + bins2 + bins3 + bins4 + bins_vp }; float distance_normalization_factor_inv; float hist_incr; // Factorization constant float hist_incr_size_component; bool normalize_distances; float3 centroid_p; float3 centroid_n; float3 d_vp_p; float hist_incr_vp; PtrSz<int> indices; const PointType *points; const NormalType *normals; mutable PtrStep<float> global_buffer; mutable float* output; template<typename T> __device__ __forceinline__ float3 fetch(const T* data, int index) const { T t = data[index]; return make_float3(t.x, t.y, t.z); } __device__ __forceinline__ void compute(float *shist_b1, float *shist_b2, float *shist_b3, float *shist_b4, float *shist_vp) const { int idx = threadIdx.x + blockIdx.x * CTA_SIZE; if (idx > indices.size) return; } __device__ void operator()() const { __shared__ float shist[FSize]; __shared__ bool lastBlock; Block::fill(shist, shist + FSize, 0.f); __syncthreads(); float *const shist_b1 = shist; float *const shist_b2 = shist_b1 + bins1; float *const shist_b3 = shist_b2 + bins2; float *const shist_b4 = shist_b3 + bins3; float *const shist_vp = shist_b4 + bins4; int STRIDE = gridDim.x * CTA_SIZE; int idx = blockIdx.x * CTA_SIZE + threadIdx.x; for (; idx < indices.size; idx += STRIDE) { int index = indices.data[idx]; float3 p = fetch(points, index); float3 n = fetch(normals, 
index); // spfh int h_index; float f1, f2, f3, f4; if (computePairFeatures(centroid_p, centroid_n, p, n, f1, f2, f3, f4)) { // Normalize the f1, f2, f3, f4 features and push them in the histogram h_index = floor (bins1 * ((f1 + M_PI) * (1.f / (2.f * M_PI)))); h_index = min(bins1 - 1, max(0, h_index)); atomicAdd(shist_b1 + h_index, hist_incr); h_index = floor (bins2 * ((f2 + 1.f) * 0.5f)); h_index = min(bins2 - 1, max (0, h_index)); atomicAdd(shist_b2 + h_index, hist_incr); h_index = floor (bins3 * ((f3 + 1.f) * 0.5f)); h_index = min(bins3 - 1, max (0, h_index)); atomicAdd(shist_b3 + h_index, hist_incr); if (normalize_distances) h_index = floor (bins4 * (f4 * distance_normalization_factor_inv)); else h_index = __float2int_rn (f4 * 100); h_index = min(bins4 - 1, max (0, h_index)); atomicAdd(shist_b4 + h_index, hist_incr_size_component); } // viewpoint component float alfa = ((dot(n, d_vp_p) + 1.f) * 0.5f); h_index = floor (bins_vp * alfa); h_index = min(bins_vp - 1, max (0, h_index)); atomicAdd(shist_vp + h_index, hist_incr_vp); } /* for (; idx < indices.size; idx += STRIDE) */ __syncthreads(); Block::copy(shist, shist + FSize, global_buffer.ptr(blockIdx.x)); __threadfence(); if (threadIdx.x == 0) { unsigned int value = atomicInc(&count, gridDim.x); lastBlock = (value == (gridDim.x - 1)); } __syncthreads(); if (lastBlock) { int total_rows = gridDim.x; for(int x = threadIdx.x; x < FSize; x += CTA_SIZE) { float sum = 0.f; for(int y = 0; y < total_rows; ++y) sum += global_buffer.ptr(y)[x]; output[x] = sum; } if (threadIdx.x == 0) count = 0; } } }; __global__ void estimateVfhKernel(const VfhDevice vfh) { vfh(); } } } void pcl::device::VFHEstimationImpl::compute(DeviceArray<VFHSignature308>& feature) { feature.create(1); VfhDevice vfh; vfh.centroid_p = xyz_centroid; vfh.centroid_n = normal_centroid; vfh.indices = indices; vfh.points = points; vfh.normals = normals; vfh.normalize_distances = normalize_distances; // Compute the direction of view from the viewpoint to the centroid vfh.d_vp_p = normalized (viewpoint - xyz_centroid); vfh.distance_normalization_factor_inv = 1.f; if ( normalize_distances ) { float3 max_pt = getMaxDistance (points, indices, xyz_centroid); vfh.distance_normalization_factor_inv = 1.f / norm(xyz_centroid - max_pt); } // Factorization constant vfh.hist_incr = normalize_bins ? (100.f / (indices.size() - 1)) : 1.f; vfh.hist_incr_size_component = size_component ? vfh.hist_incr : 0.f; vfh.hist_incr_vp = normalize_bins ? (100.f / indices.size()) : 1.f; int device; cudaSafeCall( hipGetDevice(&device) ); hipDeviceProp_t prop; cudaSafeCall( hipGetDeviceProperties(&prop, device) ); int total = static_cast<int> (indices.empty() ? points.size() : indices.size()); int total_lenght_in_blocks = (total + VfhDevice::CTA_SIZE - 1) / VfhDevice::CTA_SIZE; int block = VfhDevice::CTA_SIZE; int grid = min(total_lenght_in_blocks, prop.multiProcessorCount * prop.maxThreadsPerMultiProcessor / VfhDevice::CTA_SIZE); DeviceArray2D<float> global_buffer(grid, VfhDevice::FSize); vfh.global_buffer = global_buffer; vfh.output = (float*)feature.ptr(); hipLaunchKernelGGL(( estimateVfhKernel), dim3(grid), dim3(block), 0, 0, vfh); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); }
f6f270c77609a4e1c39c695f64e6c4560fa655e9.cu
/* * Software License Agreement (BSD License) * * Copyright (c) 2011, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Author: Anatoly Baskeheev, Itseez Ltd, ([email protected]) */ #include "internal.hpp" #include "pcl/gpu/utils/device/funcattrib.hpp" #include "pcl/gpu/utils/device/warp.hpp" #include "pcl/gpu/utils/device/block.hpp" #include "pcl/gpu/utils/safe_call.hpp" #include "pcl/gpu/features/device/pair_features.hpp" #ifndef M_PI #define M_PI 3.14159265358979323846f #endif namespace pcl { namespace device { __device__ unsigned int count = 0; struct VfhDevice { enum { CTA_SIZE = 256, bins1 = 45, bins2 = 45, bins3 = 45, bins4 = 45, bins_vp = 128, FSize = bins1 + bins2 + bins3 + bins4 + bins_vp }; float distance_normalization_factor_inv; float hist_incr; // Factorization constant float hist_incr_size_component; bool normalize_distances; float3 centroid_p; float3 centroid_n; float3 d_vp_p; float hist_incr_vp; PtrSz<int> indices; const PointType *points; const NormalType *normals; mutable PtrStep<float> global_buffer; mutable float* output; template<typename T> __device__ __forceinline__ float3 fetch(const T* data, int index) const { T t = data[index]; return make_float3(t.x, t.y, t.z); } __device__ __forceinline__ void compute(float *shist_b1, float *shist_b2, float *shist_b3, float *shist_b4, float *shist_vp) const { int idx = threadIdx.x + blockIdx.x * CTA_SIZE; if (idx > indices.size) return; } __device__ void operator()() const { __shared__ float shist[FSize]; __shared__ bool lastBlock; Block::fill(shist, shist + FSize, 0.f); __syncthreads(); float *const shist_b1 = shist; float *const shist_b2 = shist_b1 + bins1; float *const shist_b3 = shist_b2 + bins2; float *const shist_b4 = shist_b3 + bins3; float *const shist_vp = shist_b4 + bins4; int STRIDE = gridDim.x * CTA_SIZE; int idx = blockIdx.x * CTA_SIZE + threadIdx.x; for (; idx < indices.size; idx += STRIDE) { int index = indices.data[idx]; float3 p = fetch(points, index); float3 n = fetch(normals, index); // spfh int h_index; float f1, f2, f3, f4; if (computePairFeatures(centroid_p, 
centroid_n, p, n, f1, f2, f3, f4)) { // Normalize the f1, f2, f3, f4 features and push them in the histogram h_index = floor (bins1 * ((f1 + M_PI) * (1.f / (2.f * M_PI)))); h_index = min(bins1 - 1, max(0, h_index)); atomicAdd(shist_b1 + h_index, hist_incr); h_index = floor (bins2 * ((f2 + 1.f) * 0.5f)); h_index = min(bins2 - 1, max (0, h_index)); atomicAdd(shist_b2 + h_index, hist_incr); h_index = floor (bins3 * ((f3 + 1.f) * 0.5f)); h_index = min(bins3 - 1, max (0, h_index)); atomicAdd(shist_b3 + h_index, hist_incr); if (normalize_distances) h_index = floor (bins4 * (f4 * distance_normalization_factor_inv)); else h_index = __float2int_rn (f4 * 100); h_index = min(bins4 - 1, max (0, h_index)); atomicAdd(shist_b4 + h_index, hist_incr_size_component); } // viewpoint component float alfa = ((dot(n, d_vp_p) + 1.f) * 0.5f); h_index = floor (bins_vp * alfa); h_index = min(bins_vp - 1, max (0, h_index)); atomicAdd(shist_vp + h_index, hist_incr_vp); } /* for (; idx < indices.size; idx += STRIDE) */ __syncthreads(); Block::copy(shist, shist + FSize, global_buffer.ptr(blockIdx.x)); __threadfence(); if (threadIdx.x == 0) { unsigned int value = atomicInc(&count, gridDim.x); lastBlock = (value == (gridDim.x - 1)); } __syncthreads(); if (lastBlock) { int total_rows = gridDim.x; for(int x = threadIdx.x; x < FSize; x += CTA_SIZE) { float sum = 0.f; for(int y = 0; y < total_rows; ++y) sum += global_buffer.ptr(y)[x]; output[x] = sum; } if (threadIdx.x == 0) count = 0; } } }; __global__ void estimateVfhKernel(const VfhDevice vfh) { vfh(); } } } void pcl::device::VFHEstimationImpl::compute(DeviceArray<VFHSignature308>& feature) { feature.create(1); VfhDevice vfh; vfh.centroid_p = xyz_centroid; vfh.centroid_n = normal_centroid; vfh.indices = indices; vfh.points = points; vfh.normals = normals; vfh.normalize_distances = normalize_distances; // Compute the direction of view from the viewpoint to the centroid vfh.d_vp_p = normalized (viewpoint - xyz_centroid); vfh.distance_normalization_factor_inv = 1.f; if ( normalize_distances ) { float3 max_pt = getMaxDistance (points, indices, xyz_centroid); vfh.distance_normalization_factor_inv = 1.f / norm(xyz_centroid - max_pt); } // Factorization constant vfh.hist_incr = normalize_bins ? (100.f / (indices.size() - 1)) : 1.f; vfh.hist_incr_size_component = size_component ? vfh.hist_incr : 0.f; vfh.hist_incr_vp = normalize_bins ? (100.f / indices.size()) : 1.f; int device; cudaSafeCall( cudaGetDevice(&device) ); cudaDeviceProp prop; cudaSafeCall( cudaGetDeviceProperties(&prop, device) ); int total = static_cast<int> (indices.empty() ? points.size() : indices.size()); int total_lenght_in_blocks = (total + VfhDevice::CTA_SIZE - 1) / VfhDevice::CTA_SIZE; int block = VfhDevice::CTA_SIZE; int grid = min(total_lenght_in_blocks, prop.multiProcessorCount * prop.maxThreadsPerMultiProcessor / VfhDevice::CTA_SIZE); DeviceArray2D<float> global_buffer(grid, VfhDevice::FSize); vfh.global_buffer = global_buffer; vfh.output = (float*)feature.ptr(); estimateVfhKernel<<<grid, block>>>(vfh); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); }
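// ---------------------------------------------------------------------------
// Reduction-scheme note for the VFH estimator above: each block accumulates
// a private 308-bin histogram (45 + 45 + 45 + 45 shape bins plus 128
// viewpoint bins) in shared memory with atomicAdd, then copies it into its
// own row of global_buffer. The __threadfence() plus atomicInc on the global
// `count` variable lets exactly one block (the last one to arrive) see that
// every row has been written; that block sums the rows column-wise into the
// final descriptor and resets `count` to zero for the next invocation.
// ---------------------------------------------------------------------------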
436cca595d0d85fc97840d2390802c49feaf7cd7.hip
// !!! This is a file automatically generated by hipify!!! #include "ctranslate2/primitives.h" #include <hip/hip_runtime.h> #include <rocblas.h> #include "cuda/helpers.h" #include "type_dispatch.h" namespace ctranslate2 { template<> template <typename T> T primitives<Device::CUDA>::at(const T* x, dim_t index) { T val = T(); cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1); return val; } template<> template <typename T> void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) { THRUST_CALL(thrust::fill, x, x + size, a); } template<> template <typename T> void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) { auto it = thrust::make_permutation_iterator( x, thrust::make_transform_iterator(thrust::counting_iterator<cuda::index_t>(0), thrust::placeholders::_1 * inc_x)); THRUST_CALL(thrust::fill, it, it + size, a); } template<> template <typename T> void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) { CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyDeviceToDevice, cuda::get_cuda_stream())); } template<> template <typename U, typename V> void primitives<Device::CUDA>::convert(const U* x, V* y, dim_t size) { THRUST_CALL(thrust::copy, cuda::device_cast(x), cuda::device_cast(x) + size, cuda::device_cast(y)); } template void primitives<Device::CUDA>::convert(const float*, float16_t*, dim_t); template void primitives<Device::CUDA>::convert(const float16_t*, float*, dim_t); template<> template <typename T> T primitives<Device::CUDA>::sum(const T* array, dim_t size) { return T(THRUST_CALL(thrust::reduce, cuda::device_cast(array), cuda::device_cast(array) + size, cuda::device_type<T>(), cuda::plus<cuda::device_type<T>>())); } template<> template <typename T> dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) { const auto* max = THRUST_CALL(thrust::max_element, cuda::device_cast(array), cuda::device_cast(array) + size, cuda::maximum<cuda::device_type<T>>()); return static_cast<dim_t>(max - cuda::device_cast(array)); } template<> template <typename T> T primitives<Device::CUDA>::max(const T* array, dim_t size) { return at(array, max_element(array, size)); } template<> template <typename T> void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::plus, DeviceT>(DeviceT(a))); } template<> template <typename T> void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::plus<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c, dim_t a_size, dim_t b_size) { cuda::binary_transform(a, b, c, b_size, cuda::plus<cuda::device_type<T>>(), cuda::repeat_vec<cuda::index_t>(a_size)); } template<> template <typename T> void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c, dim_t a_size, dim_t b_size) { cuda::binary_transform(a, b, c, b_size, cuda::plus<cuda::device_type<T>>(), cuda::repeat_vec_depth<cuda::index_t>(b_size / a_size)); } template<> template <typename T> void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::minus<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::minimum, 
DeviceT>(DeviceT(a))); } template<> template <typename T> void primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::minimum<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::maximum, DeviceT>(DeviceT(a))); } template<> template <typename T> void primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::maximum<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::multiplies, DeviceT>(DeviceT(a))); } template<> template <typename T> void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::multiplies<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c, dim_t a_size, dim_t b_size) { cuda::binary_transform(a, b, c, b_size, cuda::multiplies<cuda::device_type<T>>(), cuda::repeat_vec<cuda::index_t>(a_size)); } template<> template <typename T> void primitives<Device::CUDA>::relu(const T* x, T* y, dim_t size) { cuda::unary_transform(x, y, size, cuda::relu_func<cuda::device_type<T>>()); } template void primitives<Device::CUDA>::relu(const float*, float*, dim_t); template void primitives<Device::CUDA>::relu(const float16_t*, float16_t*, dim_t); template<> template <typename T> void primitives<Device::CUDA>::gelu(const T* x, T* y, dim_t size) { cuda::unary_transform(x, y, size, cuda::gelu_func<cuda::device_type<T>>()); } template void primitives<Device::CUDA>::gelu(const float*, float*, dim_t); template void primitives<Device::CUDA>::gelu(const float16_t*, float16_t*, dim_t); template <typename T> struct perm_indices_2d { T _rows, _cols; perm_indices_2d(T rows, T cols) : _rows(rows) , _cols(cols) { } __host__ __device__ T operator()(const T i) const { const T i0 = i / _rows; const T i1 = i % _rows; return i1 * _cols + i0; } }; template<> template <typename T> void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) { cuda::permute(a, b, dims[0] * dims[1], perm_indices_2d<cuda::index_t>(dims[0], dims[1])); } template <typename T> struct perm_indices_3d { T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array. T _b_d0, _b_d1, _b_d2; // Dimension of the permutated array. T _b_s0, _b_s1, _b_s2; // Strides of the permutated array. 
perm_indices_3d(const dim_t* dims, const dim_t* perm) { const dim_t a_stride[3] = {dims[1] * dims[2], dims[2], 1}; _a_ps0 = a_stride[perm[0]]; _a_ps1 = a_stride[perm[1]]; _a_ps2 = a_stride[perm[2]]; _b_d0 = dims[perm[0]]; _b_d1 = dims[perm[1]]; _b_d2 = dims[perm[2]]; _b_s0 = _b_d1 * _b_d2; _b_s1 = _b_d2; _b_s2 = 1; } __host__ __device__ T operator()(const T i) const { const T i0 = i / _b_s0; const T i1 = i / _b_s1 % _b_d1; const T i2 = i % _b_d2; return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2; } }; template<> template <typename T> void primitives<Device::CUDA>::transpose_3d(const T* a, const dim_t* dims, const dim_t* perm, T* b) { cuda::permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<cuda::index_t>(dims, perm)); } template <typename T> struct perm_indices_4d { T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array. T _b_d0, _b_d1, _b_d2, _b_d3; // Dimension of the permutated array. T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permutated array. perm_indices_4d(const dim_t* dims, const dim_t* perm) { const dim_t a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1}; _a_ps0 = a_stride[perm[0]]; _a_ps1 = a_stride[perm[1]]; _a_ps2 = a_stride[perm[2]]; _a_ps3 = a_stride[perm[3]]; _b_d0 = dims[perm[0]]; _b_d1 = dims[perm[1]]; _b_d2 = dims[perm[2]]; _b_d3 = dims[perm[3]]; _b_s0 = _b_d1 * _b_d2 * _b_d3; _b_s1 = _b_d2 * _b_d3; _b_s2 = _b_d3; _b_s3 = 1; } __host__ __device__ T operator()(const T i) const { const T i0 = i / _b_s0; const T i1 = i / _b_s1 % _b_d1; const T i2 = i / _b_s2 % _b_d2; const T i3 = i % _b_d3; return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3; } }; template <typename T> __global__ void transpose_0213(const T* in, const cuda::index_t rows, const cuda::index_t cols, const cuda::index_t stride1, const cuda::index_t stride2, T* out) { const cuda::index_t stride = stride1 * stride2; for (cuda::index_t j = blockIdx.x; j < rows; j += gridDim.x) { const cuda::index_t z = j / stride; const cuda::index_t y = (j % stride) / stride1; const cuda::index_t x = (j % stride) % stride1; const cuda::index_t j2 = z * stride + x * stride2 + y; const T* row_in = in + j2 * cols; T* row_out = out + j * cols; for (cuda::index_t i = threadIdx.x; i < cols; i += blockDim.x) { row_out[i] = row_in[i]; } } } template<> template <typename T> void primitives<Device::CUDA>::transpose_4d(const T* a, const dim_t* dims, const dim_t* perm, T* b) { if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1 && perm[3] == 3) { // Optimize the permutation used in multi-head attention. const dim_t rows = dims[0] * dims[1] * dims[2]; const dim_t cols = dims[3]; const dim_t blocks = ::min(rows, cuda::max_blocks); const dim_t threads = ::min(cols, cuda::max_threads); hipLaunchKernelGGL(( transpose_0213), dim3(blocks), dim3(threads), 0, cuda::get_cuda_stream(), a, rows, cols, dims[1], dims[2], b); return; } cuda::permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<cuda::index_t>(dims, perm)); } template<> template<> void primitives<Device::CUDA>::gemm(bool, bool, bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float* a, dim_t lda, const float* b, dim_t ldb, float beta, float* c, dim_t ldc, const float*) { // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(hipblasSgemm(cuda::get_cublas_handle(), transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N, transpose_a ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, n, m, k, &alpha, b, ldb, a, lda, &beta, c, ldc)); } template<> template<> void primitives<Device::CUDA>::gemm(bool, bool, bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float16_t* a, dim_t lda, const float16_t* b, dim_t ldb, float beta, float16_t* c, dim_t ldc, const float16_t*) { const __half alpha_h = alpha; const __half beta_h = beta; // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(hipblasGemmEx(cuda::get_cublas_handle(), transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N, transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N, n, m, k, &alpha_h, b, HIP_R_16F, ldb, a, HIP_R_16F, lda, &beta_h, c, HIP_R_16F, ldc, HIP_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } template<> template<> void primitives<Device::CUDA>::gemm(bool, bool, bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const int8_t* a, dim_t lda, const int8_t* b, dim_t ldb, float beta, int32_t* c, dim_t ldc, const int32_t*) { int32_t alpha_i = alpha; int32_t beta_i = beta; // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(hipblasGemmEx(cuda::get_cublas_handle(), transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N, transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N, n, m, k, &alpha_i, b, HIP_R_8I, ldb, a, HIP_R_8I, lda, &beta_i, c, HIP_R_32I, ldc, HIP_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } template<> template<> void primitives<Device::CUDA>::gemm_batch_strided(bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float* a, dim_t lda, dim_t stridea, const float* b, dim_t ldb, dim_t strideb, float beta, float* c, dim_t ldc, dim_t stridec, dim_t batch_size) { // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(hipblasSgemmStridedBatched(cuda::get_cublas_handle(), transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N, transpose_a ? HIPBLAS_OP_T : HIPBLAS_OP_N, n, m, k, &alpha, b, ldb, strideb, a, lda, stridea, &beta, c, ldc, stridec, batch_size)); } template<> template<> void primitives<Device::CUDA>::gemm_batch_strided(bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float16_t* a, dim_t lda, dim_t stridea, const float16_t* b, dim_t ldb, dim_t strideb, float beta, float16_t* c, dim_t ldc, dim_t stridec, dim_t batch_size) { const __half alpha_h = alpha; const __half beta_h = beta; // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(hipblasGemmStridedBatchedEx(cuda::get_cublas_handle(), transpose_b ? HIPBLAS_OP_T : HIPBLAS_OP_N, transpose_a ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, n, m, k, &alpha_h, b, HIP_R_16F, ldb, strideb, a, HIP_R_16F, lda, stridea, &beta_h, c, HIP_R_16F, ldc, stridec, batch_size, HIP_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } struct exp_func { __host__ __device__ float operator()(float x) { return expf(x); } }; template<> void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) { cuda::unary_transform(x, y, size, exp_func()); } struct log_func { __host__ __device__ float operator()(float x) { return logf(x); } }; template<> template<> void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) { cuda::unary_transform(x, y, size, log_func()); } #if CUDA_CAN_USE_HALF struct hlog_func { __device__ __half operator()(__half x) { return hlog(x); } }; #else struct hlog_func { __host__ __device__ __half operator()(__half x) { return __half(logf(float(x))); } }; #endif template<> template<> void primitives<Device::CUDA>::log(const float16_t* x, float16_t* y, dim_t size) { cuda::unary_transform(x, y, size, hlog_func()); } template<> template <typename T> void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) { CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyHostToDevice, cuda::get_cuda_stream())); } template<> template <typename T> void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) { CUDA_CHECK(hipMemcpyAsync(y, x, size * sizeof (T), hipMemcpyDeviceToHost, cuda::get_cuda_stream())); } #define DECLARE_IMPL(T) \ template T \ primitives<Device::CUDA>::at(const T* x, dim_t index); \ template void \ primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \ template void \ primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \ template void \ primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \ template T \ primitives<Device::CUDA>::sum(const T* array, dim_t size); \ template dim_t \ primitives<Device::CUDA>::max_element(const T* array, dim_t size); \ template T \ primitives<Device::CUDA>::max(const T* array, dim_t size); \ template void \ primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \ T* c, dim_t a_size, dim_t b_size); \ template void \ primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \ T* c, dim_t a_size, dim_t b_size); \ template void \ primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \ T* c, dim_t a_size, dim_t b_size); \ template void \ primitives<Device::CUDA>::transpose_2d(const T* a, \ const dim_t* dims, \ T* b); \ template void \ primitives<Device::CUDA>::transpose_3d(const T* a, \ const dim_t* dims, \ const dim_t* perm, \ T* b); \ template void \ primitives<Device::CUDA>::transpose_4d(const T* a, \ const dim_t* dims, \ const 
dim_t* perm, \ T* b); \ template void \ cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \ template void \ cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t); DECLARE_ALL_TYPES(DECLARE_IMPL) }
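// ---------------------------------------------------------------------------
// Index-mapping note for the permutation functors above, assuming
// cuda::permute performs a gather, i.e. b[i] = a[functor(i)]. A worked
// example for perm_indices_2d with dims = {2, 3} (a 2x3 row-major input,
// so b is its 3x2 transpose):
//
//   output i = 1  ->  (1 % 2) * 3 + 1 / 2 = 3   // b(0,1) <- a(1,0)
//   output i = 4  ->  (4 % 2) * 3 + 4 / 2 = 2   // b(2,0) <- a(0,2)
//
// which is exactly the transpose of the row-major input.
// ---------------------------------------------------------------------------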
436cca595d0d85fc97840d2390802c49feaf7cd7.cu
#include "ctranslate2/primitives.h" #include <cuda_runtime.h> #include <cublas_v2.h> #include "cuda/helpers.h" #include "type_dispatch.h" namespace ctranslate2 { template<> template <typename T> T primitives<Device::CUDA>::at(const T* x, dim_t index) { T val = T(); cross_device_primitives<Device::CUDA, Device::CPU>::copy(x + index, &val, 1); return val; } template<> template <typename T> void primitives<Device::CUDA>::fill(T* x, T a, dim_t size) { THRUST_CALL(thrust::fill, x, x + size, a); } template<> template <typename T> void primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size) { auto it = thrust::make_permutation_iterator( x, thrust::make_transform_iterator(thrust::counting_iterator<cuda::index_t>(0), thrust::placeholders::_1 * inc_x)); THRUST_CALL(thrust::fill, it, it + size, a); } template<> template <typename T> void primitives<Device::CUDA>::copy(const T* x, T* y, dim_t size) { CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyDeviceToDevice, cuda::get_cuda_stream())); } template<> template <typename U, typename V> void primitives<Device::CUDA>::convert(const U* x, V* y, dim_t size) { THRUST_CALL(thrust::copy, cuda::device_cast(x), cuda::device_cast(x) + size, cuda::device_cast(y)); } template void primitives<Device::CUDA>::convert(const float*, float16_t*, dim_t); template void primitives<Device::CUDA>::convert(const float16_t*, float*, dim_t); template<> template <typename T> T primitives<Device::CUDA>::sum(const T* array, dim_t size) { return T(THRUST_CALL(thrust::reduce, cuda::device_cast(array), cuda::device_cast(array) + size, cuda::device_type<T>(), cuda::plus<cuda::device_type<T>>())); } template<> template <typename T> dim_t primitives<Device::CUDA>::max_element(const T* array, dim_t size) { const auto* max = THRUST_CALL(thrust::max_element, cuda::device_cast(array), cuda::device_cast(array) + size, cuda::maximum<cuda::device_type<T>>()); return static_cast<dim_t>(max - cuda::device_cast(array)); } template<> template <typename T> T primitives<Device::CUDA>::max(const T* array, dim_t size) { return at(array, max_element(array, size)); } template<> template <typename T> void primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::plus, DeviceT>(DeviceT(a))); } template<> template <typename T> void primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::plus<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, T* c, dim_t a_size, dim_t b_size) { cuda::binary_transform(a, b, c, b_size, cuda::plus<cuda::device_type<T>>(), cuda::repeat_vec<cuda::index_t>(a_size)); } template<> template <typename T> void primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, T* c, dim_t a_size, dim_t b_size) { cuda::binary_transform(a, b, c, b_size, cuda::plus<cuda::device_type<T>>(), cuda::repeat_vec_depth<cuda::index_t>(b_size / a_size)); } template<> template <typename T> void primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::minus<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::minimum, DeviceT>(DeviceT(a))); } template<> template <typename T> void 
primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::minimum<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::maximum, DeviceT>(DeviceT(a))); } template<> template <typename T> void primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::maximum<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size) { using DeviceT = cuda::device_type<T>; cuda::unary_transform(x, y, size, cuda::bind_right<cuda::multiplies, DeviceT>(DeviceT(a))); } template<> template <typename T> void primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size) { cuda::binary_transform(a, b, c, size, cuda::multiplies<cuda::device_type<T>>()); } template<> template <typename T> void primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, T* c, dim_t a_size, dim_t b_size) { cuda::binary_transform(a, b, c, b_size, cuda::multiplies<cuda::device_type<T>>(), cuda::repeat_vec<cuda::index_t>(a_size)); } template<> template <typename T> void primitives<Device::CUDA>::relu(const T* x, T* y, dim_t size) { cuda::unary_transform(x, y, size, cuda::relu_func<cuda::device_type<T>>()); } template void primitives<Device::CUDA>::relu(const float*, float*, dim_t); template void primitives<Device::CUDA>::relu(const float16_t*, float16_t*, dim_t); template<> template <typename T> void primitives<Device::CUDA>::gelu(const T* x, T* y, dim_t size) { cuda::unary_transform(x, y, size, cuda::gelu_func<cuda::device_type<T>>()); } template void primitives<Device::CUDA>::gelu(const float*, float*, dim_t); template void primitives<Device::CUDA>::gelu(const float16_t*, float16_t*, dim_t); template <typename T> struct perm_indices_2d { T _rows, _cols; perm_indices_2d(T rows, T cols) : _rows(rows) , _cols(cols) { } __host__ __device__ T operator()(const T i) const { const T i0 = i / _rows; const T i1 = i % _rows; return i1 * _cols + i0; } }; template<> template <typename T> void primitives<Device::CUDA>::transpose_2d(const T* a, const dim_t* dims, T* b) { cuda::permute(a, b, dims[0] * dims[1], perm_indices_2d<cuda::index_t>(dims[0], dims[1])); } template <typename T> struct perm_indices_3d { T _a_ps0, _a_ps1, _a_ps2; // Permuted strides of the original array. T _b_d0, _b_d1, _b_d2; // Dimension of the permutated array. T _b_s0, _b_s1, _b_s2; // Strides of the permutated array. perm_indices_3d(const dim_t* dims, const dim_t* perm) { const dim_t a_stride[3] = {dims[1] * dims[2], dims[2], 1}; _a_ps0 = a_stride[perm[0]]; _a_ps1 = a_stride[perm[1]]; _a_ps2 = a_stride[perm[2]]; _b_d0 = dims[perm[0]]; _b_d1 = dims[perm[1]]; _b_d2 = dims[perm[2]]; _b_s0 = _b_d1 * _b_d2; _b_s1 = _b_d2; _b_s2 = 1; } __host__ __device__ T operator()(const T i) const { const T i0 = i / _b_s0; const T i1 = i / _b_s1 % _b_d1; const T i2 = i % _b_d2; return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2; } }; template<> template <typename T> void primitives<Device::CUDA>::transpose_3d(const T* a, const dim_t* dims, const dim_t* perm, T* b) { cuda::permute(a, b, dims[0] * dims[1] * dims[2], perm_indices_3d<cuda::index_t>(dims, perm)); } template <typename T> struct perm_indices_4d { T _a_ps0, _a_ps1, _a_ps2, _a_ps3; // Permuted strides of the original array. 
T _b_d0, _b_d1, _b_d2, _b_d3; // Dimension of the permutated array. T _b_s0, _b_s1, _b_s2, _b_s3; // Strides of the permutated array. perm_indices_4d(const dim_t* dims, const dim_t* perm) { const dim_t a_stride[4] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1}; _a_ps0 = a_stride[perm[0]]; _a_ps1 = a_stride[perm[1]]; _a_ps2 = a_stride[perm[2]]; _a_ps3 = a_stride[perm[3]]; _b_d0 = dims[perm[0]]; _b_d1 = dims[perm[1]]; _b_d2 = dims[perm[2]]; _b_d3 = dims[perm[3]]; _b_s0 = _b_d1 * _b_d2 * _b_d3; _b_s1 = _b_d2 * _b_d3; _b_s2 = _b_d3; _b_s3 = 1; } __host__ __device__ T operator()(const T i) const { const T i0 = i / _b_s0; const T i1 = i / _b_s1 % _b_d1; const T i2 = i / _b_s2 % _b_d2; const T i3 = i % _b_d3; return i0 * _a_ps0 + i1 * _a_ps1 + i2 * _a_ps2 + i3 * _a_ps3; } }; template <typename T> __global__ void transpose_0213(const T* in, const cuda::index_t rows, const cuda::index_t cols, const cuda::index_t stride1, const cuda::index_t stride2, T* out) { const cuda::index_t stride = stride1 * stride2; for (cuda::index_t j = blockIdx.x; j < rows; j += gridDim.x) { const cuda::index_t z = j / stride; const cuda::index_t y = (j % stride) / stride1; const cuda::index_t x = (j % stride) % stride1; const cuda::index_t j2 = z * stride + x * stride2 + y; const T* row_in = in + j2 * cols; T* row_out = out + j * cols; for (cuda::index_t i = threadIdx.x; i < cols; i += blockDim.x) { row_out[i] = row_in[i]; } } } template<> template <typename T> void primitives<Device::CUDA>::transpose_4d(const T* a, const dim_t* dims, const dim_t* perm, T* b) { if (perm[0] == 0 && perm[1] == 2 && perm[2] == 1 && perm[3] == 3) { // Optimize the permutation used in multi-head attention. const dim_t rows = dims[0] * dims[1] * dims[2]; const dim_t cols = dims[3]; const dim_t blocks = std::min(rows, cuda::max_blocks); const dim_t threads = std::min(cols, cuda::max_threads); transpose_0213<<<blocks, threads, 0, cuda::get_cuda_stream()>>>(a, rows, cols, dims[1], dims[2], b); return; } cuda::permute(a, b, dims[0] * dims[1] * dims[2] * dims[3], perm_indices_4d<cuda::index_t>(dims, perm)); } template<> template<> void primitives<Device::CUDA>::gemm(bool, bool, bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float* a, dim_t lda, const float* b, dim_t ldb, float beta, float* c, dim_t ldc, const float*) { // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(cublasSgemm(cuda::get_cublas_handle(), transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N, transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N, n, m, k, &alpha, b, ldb, a, lda, &beta, c, ldc)); } template<> template<> void primitives<Device::CUDA>::gemm(bool, bool, bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float16_t* a, dim_t lda, const float16_t* b, dim_t ldb, float beta, float16_t* c, dim_t ldc, const float16_t*) { const __half alpha_h = alpha; const __half beta_h = beta; // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(cublasGemmEx(cuda::get_cublas_handle(), transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N, transpose_a ? 
CUBLAS_OP_T : CUBLAS_OP_N, n, m, k, &alpha_h, b, CUDA_R_16F, ldb, a, CUDA_R_16F, lda, &beta_h, c, CUDA_R_16F, ldc, CUDA_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } template<> template<> void primitives<Device::CUDA>::gemm(bool, bool, bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const int8_t* a, dim_t lda, const int8_t* b, dim_t ldb, float beta, int32_t* c, dim_t ldc, const int32_t*) { int32_t alpha_i = alpha; int32_t beta_i = beta; // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(cublasGemmEx(cuda::get_cublas_handle(), transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N, transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N, n, m, k, &alpha_i, b, CUDA_R_8I, ldb, a, CUDA_R_8I, lda, &beta_i, c, CUDA_R_32I, ldc, CUDA_R_32I, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } template<> template<> void primitives<Device::CUDA>::gemm_batch_strided(bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float* a, dim_t lda, dim_t stridea, const float* b, dim_t ldb, dim_t strideb, float beta, float* c, dim_t ldc, dim_t stridec, dim_t batch_size) { // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(cublasSgemmStridedBatched(cuda::get_cublas_handle(), transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N, transpose_a ? CUBLAS_OP_T : CUBLAS_OP_N, n, m, k, &alpha, b, ldb, strideb, a, lda, stridea, &beta, c, ldc, stridec, batch_size)); } template<> template<> void primitives<Device::CUDA>::gemm_batch_strided(bool transpose_a, bool transpose_b, dim_t m, dim_t n, dim_t k, float alpha, const float16_t* a, dim_t lda, dim_t stridea, const float16_t* b, dim_t ldb, dim_t strideb, float beta, float16_t* c, dim_t ldc, dim_t stridec, dim_t batch_size) { const __half alpha_h = alpha; const __half beta_h = beta; // cuBLAS assumes column-major storage, so swap a and b accordingly. CUBLAS_CHECK(cublasGemmStridedBatchedEx(cuda::get_cublas_handle(), transpose_b ? CUBLAS_OP_T : CUBLAS_OP_N, transpose_a ? 
CUBLAS_OP_T : CUBLAS_OP_N, n, m, k, &alpha_h, b, CUDA_R_16F, ldb, strideb, a, CUDA_R_16F, lda, stridea, &beta_h, c, CUDA_R_16F, ldc, stridec, batch_size, CUDA_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } struct exp_func { __host__ __device__ float operator()(float x) { return expf(x); } }; template<> void primitives<Device::CUDA>::exp(const float* x, float* y, dim_t size) { cuda::unary_transform(x, y, size, exp_func()); } struct log_func { __host__ __device__ float operator()(float x) { return logf(x); } }; template<> template<> void primitives<Device::CUDA>::log(const float* x, float* y, dim_t size) { cuda::unary_transform(x, y, size, log_func()); } #if CUDA_CAN_USE_HALF struct hlog_func { __device__ __half operator()(__half x) { return hlog(x); } }; #else struct hlog_func { __host__ __device__ __half operator()(__half x) { return __half(logf(float(x))); } }; #endif template<> template<> void primitives<Device::CUDA>::log(const float16_t* x, float16_t* y, dim_t size) { cuda::unary_transform(x, y, size, hlog_func()); } template<> template <typename T> void cross_device_primitives<Device::CPU, Device::CUDA>::copy(const T* x, T* y, dim_t size) { CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyHostToDevice, cuda::get_cuda_stream())); } template<> template <typename T> void cross_device_primitives<Device::CUDA, Device::CPU>::copy(const T* x, T* y, dim_t size) { CUDA_CHECK(cudaMemcpyAsync(y, x, size * sizeof (T), cudaMemcpyDeviceToHost, cuda::get_cuda_stream())); } #define DECLARE_IMPL(T) \ template T \ primitives<Device::CUDA>::at(const T* x, dim_t index); \ template void \ primitives<Device::CUDA>::fill(T* x, T a, dim_t size); \ template void \ primitives<Device::CUDA>::strided_fill(T* x, T a, dim_t inc_x, dim_t size); \ template void \ primitives<Device::CUDA>::copy<T>(const T* x, T* y, dim_t size); \ template T \ primitives<Device::CUDA>::sum(const T* array, dim_t size); \ template dim_t \ primitives<Device::CUDA>::max_element(const T* array, dim_t size); \ template T \ primitives<Device::CUDA>::max(const T* array, dim_t size); \ template void \ primitives<Device::CUDA>::add(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::add(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::add_batch_broadcast(const T* a, const T* b, \ T* c, dim_t a_size, dim_t b_size); \ template void \ primitives<Device::CUDA>::add_depth_broadcast(const T* a, const T* b, \ T* c, dim_t a_size, dim_t b_size); \ template void \ primitives<Device::CUDA>::sub(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::min(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::min(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::max(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::max(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::mul(T a, const T* x, T* y, dim_t size); \ template void \ primitives<Device::CUDA>::mul(const T* a, const T* b, T* c, dim_t size); \ template void \ primitives<Device::CUDA>::mul_batch_broadcast(const T* a, const T* b, \ T* c, dim_t a_size, dim_t b_size); \ template void \ primitives<Device::CUDA>::transpose_2d(const T* a, \ const dim_t* dims, \ T* b); \ template void \ primitives<Device::CUDA>::transpose_3d(const T* a, \ const dim_t* dims, \ const dim_t* perm, \ T* b); \ template void \ primitives<Device::CUDA>::transpose_4d(const T* a, \ const dim_t* dims, \ 
const dim_t* perm, \ T* b); \ template void \ cross_device_primitives<Device::CPU, Device::CUDA>::copy<T>(const T*, T*, dim_t); \ template void \ cross_device_primitives<Device::CUDA, Device::CPU>::copy<T>(const T*, T*, dim_t); DECLARE_ALL_TYPES(DECLARE_IMPL) }
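The gemm wrappers above all note that cuBLAS assumes column-major storage and therefore pass the operands swapped, with the dimensions given as (n, m, k). A stand-alone check of that trick is sketched below; it is not part of CTranslate2, and the sizes and values are arbitrary. It computes a row-major C = A * B by asking cuBLAS for the column-major product C^T = B^T * A^T, which is exactly the argument order used by the wrappers.

// Stand-alone check of the operand-swap trick used by the gemm wrappers above
// (illustrative only; not part of CTranslate2).
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const int m = 2, n = 3, k = 4;
  float A[m * k], B[k * n], C[m * n], ref[m * n];
  for (int i = 0; i < m * k; ++i) A[i] = float(i + 1);
  for (int i = 0; i < k * n; ++i) B[i] = float(2 * i - 3);

  // Row-major reference on the CPU.
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j) {
      float acc = 0.f;
      for (int p = 0; p < k; ++p) acc += A[i * k + p] * B[p * n + j];
      ref[i * n + j] = acc;
    }

  float *dA, *dB, *dC;
  cudaMalloc(&dA, sizeof(A));
  cudaMalloc(&dB, sizeof(B));
  cudaMalloc(&dC, sizeof(C));
  cudaMemcpy(dA, A, sizeof(A), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, B, sizeof(B), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.f, beta = 0.f;
  // Same argument order as the wrappers: operands swapped, dimensions passed as (n, m, k),
  // so cuBLAS sees B^T (n x k) times A^T (k x m) and writes C^T (n x m), i.e. row-major C.
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              n, m, k, &alpha, dB, n, dA, k, &beta, dC, n);

  cudaMemcpy(C, dC, sizeof(C), cudaMemcpyDeviceToHost);
  for (int i = 0; i < m * n; ++i)
    printf("C[%d] = %g (reference %g)\n", i, C[i], ref[i]);

  cublasDestroy(handle);
  cudaFree(dA);
  cudaFree(dB);
  cudaFree(dC);
  return 0;
}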
82589c405d6f610218c427900986ac836073ed3a.hip
// !!! This is a file automatically generated by hipify!!!
#include "reduce_hip.cuh"
#include "real.h"
#include "assert.h"
#include <iostream>

int main(){
    real summands[1024];
    for (int i=0; i!=1024; ++i)
        summands[i]=1;
    for (int j=0; j!=1000; ++j)
        reducev2(summands,1024);
}
82589c405d6f610218c427900986ac836073ed3a.cu
#include "reduce.cuh" #include "real.h" #include "assert.h" #include <iostream> int main(){ real summands[1024]; for (int i=0; i!=1024; ++i) summands[i]=1; for (int j=0; j!=1000; ++j) reducev2(summands,1024); }
6b9b834bb16a799324cba5e9f0dbf480fe23527b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <helpers/PointersManager.h> #include <math/templatemath.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x input is [bS, iC, iD, iH, iW] // z output is [bS, iC, oD, oH, oW] const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) max = val; } } } z[zOffset] = max; } break; /*** avg ***/ case 1: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += 
x[shape::getOffset(xShapeInfo, coords)]; if (extraParam0 == 0) { //Exclude padding uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1); uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1); uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1); sum /= static_cast<T>(a * b * c); // /= sd::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation } else if (extraParam0 == 1) //Include padding sum /= kProd; z[zOffset] = sum; } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); sum = sd::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0); z[zOffset] = sum; } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { hipLaunchKernelGGL(( pooling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { PointersManager manager(block.launchContext(), "pooling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } } }
6b9b834bb16a799324cba5e9f0dbf480fe23527b.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <helpers/PointersManager.h> #include <math/templatemath.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x input is [bS, iC, iD, iH, iW] // z output is [bS, iC, oD, oH, oW] const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) max = val; } } } z[zOffset] = max; } break; /*** avg ***/ case 1: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += x[shape::getOffset(xShapeInfo, coords)]; if (extraParam0 == 0) { //Exclude padding uint a = 
(dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1); uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1); uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1); sum /= static_cast<T>(a * b * c); // /= sd::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation } else if (extraParam0 == 1) //Include padding sum /= kProd; z[zOffset] = sum; } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); sum = sd::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0); z[zOffset] = sum; } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { pooling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { PointersManager manager(block.launchContext(), "pooling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } } }
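The kernel above derives each pooling window from the effective (dilated) kernel extent, e.g. kHeff = kH + (kH - 1) * (dH - 1), snaps the window start/end into the input bounds, and, for average pooling with extraParam0 == 0, divides by the number of taps that actually fall inside the input. A host-only illustration of that window math along a single axis follows; it is not part of the sd:: library and the sizes are made up.

#include <cstdio>

// Stand-alone illustration of the per-axis window math used by pooling3dCuda:
// dilated kernel extent, start/end clamping, and the tap count used by the
// exclude-padding average (extraParam0 == 0).
int main() {
  const int iH = 7, kH = 3, sH = 2, pH = 1, dH = 2;  // example sizes
  const int kHeff = kH + (kH - 1) * (dH - 1);        // dilated extent: 5
  for (int oh = 0; oh < 4; ++oh) {                   // a few output rows
    int hstart = oh * sH - pH;
    int hend = hstart + kHeff;
    if (hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH);  // snap start into bounds
    if (hend > iH)  hend  -= dH * ((hend - iH + dH - 1) / dH); // snap end into bounds
    int count = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1);
    printf("oh=%d window=[%d,%d) step=%d -> %d taps\n", oh, hstart, hend, dH, count);
  }
  return 0;
}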
7bd2e3421b47675db20d69f3731937b07d955350.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "activation_func.h"

// Compute Sigmoid function
__global__ void Sigmoid(const int n, const float *Z, float *A)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        A[i] = 1.0f / (1.0f + expf(-Z[i]));
}

// Compute Rectified-Linear-Unit function
__global__ void Relu(const int n, const float *Z, float *A)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        A[i] = fmaxf(Z[i], 0.0f);
}

// Compute derivative of Sigmoid function
// For use in Backprop, dA is multiplied to get dZ
__global__ void SigmoidBackward(const int n, const float *dA, const float *A, float *dZ)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        dZ[i] = dA[i] * A[i] * (1.0f - A[i]);
}

// Compute derivative of Relu function
// For use in Backprop, dA is multiplied to get dZ
__global__ void ReluBackward(const int n, const float *dA, const float *Z, float *dZ)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        if (Z[i] > 0.0f)
            dZ[i] = dA[i];
        else
            dZ[i] = 0.0f;
    }
}
7bd2e3421b47675db20d69f3731937b07d955350.cu
#include "activation_func.h" // Compute Sigmoid function __global__ void Sigmoid(const int n, const float *Z, float *A) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) A[i] = 1.0f / (1.0f + expf(-Z[i])); } // Compute Rectified-Linear-Unit function __global__ void Relu(const int n, const float *Z, float *A) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) A[i] = fmaxf(Z[i], 0.0f); } // Compute derivative of Sigmoid function // For use in Backprop, dA is multiplied to get dZ __global__ void SigmoidBackward(const int n, const float *dA, const float *A, float *dZ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) dZ[i] = dA[i] * A[i] * (1.0f - A[i]); } // Compute derivative of Relu function // For use in Backprop, dA is multiplied to get dZ __global__ void ReluBackward(const int n, const float *dA, const float *Z, float *dZ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { if (Z[i] > 0.0f) dZ[i] = dA[i]; else dZ[i] = 0.0f; } }
bd81cc95d24cde1357a54b1eb0d941995bbde645.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:51 2012 */ #include "common_magma.h" #define PRECISION_s /*The version for fermi can be found in ssymv_fermi.cu */ #if (GPUSHMEM < 200) #define magmablas_ssymv_130 magmablas_ssymv #define ssymv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Functions for each specific cases - Lower case */ __global__ void magmablas_ssymv_130_L_special( magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; __shared__ float la [quarter_thread_x][thread_x+1]; /* Why +3? */ __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += ty_* lda + tx_ ; if( ty == 0 ){ buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_ ; ty = ty_ ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j +=8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_ * 4 + 4) ; i++){ if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = la[0][ i * bank_shift + tx_]; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res1,0); } __syncthreads(); MAGMA_S_SET2REAL(res, 0) ; A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); float res2; MAGMA_S_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); MAGMA_S_SET2REAL(res,0); A-=half_thread_x *lda ; MAGMA_S_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = 
tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { MAGMA_S_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_S_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda * blkc * thread_x; x= x - blkc * thread_x *incx ; A+=4 * ty* lda ; A+=tx; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(magma_int_t j=0;j<4;j++) { b[j] = buff[ty_*4+j]; } #pragma unroll for(magma_int_t i=0; i<thread_x*blkc; i += thread_x ) { res_ = MAGMA_S_ZERO; count++; if(ty == 0) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++) { #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j]; la[( j + ty * 4)][tx] = tr[j]; } __syncthreads(); MAGMA_S_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res_+=la[tx_][ty_*4+j] * b[j]; } b[4+k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void magmablas_ssymv_130_L_generic(magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t m_mod_thread_x) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; __shared__ float la [quarter_thread_x][thread_x+3]; __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC+= break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += lda * ty_; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_S_SET2REAL(buff[tx],0); } else buff[tx] = 
x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx_; A += trackA ; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-=trackA; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_ ; ty = ty_ ; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_*4+4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][i*bank_shift+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res1,0); } __syncthreads(); MAGMA_S_SET2REAL(res,0); if( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A+= trackA+half_thread_x*lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-= trackA+half_thread_x*lda ; A+=tx_ ; A+= half_thread_x + half_thread_x *lda ; } else { A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); float res2; MAGMA_S_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); MAGMA_S_SET2REAL(res,0); MAGMA_S_SET2REAL(res_,0); A-=half_thread_x *lda ; if( blkc == ( gridDim.x - 1 ) ) { A-=tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A+= trackA ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(tr[j/8], 99999); } else tr[j/8] = A[ j * lda]; A-=trackA; A+=tx_; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+= tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; 
__syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { MAGMA_S_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_S_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda*break_d; x= x - break_d *incx ; A+=4 * ty* lda ; if( blkc == ( gridDim.x - 1 ) ) { if(tx <= m_mod_thread_x ) A+=tx; else A+=m_mod_thread_x; } else{ A+=tx; } magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) b[j] = buff[ty_*4+j]; #pragma unroll for(magma_int_t i=0; i< break_d; i += thread_x ){ MAGMA_S_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++){ #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); MAGMA_S_SET2REAL(res_, 0) ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx] ; WC[0+lda*(blkc)] = res; } } __global__ void magmablas_ssymv_130_L_update(magma_int_t n, float alpha, const float* A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * thread_x + tx ; float Ca; MAGMA_S_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i<n; i+=thread_x){ Ca += WC[0] ; WC += thread_x; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_ssymv_130_L(magma_int_t m, float alpha, const float *A, magma_int_t lda, const float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work) { magma_int_t blocks; if (m % ssymv_bs==0) blocks = m / ssymv_bs; else blocks = m / ssymv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); dim3 threads_u(ssymv_bs, 1, 1); /* * If matrix size is multiple of ssymv_bs, we use a specific code. * otherwise, we call the generic case. 
*/ if(m % ssymv_bs == 0 ) { hipLaunchKernelGGL(( magmablas_ssymv_130_L_special) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } else{ magma_int_t m_mod_thread_x = m%ssymv_bs - 1; hipLaunchKernelGGL(( magmablas_ssymv_130_L_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x); } hipLaunchKernelGGL(( magmablas_ssymv_130_L_update), dim3(grid), dim3(threads_u), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } /************************************************************************* Purpose ======= magmablas_ssymv2 performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. the interface of magmablas_ssymv2 is different from magmablas_ssymv in the last argument dC_work As magma implements ssymv through two steps: 1) perform the multiplication in each thread blocks and put the intermediate value in a space of device memory which we call working space. dC_work is the working space 2) sum the intermediate values and store the final result in y. the size of dC_work is lda * (n/thread_x + (n%thread_x !=0) where thread_x = 64 magamblasw_ssymv requires users to explicitly a working space, while magmablas_ssymv is a wrapper routine of magmabalsw_ssymv allocating the working space inside the routine and provides the same interface with cublas. If users need to call ssymv frequently, we suggest to use magmablas_ssymv2 instead of magmablas_ssymv. As the overhead of allocating and free in device memory in magmablas_ssymv would hurt performance. Our tests show that this penalty is about 10Gflop/s when matrix size is around 10000. */ extern "C" magma_int_t magmablas_ssymv2_130( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work, magma_int_t workspace) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasSsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { magmablas_ssymv_130_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_ssymv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. 
N must be at least zero. Unchanged on exit. ALPHA - REAL*16 . On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - REAL*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - REAL*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. */ extern "C" magma_int_t magmablas_ssymv_130( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasSsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { float *dC_work; magma_int_t blocks = n / thread_x + (n % thread_x != 0); magma_int_t workspace = lda * (blocks + 1); /* TODO: need to add a MAGMA context to handle workspaces */ hipblasAlloc( workspace, sizeof(float), (void**)&dC_work ) ; hipblasGetError( ) ; magmablas_ssymv2_130( uplo, n, alpha, A, lda, X, incx, beta, Y, incy, dC_work, workspace); hipblasFree(dC_work); hipblasGetError( ) ; } return MAGMA_SUCCESS; } #endif /* (GPUSHMEM < 200) */
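The workspace description above can be made concrete with a quick calculation. For thread_x = 64 the wrapper computes blocks = n / thread_x + (n % thread_x != 0) and then allocates lda * (blocks + 1) elements, one block-row more than the lda * blocks stated in the comment. A throwaway illustration with made-up sizes:

#include <cstdio>

// Worked example of the ssymv workspace size used by the wrapper above (thread_x = 64).
int main() {
  const int thread_x = 64;
  const int n = 10000, lda = 10048;                       // example sizes (lda padded)
  const int blocks = n / thread_x + (n % thread_x != 0);  // 157 blocks for n = 10000
  const size_t workspace = (size_t)lda * (blocks + 1);    // elements of float
  printf("blocks = %d, workspace = %zu floats (%.1f MiB)\n",
         blocks, workspace, workspace * sizeof(float) / (1024.0 * 1024.0));
  return 0;
}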
bd81cc95d24cde1357a54b1eb0d941995bbde645.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:51 2012 */ #include "common_magma.h" #define PRECISION_s /*The version for fermi can be found in ssymv_fermi.cu */ #if (GPUSHMEM < 200) #define magmablas_ssymv_130 magmablas_ssymv #define ssymv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Functions for each specific cases - Lower case */ __global__ void magmablas_ssymv_130_L_special( magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; __shared__ float la [quarter_thread_x][thread_x+1]; /* Why +3? */ __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += ty_* lda + tx_ ; if( ty == 0 ){ buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_ ; ty = ty_ ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j +=8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_ * 4 + 4) ; i++){ if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = la[0][ i * bank_shift + tx_]; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res1,0); } __syncthreads(); MAGMA_S_SET2REAL(res, 0) ; A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); float res2; MAGMA_S_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); MAGMA_S_SET2REAL(res,0); A-=half_thread_x *lda ; MAGMA_S_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= 
la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { MAGMA_S_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_S_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda * blkc * thread_x; x= x - blkc * thread_x *incx ; A+=4 * ty* lda ; A+=tx; magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(magma_int_t j=0;j<4;j++) { b[j] = buff[ty_*4+j]; } #pragma unroll for(magma_int_t i=0; i<thread_x*blkc; i += thread_x ) { res_ = MAGMA_S_ZERO; count++; if(ty == 0) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++) { #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda]; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j]; la[( j + ty * 4)][tx] = tr[j]; } __syncthreads(); MAGMA_S_SET2REAL(res_,0); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res_+=la[tx_][ty_*4+j] * b[j]; } b[4+k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void magmablas_ssymv_130_L_generic(magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t m_mod_thread_x) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; __shared__ float la [quarter_thread_x][thread_x+3]; __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; magma_int_t break_d = thread_x * blkc; const magma_int_t td = (thread_x * ty ) + tx; magma_int_t tx_ = td % half_thread_x; magma_int_t ty_ = td / half_thread_x; WC+= break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += lda * ty_; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_S_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += 
trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx_; A += trackA ; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-=trackA; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_ ; ty = ty_ ; __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(ty_*4+4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][i*bank_shift+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res1,0); } __syncthreads(); MAGMA_S_SET2REAL(res,0); if( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A+= trackA+half_thread_x*lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ if( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-= trackA+half_thread_x*lda ; A+=tx_ ; A+= half_thread_x + half_thread_x *lda ; } else { A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); float res2; MAGMA_S_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); MAGMA_S_SET2REAL(res,0); MAGMA_S_SET2REAL(res_,0); A-=half_thread_x *lda ; if( blkc == ( gridDim.x - 1 ) ) { A-=tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A+= trackA ; #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(tr[j/8], 99999); } else tr[j/8] = A[ j * lda]; A-=trackA; A+=tx_; } else { #pragma unroll for(magma_int_t j =0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+= tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0] + 
la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_S_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { MAGMA_S_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_S_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda*break_d; x= x - break_d *incx ; A+=4 * ty* lda ; if( blkc == ( gridDim.x - 1 ) ) { if(tx <= m_mod_thread_x ) A+=tx; else A+=m_mod_thread_x; } else{ A+=tx; } magma_int_t wc_c = 0 ; magma_int_t count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) b[j] = buff[ty_*4+j]; #pragma unroll for(magma_int_t i=0; i< break_d; i += thread_x ){ MAGMA_S_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( magma_int_t k=0;k<4;k++){ #pragma unroll for(magma_int_t j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); MAGMA_S_SET2REAL(res_, 0) ; #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(magma_int_t k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { magma_int_t k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx] ; WC[0+lda*(blkc)] = res; } } __global__ void magmablas_ssymv_130_L_update(magma_int_t n, float alpha, const float* A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * thread_x + tx ; float Ca; MAGMA_S_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i<n; i+=thread_x){ Ca += WC[0] ; WC += thread_x; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_ssymv_130_L(magma_int_t m, float alpha, const float *A, magma_int_t lda, const float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work) { magma_int_t blocks; if (m % ssymv_bs==0) blocks = m / ssymv_bs; else blocks = m / ssymv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); dim3 threads_u(ssymv_bs, 1, 1); /* * If matrix size is multiple of ssymv_bs, we use a specific code. * otherwise, we call the generic case. 
*/ if(m % ssymv_bs == 0 ) { magmablas_ssymv_130_L_special <<< grid, threads, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } else{ magma_int_t m_mod_thread_x = m%ssymv_bs - 1; magmablas_ssymv_130_L_generic <<< grid, threads, 0, magma_stream >>> ( m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x); } magmablas_ssymv_130_L_update<<< grid, threads_u, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } /************************************************************************* Purpose ======= magmablas_ssymv2 performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. the interface of magmablas_ssymv2 is different from magmablas_ssymv in the last argument dC_work As magma implements ssymv through two steps: 1) perform the multiplication in each thread blocks and put the intermediate value in a space of device memory which we call working space. dC_work is the working space 2) sum the intermediate values and store the final result in y. the size of dC_work is lda * (n/thread_x + (n%thread_x !=0) where thread_x = 64 magamblasw_ssymv requires users to explicitly a working space, while magmablas_ssymv is a wrapper routine of magmabalsw_ssymv allocating the working space inside the routine and provides the same interface with cublas. If users need to call ssymv frequently, we suggest to use magmablas_ssymv2 instead of magmablas_ssymv. As the overhead of allocating and free in device memory in magmablas_ssymv would hurt performance. Our tests show that this penalty is about 10Gflop/s when matrix size is around 10000. */ extern "C" magma_int_t magmablas_ssymv2_130( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work, magma_int_t workspace) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasSsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { magmablas_ssymv_130_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_ssymv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - REAL*16 . On entry, ALPHA specifies the scalar alpha. 
Unchanged on exit. A - REAL*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - REAL*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. */ extern "C" magma_int_t magmablas_ssymv_130( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasSsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { float *dC_work; magma_int_t blocks = n / thread_x + (n % thread_x != 0); magma_int_t workspace = lda * (blocks + 1); /* TODO: need to add a MAGMA context to handle workspaces */ cublasAlloc( workspace, sizeof(float), (void**)&dC_work ) ; cublasGetError( ) ; magmablas_ssymv2_130( uplo, n, alpha, A, lda, X, incx, beta, Y, incy, dC_work, workspace); cublasFree(dC_work); cublasGetError( ) ; } return MAGMA_SUCCESS; } #endif /* (GPUSHMEM < 200) */
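/*
 * Usage sketch (an illustration with stated assumptions, not part of the MAGMA
 * sources): the notes above recommend magmablas_ssymv2_130 over magmablas_ssymv_130
 * when ssymv is called repeatedly, because the lda*(blocks+1) float workspace can
 * then be allocated once and reused. Sizes mirror the wrapper above (thread_x == 64);
 * dA, dX, dY are assumed to be valid device pointers set up elsewhere, and error
 * checking is omitted.
 */
static void example_repeated_ssymv_130(magma_int_t n, magma_int_t lda,
                                       const float *dA, const float *dX, float *dY,
                                       magma_int_t nb_calls)
{
    float *dC_work = NULL;
    magma_int_t blocks    = n / 64 + (n % 64 != 0);   /* thread_x == 64, as documented above */
    magma_int_t workspace = lda * (blocks + 1);
    cublasAlloc(workspace, sizeof(float), (void**)&dC_work);
    for (magma_int_t call = 0; call < nb_calls; ++call) {
        /* lower-triangular ssymv: y := 1*A*x + 0*y, reusing the same workspace */
        magmablas_ssymv2_130('L', n, 1.f, dA, lda, dX, 1, 0.f, dY, 1,
                             dC_work, workspace);
    }
    cublasFree(dC_work);
}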
5a3c76f9a5e3e6a609eeee0fc95efb82bc18a297.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/contains.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/replace.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/device_memory_resource.hpp> namespace cudf { namespace { /** * @brief Device function that searches for the specified lookup_key * in the list at index `row_index`, and writes out the index of the * first match to the output. * * This function is called once per row of the `input` column * If the lookup_key is not found, (-1) is returned for that list row. */ template <bool has_nulls> void __device__ search_each_list(size_type row_index, column_device_view input, mutable_column_device_view output, string_scalar_device_view lookup_key) { if (has_nulls && input.is_null(row_index)) { // List row is null. output.element<size_type>(row_index) = -1; // Not found. return; } auto offsets{input.child(0)}; auto start_index{offsets.element<size_type>(row_index)}; auto end_index{offsets.element<size_type>(row_index + 1)}; auto key_column{input.child(1).child(0)}; for (size_type list_element_index{start_index}; list_element_index < end_index; ++list_element_index) { if (has_nulls && key_column.is_null(list_element_index)) { continue; // Skip the list-element with null-key. } // List element's key is not null. Check if it matches the lookup_key. if (key_column.element<string_view>(list_element_index) == lookup_key.value()) { output.element<size_type>(row_index) = list_element_index; return; } } output.element<size_type>(row_index) = -1; // Not found. } /** * @brief The map-lookup CUDA kernel, which searches for the specified `lookup_key` * string in each list<string> row of the `input` column. * * The kernel writes the index (into the `input` list-column's child) where the `lookup_key` * is found, to the `output` column. If the `lookup_key` is not found, (-1) is written instead. * * The produces one output row per input, with no nulls. The output may then be used * with `cudf::gather()`, to find the values corresponding to the `lookup_key`. */ template <int block_size, bool has_nulls> __launch_bounds__(block_size) __global__ void gpu_find_first(column_device_view input, mutable_column_device_view output, string_scalar_device_view lookup_key) { size_type tid = blockIdx.x * block_size + threadIdx.x; size_type stride = block_size * gridDim.x; // Each CUDA thread processes one row of `input`. 
Each row is a list<string>. // So each thread searches for `lookup_key` in one row of the input column, // and writes its index out to output. while (tid < input.size()) { search_each_list<has_nulls>(tid, input, output, lookup_key); tid += stride; } } /** * @brief Function to generate a gather-map, based on the location of the `lookup_key` * string in each row of the input. * * The gather map may then be used to gather the values corresponding to the `lookup_key` * for each row. */ template <bool has_nulls> std::unique_ptr<column> get_gather_map_for_map_values(column_view const &input, string_scalar &lookup_key, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { constexpr size_type block_size{256}; cudf::detail::grid_1d grid{input.size(), block_size}; auto input_device_view = cudf::column_device_view::create(input, stream); auto lookup_key_device_view{get_scalar_device_view(lookup_key)}; auto gather_map = make_numeric_column(data_type{cudf::type_to_id<size_type>()}, input.size(), mask_state::ALL_VALID, stream, mr); auto output_view = mutable_column_device_view::create(gather_map->mutable_view(), stream); hipLaunchKernelGGL(( gpu_find_first<block_size, has_nulls>), dim3(grid.num_blocks), dim3(block_size), 0, stream.value(), *input_device_view, *output_view, lookup_key_device_view); CHECK_CUDA(stream.value()); return gather_map; } /** * @brief a defensive check for the map column that is going to be processed */ void map_input_check(column_view const &map_column, rmm::cuda_stream_view stream) { CUDF_EXPECTS(map_column.type().id() == type_id::LIST, "Expected LIST<STRUCT<key,value>>."); lists_column_view lcv{map_column}; column_view structs_column = lcv.get_sliced_child(stream); CUDF_EXPECTS(structs_column.type().id() == type_id::STRUCT, "Expected LIST<STRUCT<key,value>>."); CUDF_EXPECTS(structs_column.num_children() == 2, "Expected LIST<STRUCT<key,value>>."); CUDF_EXPECTS(structs_column.child(0).type().id() == type_id::STRING, "Expected LIST<STRUCT<key,value>>."); CUDF_EXPECTS(structs_column.child(1).type().id() == type_id::STRING, "Expected LIST<STRUCT<key,value>>."); } } // namespace namespace jni { std::unique_ptr<column> map_contains(column_view const &map_column, string_scalar lookup_key, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // Defensive checks. map_input_check(map_column, stream); lists_column_view lcv(map_column); structs_column_view scv(lcv.child()); std::vector<column_view> children; children.push_back(lcv.offsets()); children.push_back(scv.child(0)); column_view list_of_keys(map_column.type(), map_column.size(), nullptr, map_column.null_mask(), map_column.null_count(), 0, children); auto contains_column = lists::contains(list_of_keys, lookup_key); // null will be skipped in all-aggregation when checking if all rows contain the key, // so replace all nulls with 0. std::unique_ptr<cudf::scalar> replacement = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::BOOL8)); replacement->set_valid_async(true); using ScalarType = cudf::scalar_type_t<int8_t>; static_cast<ScalarType *>(replacement.get())->set_value(0); auto result = cudf::replace_nulls(contains_column->view(), *replacement); return result; } std::unique_ptr<column> map_lookup(column_view const &map_column, string_scalar lookup_key, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // Defensive checks. 
map_input_check(map_column, stream); if (map_column.size() == 0) { return make_empty_column(cudf::data_type{cudf::type_id::STRING}); } lists_column_view lcv{map_column}; column_view structs_column = lcv.get_sliced_child(stream); // Two-pass plan: construct gather map, and then gather() on structs_column.child(1). Plan A. // (Can do in one pass perhaps, but that's Plan B.) auto gather_map = has_nulls ? get_gather_map_for_map_values<true>(map_column, lookup_key, stream, mr) : get_gather_map_for_map_values<false>(map_column, lookup_key, stream, mr); // Gather map is now available. auto values_column = structs_column.child(1); auto table_for_gather = table_view{std::vector<cudf::column_view>{values_column}}; auto gathered_table = cudf::detail::gather(table_for_gather, gather_map->view(), out_of_bounds_policy::NULLIFY, detail::negative_index_policy::NOT_ALLOWED, stream, mr); return std::make_unique<cudf::column>(std::move(gathered_table->get_column(0))); } } // namespace jni } // namespace cudf
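/*
 * Worked example (hedged illustration, not part of the original file): for a map
 * column with three rows
 *     row 0: {"a": "1", "b": "2"}
 *     row 1: {}                      (an empty, non-null map)
 *     row 2: {"b": "3"}
 * the flattened children are offsets = [0, 2, 2, 3], keys = ["a", "b", "b"] and
 * values = ["1", "2", "3"]. With lookup_key == "b", gpu_find_first writes the
 * gather map [1, -1, 2]; gathering the values child with
 * out_of_bounds_policy::NULLIFY then yields ["2", null, "3"], which is what
 * map_lookup returns, while map_contains reports [true, false, true] for the
 * same input.
 */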
5a3c76f9a5e3e6a609eeee0fc95efb82bc18a297.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/contains.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/replace.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table_view.hpp> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/mr/device/device_memory_resource.hpp> namespace cudf { namespace { /** * @brief Device function that searches for the specified lookup_key * in the list at index `row_index`, and writes out the index of the * first match to the output. * * This function is called once per row of the `input` column * If the lookup_key is not found, (-1) is returned for that list row. */ template <bool has_nulls> void __device__ search_each_list(size_type row_index, column_device_view input, mutable_column_device_view output, string_scalar_device_view lookup_key) { if (has_nulls && input.is_null(row_index)) { // List row is null. output.element<size_type>(row_index) = -1; // Not found. return; } auto offsets{input.child(0)}; auto start_index{offsets.element<size_type>(row_index)}; auto end_index{offsets.element<size_type>(row_index + 1)}; auto key_column{input.child(1).child(0)}; for (size_type list_element_index{start_index}; list_element_index < end_index; ++list_element_index) { if (has_nulls && key_column.is_null(list_element_index)) { continue; // Skip the list-element with null-key. } // List element's key is not null. Check if it matches the lookup_key. if (key_column.element<string_view>(list_element_index) == lookup_key.value()) { output.element<size_type>(row_index) = list_element_index; return; } } output.element<size_type>(row_index) = -1; // Not found. } /** * @brief The map-lookup CUDA kernel, which searches for the specified `lookup_key` * string in each list<string> row of the `input` column. * * The kernel writes the index (into the `input` list-column's child) where the `lookup_key` * is found, to the `output` column. If the `lookup_key` is not found, (-1) is written instead. * * The produces one output row per input, with no nulls. The output may then be used * with `cudf::gather()`, to find the values corresponding to the `lookup_key`. */ template <int block_size, bool has_nulls> __launch_bounds__(block_size) __global__ void gpu_find_first(column_device_view input, mutable_column_device_view output, string_scalar_device_view lookup_key) { size_type tid = blockIdx.x * block_size + threadIdx.x; size_type stride = block_size * gridDim.x; // Each CUDA thread processes one row of `input`. Each row is a list<string>. 
// So each thread searches for `lookup_key` in one row of the input column, // and writes its index out to output. while (tid < input.size()) { search_each_list<has_nulls>(tid, input, output, lookup_key); tid += stride; } } /** * @brief Function to generate a gather-map, based on the location of the `lookup_key` * string in each row of the input. * * The gather map may then be used to gather the values corresponding to the `lookup_key` * for each row. */ template <bool has_nulls> std::unique_ptr<column> get_gather_map_for_map_values(column_view const &input, string_scalar &lookup_key, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { constexpr size_type block_size{256}; cudf::detail::grid_1d grid{input.size(), block_size}; auto input_device_view = cudf::column_device_view::create(input, stream); auto lookup_key_device_view{get_scalar_device_view(lookup_key)}; auto gather_map = make_numeric_column(data_type{cudf::type_to_id<size_type>()}, input.size(), mask_state::ALL_VALID, stream, mr); auto output_view = mutable_column_device_view::create(gather_map->mutable_view(), stream); gpu_find_first<block_size, has_nulls><<<grid.num_blocks, block_size, 0, stream.value()>>>( *input_device_view, *output_view, lookup_key_device_view); CHECK_CUDA(stream.value()); return gather_map; } /** * @brief a defensive check for the map column that is going to be processed */ void map_input_check(column_view const &map_column, rmm::cuda_stream_view stream) { CUDF_EXPECTS(map_column.type().id() == type_id::LIST, "Expected LIST<STRUCT<key,value>>."); lists_column_view lcv{map_column}; column_view structs_column = lcv.get_sliced_child(stream); CUDF_EXPECTS(structs_column.type().id() == type_id::STRUCT, "Expected LIST<STRUCT<key,value>>."); CUDF_EXPECTS(structs_column.num_children() == 2, "Expected LIST<STRUCT<key,value>>."); CUDF_EXPECTS(structs_column.child(0).type().id() == type_id::STRING, "Expected LIST<STRUCT<key,value>>."); CUDF_EXPECTS(structs_column.child(1).type().id() == type_id::STRING, "Expected LIST<STRUCT<key,value>>."); } } // namespace namespace jni { std::unique_ptr<column> map_contains(column_view const &map_column, string_scalar lookup_key, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // Defensive checks. map_input_check(map_column, stream); lists_column_view lcv(map_column); structs_column_view scv(lcv.child()); std::vector<column_view> children; children.push_back(lcv.offsets()); children.push_back(scv.child(0)); column_view list_of_keys(map_column.type(), map_column.size(), nullptr, map_column.null_mask(), map_column.null_count(), 0, children); auto contains_column = lists::contains(list_of_keys, lookup_key); // null will be skipped in all-aggregation when checking if all rows contain the key, // so replace all nulls with 0. std::unique_ptr<cudf::scalar> replacement = cudf::make_numeric_scalar(cudf::data_type(cudf::type_id::BOOL8)); replacement->set_valid_async(true); using ScalarType = cudf::scalar_type_t<int8_t>; static_cast<ScalarType *>(replacement.get())->set_value(0); auto result = cudf::replace_nulls(contains_column->view(), *replacement); return result; } std::unique_ptr<column> map_lookup(column_view const &map_column, string_scalar lookup_key, bool has_nulls, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { // Defensive checks. 
map_input_check(map_column, stream); if (map_column.size() == 0) { return make_empty_column(cudf::data_type{cudf::type_id::STRING}); } lists_column_view lcv{map_column}; column_view structs_column = lcv.get_sliced_child(stream); // Two-pass plan: construct gather map, and then gather() on structs_column.child(1). Plan A. // (Can do in one pass perhaps, but that's Plan B.) auto gather_map = has_nulls ? get_gather_map_for_map_values<true>(map_column, lookup_key, stream, mr) : get_gather_map_for_map_values<false>(map_column, lookup_key, stream, mr); // Gather map is now available. auto values_column = structs_column.child(1); auto table_for_gather = table_view{std::vector<cudf::column_view>{values_column}}; auto gathered_table = cudf::detail::gather(table_for_gather, gather_map->view(), out_of_bounds_policy::NULLIFY, detail::negative_index_policy::NOT_ALLOWED, stream, mr); return std::make_unique<cudf::column>(std::move(gathered_table->get_column(0))); } } // namespace jni } // namespace cudf
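/*
 * Usage sketch (hedged, not part of the original file): how a host caller might
 * invoke the entry point above. It assumes a libcudf/rmm build of roughly this
 * vintage where rmm::cuda_stream_default is visible through the included headers,
 * and that `maps` is a LIST<STRUCT<STRING,STRING>> column produced elsewhere;
 * the lookup key "b" is purely illustrative.
 */
std::unique_ptr<cudf::column> lookup_b(cudf::column_view const &maps,
                                       rmm::mr::device_memory_resource *mr)
{
  // One STRING row per input row: the value paired with "b", or null if absent.
  return cudf::jni::map_lookup(maps, cudf::string_scalar{"b"}, /*has_nulls=*/true,
                               rmm::cuda_stream_default, mr);
}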
all_kernels_faceted_ellipsoid.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeFacetedEllipsoid.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeFacetedEllipsoid template hipError_t gpu_hpmc_free_volume<ShapeFacetedEllipsoid>(const hpmc_free_volume_args_t &args, const typename ShapeFacetedEllipsoid::param_type *d_params); template hipError_t gpu_hpmc_update<ShapeFacetedEllipsoid>(const hpmc_args_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeFacetedEllipsoid>(const hpmc_implicit_args_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject<ShapeFacetedEllipsoid>(const hpmc_implicit_args_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template hipError_t gpu_hpmc_insert_depletants_queue<ShapeFacetedEllipsoid>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeFacetedEllipsoid>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); }; // end namespace detail } // end namespace hpmc
all_kernels_faceted_ellipsoid.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeFacetedEllipsoid.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeFacetedEllipsoid template cudaError_t gpu_hpmc_free_volume<ShapeFacetedEllipsoid>(const hpmc_free_volume_args_t &args, const typename ShapeFacetedEllipsoid::param_type *d_params); template cudaError_t gpu_hpmc_update<ShapeFacetedEllipsoid>(const hpmc_args_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeFacetedEllipsoid>(const hpmc_implicit_args_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeFacetedEllipsoid>(const hpmc_implicit_args_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeFacetedEllipsoid>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeFacetedEllipsoid>(const hpmc_implicit_args_new_t& args, const typename ShapeFacetedEllipsoid::param_type *d_params); }; // end namespace detail } // end namespace hpmc
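// Pattern note (hedged): HOOMD keeps one such translation unit per shape class so
// that each set of HPMC kernel instantiations is compiled in isolation. An
// analogous file for another shape -- ShapeSphere is assumed here purely for
// illustration -- would only swap the header and the template argument, e.g.
//
//   #include "ShapeSphere.h"
//   template cudaError_t gpu_hpmc_update<ShapeSphere>(const hpmc_args_t& args,
//                                                     const typename ShapeSphere::param_type *d_params);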
5902e0a1d649dffedca499eb1f2c258b01ff8742.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <hip/hip_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/for_each.h> #include <thrust/scatter.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_iterator.h> #include "common.h" #define NUM_THREADS 256 struct GridMetadata { int num_particles; // Approximately, but not exactly, equal to square_size*side_count. double side_size; double square_size; int side_count; int count; GridMetadata(int num_particles_v, double side_size_v, double square_size_v, int side_count_v): num_particles(num_particles_v), side_size(side_size_v), square_size(square_size_v), side_count(side_count_v), count(side_count_v*side_count_v) { } __host__ __device__ int particle_to_flat_square_idx(const particle_t& p) const { int x_idx = (int) (p.x / square_size); int y_idx = (int) (p.y / square_size); return square_to_flat_square_idx(x_idx, y_idx); } __host__ __device__ int square_to_flat_square_idx(int square_x, int square_y) const { const int idx = square_x + side_count*square_y; return idx; } }; // Functor to map a particle to its grid location. struct GridSquareCmIndex : public thrust::unary_function<particle_t&, int> { const GridMetadata g; GridSquareCmIndex(const GridMetadata g_v) : g(g_v) { } __host__ __device__ int operator()(particle_t& p) const { return g.particle_to_flat_square_idx(p); } }; // Functor to move a particle. struct MoveParticle : public thrust::unary_function<particle_t&, void> { const GridMetadata g; MoveParticle(const GridMetadata g_v) : g(g_v) { } __host__ __device__ void operator()(particle_t& p) { // // slightly simplified Velocity Verlet integration // conserves energy better than explicit Euler method // p.vx += p.ax * dt; p.vy += p.ay * dt; p.x += p.vx * dt; p.y += p.vy * dt; // // bounce from walls // while(p.x < 0 || p.x > g.side_size) { p.x = p.x < 0 ? -(p.x) : 2*g.side_size-p.x; p.vx = -(p.vx); } while(p.y < 0 || p.y > g.side_size) { p.y = p.y < 0 ? -(p.y) : 2*g.side_size-p.y; p.vy = -(p.vy); } } }; struct IgnoreZeroPredicate : public thrust::unary_function<int, bool> { __host__ __device__ bool operator()(int i) { return i != 0; } }; __device__ void apply_force_gpu(particle_t& particle, particle_t& neighbor) { double dx = neighbor.x - particle.x; double dy = neighbor.y - particle.y; double r2 = dx * dx + dy * dy; if( r2 > cutoff*cutoff ) return; //r2 = fmax( r2, min_r*min_r ); r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r; double r = sqrt( r2 ); // // very simple short-range repulsive force // double coef = ( 1 - cutoff / r ) / r2 / mass; particle.ax += coef * dx; particle.ay += coef * dy; } __device__ int num_particles_in_square(int square_idx, const int* grid_offsets, const GridMetadata grid) { int square_start = grid_offsets[square_idx]; // For all but the last grid square, the number of particles in the square // equals the next square's offset minus its own offset. The last square // just has all the remaining particles. int square_end = (square_idx < (grid.count - 1)) ? grid_offsets[square_idx+1] : grid.num_particles; return square_end - square_start; } __global__ void compute_forces_gpu (particle_t* particles, const int* grid_offsets, const GridMetadata grid) { // Get grid square ID. Each call to this function computes forces // for all of the particles in one grid square. 
const int square_idx = threadIdx.x + blockIdx.x * blockDim.x; if (square_idx >= grid.count) return; const int square_x = square_idx % grid.side_count; const int square_y = square_idx / grid.side_count; const int first_particle_idx = grid_offsets[square_idx]; const int num_ps = num_particles_in_square(square_idx, grid_offsets, grid); for (int particle_idx = first_particle_idx; particle_idx < first_particle_idx + num_ps; particle_idx++) { particles[particle_idx].ax = particles[particle_idx].ay = 0; // Iterate only over neighboring grid squares. Note that we could reorder // this to access each neighbor only once, which would probably be better. for (int y_offset = -1; y_offset <= 1; y_offset++) { const int neighbor_y = square_y + y_offset; if (neighbor_y < 0 || neighbor_y >= grid.side_count) { continue; } for (int x_offset = -1; x_offset <= 1; x_offset++) { const int neighbor_x = square_x + x_offset; if (neighbor_x < 0 || neighbor_x >= grid.side_count) { continue; } const int neighbor_square_idx = grid.square_to_flat_square_idx(neighbor_x, neighbor_y); // Now we iterate over all the particles in the neighbor and apply // forces to our particle. const int first_neighbor_particle_idx = grid_offsets[neighbor_square_idx]; const int num_neighbor_ps = num_particles_in_square(neighbor_square_idx, grid_offsets, grid); for (int neighbor_particle_idx = first_neighbor_particle_idx; neighbor_particle_idx < first_neighbor_particle_idx + num_neighbor_ps; neighbor_particle_idx++) { apply_force_gpu(particles[particle_idx], particles[neighbor_particle_idx]); } } } } } /* * @param particle_square_idx_storage is a preallocated device array of size * at least grid.num_particles. Its value can be arbitrary. It will be * clobbered. * @param grid_offsets will be populated with offsets corresponding to grid * squares. [grid_offsets[i], grid_offsets[i+1]) (note the inclusiveness!) * is the set of indices in particles for grid square i, after this function * returns. * @param grid_idx_storage is a preallocated device array of size at least * grid.count. Its value can be arbitrary. It will be clobbered. * @param grid_count_storage is a preallocated device array of size at least * grid.count. Its value can be arbitrary. It will be clobbered. */ void sort_to_bins( thrust::device_vector<particle_t>& particles, thrust::device_vector<int>& particle_square_idx_storage, thrust::device_vector<int>& grid_offsets, thrust::device_vector<int>& grid_idx_storage, thrust::device_vector<int>& grid_count_storage, const GridMetadata& grid) { thrust::fill(particle_square_idx_storage.begin(), particle_square_idx_storage.end(), 0); thrust::transform( particles.begin(), particles.end(), particle_square_idx_storage.begin(), GridSquareCmIndex(grid)); // Sort the particles by column-major order in the grid. Thrust offers no // sort function that leaves the keys in place, so we must actually allocate // memory for the square indices. Another option is to use a comparison // functor; that would avoid memory allocation but end up invoking a less // efficient sorting algorithm that works on user-defined functors (rather // than the fast int-sorter). It could be worth trying that, too. // // I think we could do much better than sort_by_key here. This makes the // algorithm O(n log n) when we can do O(n). 
Here is a sketch of a better // algorithm that seems to equal the asymptotic efficiency of a graph-based // or message-passing method, while having very low constant factors when // implemented on the GPU: // * Associate with each particle its grid square coordinates (x,y). // * Use a LSH algorithm to hash the grid square coordinates. LSH means // that the absolute difference between the hashes of two points is (with // high probability) close to the L2 distance of the two points. // * Sort the particles according to the hash. The sorting algorithm we use // is important: // - Notice that, since particles may move // only a bounded L2 distance in an iteration, there is a bound on the // distance any particle needs to travel in this sorting step. That is, // the particles are already "almost sorted." // - Therefore we want a sorting algorithm that can be implemented quickly // on the GPU and that takes only O(k N) time, where k is the maximum // (or average) distance moved by any element. // - There are fast algorithms for nearly-sorted lists, but I haven't // found on that is parallelizable yet. That seems very possible, // though. So I'm leaving this for future work, due to time // constraints. // Unrelated note: Thrust's sort_by_key seems to have a bug (or exercises a // memory leak in my code?) that kicks in for particle counts between 18,000 // and 26,000 and causes this line to crash with a bad_alloc error. For // larger or smaller particle counts, everything seems fine, which is // terrifying. thrust::sort_by_key( particle_square_idx_storage.begin(), particle_square_idx_storage.end(), particles.begin()); // Compute the starting offset of each grid square (that is, the index of the // first particle in particles contained in each square). // I cannot find a totally natural way to do this using Thrust, so we have to // jump through some hoops. We reduce_by_key to compute the number of // particles in each nonempty grid square and the corresponding square // indices. Then we scatter into a vector that is initially filled with 0s, // mapping each count according to its index. Then we scan with plus, so // that the value at the ith location is the number of particles in grid // squares preceding the ith grid square. thrust::fill(grid_idx_storage.begin(), grid_count_storage.end(), 0); thrust::fill(grid_count_storage.begin(), grid_count_storage.end(), 0); thrust::reduce_by_key( particle_square_idx_storage.begin(), particle_square_idx_storage.end(), thrust::make_constant_iterator(1), grid_idx_storage.begin(), grid_count_storage.begin()); // Here we set grid_offsets[grid_idx_storage[i]] = grid_count_storage[i] // for all i. Since some grid squares might have been empty, there might // be several trailing elements of grid_idx_storage that are zero. So we // use scatter_if() to ignore those. thrust::fill(grid_offsets.begin(), grid_offsets.end(), 0); thrust::scatter_if( grid_count_storage.begin(), grid_count_storage.end(), grid_idx_storage.begin(), grid_count_storage.begin(), grid_offsets.begin(), IgnoreZeroPredicate()); thrust::exclusive_scan( grid_offsets.begin(), grid_offsets.end(), grid_offsets.begin()); } void simulate_forces(thrust::device_vector<particle_t>& particles, const thrust::device_vector<int>& grid_offsets, const GridMetadata& grid) { // The communication pattern is not simple, so we have to resort to writing // device code ourselves here. 
particle_t* d_particles = thrust::raw_pointer_cast(particles.data()); const int* d_grid_offsets = thrust::raw_pointer_cast(grid_offsets.data()); int num_blocks = div_round_up(grid.count, NUM_THREADS); hipLaunchKernelGGL(( compute_forces_gpu), dim3(num_blocks), dim3(NUM_THREADS), 0, 0, d_particles, d_grid_offsets, grid); } void simulate_movement(thrust::device_vector<particle_t>& particles, const GridMetadata& grid) { thrust::for_each(particles.begin(), particles.end(), MoveParticle(grid)); } int main( int argc, char **argv ) { // This takes a few seconds to initialize the runtime hipDeviceSynchronize(); if (find_option( argc, argv, "-h" ) >= 0) { printf( "Options:\n" ); printf( "-h to see this help\n" ); printf( "-n <int> to set the number of particles\n" ); printf( "-o <filename> to specify the output file name\n" ); printf( "-no turns off all correctness checks and particle output\n"); return 0; } const int n = read_int( argc, argv, "-n", 1000 ); const bool fast = (find_option( argc, argv, "-no" ) != -1); const char *savename = read_string( argc, argv, "-o", NULL ); const char *sumname = read_string( argc, argv, "-s", NULL ); FILE *fsave = ((!fast) && savename) ? fopen( savename, "w" ) : NULL; const double size = set_size( n ); // Particles are stored in a flattened array of squares. Each square // is large enough that particles can only move across 1 square per // simulated time step, but small enough that the expected number of // particles (and hopefully the maximum number) per square is a small // constant. // Following NVIDIA's example particle simulator, we store the particles // in a flattened array and sort them when we need to change the grid // structure. That is, we use one vector // of size n to store the particles in sorted order (column major by grid // square, with arbitrary order within each square), and a second vector // to store the starting offsets for the // particles contained in each square. Then rebuilding the grid involves // an in-place sort and recomputing the index. const double square_size = cutoff + 0.0001; const int side_count = div_round_up_f(size, square_size); const GridMetadata grid(n, size, square_size, side_count); double init_on_host_time = read_timer(); //TODO: This part of the initialization is serial. It is embarrassingly // parallel and could easily be done on the GPU. particle_t* particles = init_particles(n); thrust::host_vector<particle_t> ps(particles, particles+n); init_on_host_time = read_timer() - init_on_host_time; double init_on_device_time = read_timer(); // Copy the particles to the GPU. thrust::device_vector<particle_t> d_ps = ps; // Allocate the structure that maps grid locations to offsets in // d_ps. Like d_ps, this will be populated inside the simulation loop, // and for now is uninitialized. thrust::device_vector<int> d_grid_offsets(grid.count); // Allocate scratch space that the algorithm will need. thrust::device_vector<int> d_particle_square_idx_storage(grid.num_particles); thrust::device_vector<int> d_grid_idx_storage(grid.count); thrust::device_vector<int> d_grid_count_storage(grid.count); init_on_device_time = read_timer() - init_on_device_time; // // simulate a number of time steps // double simulation_time = read_timer(); for (int step = 0; step < NSTEPS; step++) { // First, we must build the grid. sort_to_bins(d_ps, d_particle_square_idx_storage, d_grid_offsets, d_grid_idx_storage, d_grid_count_storage, grid); // Now we can simulate forces and movement. 
simulate_forces(d_ps, d_grid_offsets, grid); simulate_movement(d_ps, grid); if( fsave && (step%SAVEFREQ) == 0 ) { // Copy the particles back to the CPU. ps = d_ps; save( fsave, n, ps.data()); } } hipDeviceSynchronize(); simulation_time = read_timer( ) - simulation_time; printf( "CPU-GPU copy time = %g seconds\n", init_on_device_time); printf( "n = %d, simulation time = %g seconds\n", n, simulation_time ); free(particles); if( fsave ) fclose( fsave ); return 0; }
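// Hedged illustration (not part of the original file; it reuses the includes and
// the IgnoreZeroPredicate defined above): the counts -> offsets bookkeeping of
// sort_to_bins() on a fixed toy input. With 4 grid squares and sorted particle
// square indices [0, 0, 2, 2, 2], reduce_by_key emits keys [0, 2] and counts
// [2, 3]; scatter_if spreads the counts to [2, 0, 3, 0]; exclusive_scan turns
// that into offsets [0, 2, 2, 5], so square i owns particles
// [offsets[i], offsets[i+1]), with the last square closed by num_particles.
static void offsets_toy_example()
{
  thrust::device_vector<int> square_idx(5);
  square_idx[0] = 0; square_idx[1] = 0; square_idx[2] = 2; square_idx[3] = 2; square_idx[4] = 2;
  thrust::device_vector<int> keys(4, 0), counts(4, 0), offsets(4, 0);
  thrust::reduce_by_key(square_idx.begin(), square_idx.end(),
                        thrust::make_constant_iterator(1),
                        keys.begin(), counts.begin());          // keys = [0,2,..], counts = [2,3,..]
  thrust::scatter_if(counts.begin(), counts.end(), keys.begin(), counts.begin(),
                     offsets.begin(), IgnoreZeroPredicate());   // offsets = [2,0,3,0]
  thrust::exclusive_scan(offsets.begin(), offsets.end(), offsets.begin()); // [0,2,2,5]
}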
5902e0a1d649dffedca499eb1f2c258b01ff8742.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <cuda.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/reduce.h> #include <thrust/for_each.h> #include <thrust/scatter.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_iterator.h> #include "common.h" #define NUM_THREADS 256 struct GridMetadata { int num_particles; // Approximately, but not exactly, equal to square_size*side_count. double side_size; double square_size; int side_count; int count; GridMetadata(int num_particles_v, double side_size_v, double square_size_v, int side_count_v): num_particles(num_particles_v), side_size(side_size_v), square_size(square_size_v), side_count(side_count_v), count(side_count_v*side_count_v) { } __host__ __device__ int particle_to_flat_square_idx(const particle_t& p) const { int x_idx = (int) (p.x / square_size); int y_idx = (int) (p.y / square_size); return square_to_flat_square_idx(x_idx, y_idx); } __host__ __device__ int square_to_flat_square_idx(int square_x, int square_y) const { const int idx = square_x + side_count*square_y; return idx; } }; // Functor to map a particle to its grid location. struct GridSquareCmIndex : public thrust::unary_function<particle_t&, int> { const GridMetadata g; GridSquareCmIndex(const GridMetadata g_v) : g(g_v) { } __host__ __device__ int operator()(particle_t& p) const { return g.particle_to_flat_square_idx(p); } }; // Functor to move a particle. struct MoveParticle : public thrust::unary_function<particle_t&, void> { const GridMetadata g; MoveParticle(const GridMetadata g_v) : g(g_v) { } __host__ __device__ void operator()(particle_t& p) { // // slightly simplified Velocity Verlet integration // conserves energy better than explicit Euler method // p.vx += p.ax * dt; p.vy += p.ay * dt; p.x += p.vx * dt; p.y += p.vy * dt; // // bounce from walls // while(p.x < 0 || p.x > g.side_size) { p.x = p.x < 0 ? -(p.x) : 2*g.side_size-p.x; p.vx = -(p.vx); } while(p.y < 0 || p.y > g.side_size) { p.y = p.y < 0 ? -(p.y) : 2*g.side_size-p.y; p.vy = -(p.vy); } } }; struct IgnoreZeroPredicate : public thrust::unary_function<int, bool> { __host__ __device__ bool operator()(int i) { return i != 0; } }; __device__ void apply_force_gpu(particle_t& particle, particle_t& neighbor) { double dx = neighbor.x - particle.x; double dy = neighbor.y - particle.y; double r2 = dx * dx + dy * dy; if( r2 > cutoff*cutoff ) return; //r2 = fmax( r2, min_r*min_r ); r2 = (r2 > min_r*min_r) ? r2 : min_r*min_r; double r = sqrt( r2 ); // // very simple short-range repulsive force // double coef = ( 1 - cutoff / r ) / r2 / mass; particle.ax += coef * dx; particle.ay += coef * dy; } __device__ int num_particles_in_square(int square_idx, const int* grid_offsets, const GridMetadata grid) { int square_start = grid_offsets[square_idx]; // For all but the last grid square, the number of particles in the square // equals the next square's offset minus its own offset. The last square // just has all the remaining particles. int square_end = (square_idx < (grid.count - 1)) ? grid_offsets[square_idx+1] : grid.num_particles; return square_end - square_start; } __global__ void compute_forces_gpu (particle_t* particles, const int* grid_offsets, const GridMetadata grid) { // Get grid square ID. Each call to this function computes forces // for all of the particles in one grid square. 
const int square_idx = threadIdx.x + blockIdx.x * blockDim.x; if (square_idx >= grid.count) return; const int square_x = square_idx % grid.side_count; const int square_y = square_idx / grid.side_count; const int first_particle_idx = grid_offsets[square_idx]; const int num_ps = num_particles_in_square(square_idx, grid_offsets, grid); for (int particle_idx = first_particle_idx; particle_idx < first_particle_idx + num_ps; particle_idx++) { particles[particle_idx].ax = particles[particle_idx].ay = 0; // Iterate only over neighboring grid squares. Note that we could reorder // this to access each neighbor only once, which would probably be better. for (int y_offset = -1; y_offset <= 1; y_offset++) { const int neighbor_y = square_y + y_offset; if (neighbor_y < 0 || neighbor_y >= grid.side_count) { continue; } for (int x_offset = -1; x_offset <= 1; x_offset++) { const int neighbor_x = square_x + x_offset; if (neighbor_x < 0 || neighbor_x >= grid.side_count) { continue; } const int neighbor_square_idx = grid.square_to_flat_square_idx(neighbor_x, neighbor_y); // Now we iterate over all the particles in the neighbor and apply // forces to our particle. const int first_neighbor_particle_idx = grid_offsets[neighbor_square_idx]; const int num_neighbor_ps = num_particles_in_square(neighbor_square_idx, grid_offsets, grid); for (int neighbor_particle_idx = first_neighbor_particle_idx; neighbor_particle_idx < first_neighbor_particle_idx + num_neighbor_ps; neighbor_particle_idx++) { apply_force_gpu(particles[particle_idx], particles[neighbor_particle_idx]); } } } } } /* * @param particle_square_idx_storage is a preallocated device array of size * at least grid.num_particles. Its value can be arbitrary. It will be * clobbered. * @param grid_offsets will be populated with offsets corresponding to grid * squares. [grid_offsets[i], grid_offsets[i+1]) (note the inclusiveness!) * is the set of indices in particles for grid square i, after this function * returns. * @param grid_idx_storage is a preallocated device array of size at least * grid.count. Its value can be arbitrary. It will be clobbered. * @param grid_count_storage is a preallocated device array of size at least * grid.count. Its value can be arbitrary. It will be clobbered. */ void sort_to_bins( thrust::device_vector<particle_t>& particles, thrust::device_vector<int>& particle_square_idx_storage, thrust::device_vector<int>& grid_offsets, thrust::device_vector<int>& grid_idx_storage, thrust::device_vector<int>& grid_count_storage, const GridMetadata& grid) { thrust::fill(particle_square_idx_storage.begin(), particle_square_idx_storage.end(), 0); thrust::transform( particles.begin(), particles.end(), particle_square_idx_storage.begin(), GridSquareCmIndex(grid)); // Sort the particles by column-major order in the grid. Thrust offers no // sort function that leaves the keys in place, so we must actually allocate // memory for the square indices. Another option is to use a comparison // functor; that would avoid memory allocation but end up invoking a less // efficient sorting algorithm that works on user-defined functors (rather // than the fast int-sorter). It could be worth trying that, too. // // I think we could do much better than sort_by_key here. This makes the // algorithm O(n log n) when we can do O(n). 
Here is a sketch of a better // algorithm that seems to equal the asymptotic efficiency of a graph-based // or message-passing method, while having very low constant factors when // implemented on the GPU: // * Associate with each particle its grid square coordinates (x,y). // * Use a LSH algorithm to hash the grid square coordinates. LSH means // that the absolute difference between the hashes of two points is (with // high probability) close to the L2 distance of the two points. // * Sort the particles according to the hash. The sorting algorithm we use // is important: // - Notice that, since particles may move // only a bounded L2 distance in an iteration, there is a bound on the // distance any particle needs to travel in this sorting step. That is, // the particles are already "almost sorted." // - Therefore we want a sorting algorithm that can be implemented quickly // on the GPU and that takes only O(k N) time, where k is the maximum // (or average) distance moved by any element. // - There are fast algorithms for nearly-sorted lists, but I haven't // found on that is parallelizable yet. That seems very possible, // though. So I'm leaving this for future work, due to time // constraints. // Unrelated note: Thrust's sort_by_key seems to have a bug (or exercises a // memory leak in my code?) that kicks in for particle counts between 18,000 // and 26,000 and causes this line to crash with a bad_alloc error. For // larger or smaller particle counts, everything seems fine, which is // terrifying. thrust::sort_by_key( particle_square_idx_storage.begin(), particle_square_idx_storage.end(), particles.begin()); // Compute the starting offset of each grid square (that is, the index of the // first particle in particles contained in each square). // I cannot find a totally natural way to do this using Thrust, so we have to // jump through some hoops. We reduce_by_key to compute the number of // particles in each nonempty grid square and the corresponding square // indices. Then we scatter into a vector that is initially filled with 0s, // mapping each count according to its index. Then we scan with plus, so // that the value at the ith location is the number of particles in grid // squares preceding the ith grid square. thrust::fill(grid_idx_storage.begin(), grid_count_storage.end(), 0); thrust::fill(grid_count_storage.begin(), grid_count_storage.end(), 0); thrust::reduce_by_key( particle_square_idx_storage.begin(), particle_square_idx_storage.end(), thrust::make_constant_iterator(1), grid_idx_storage.begin(), grid_count_storage.begin()); // Here we set grid_offsets[grid_idx_storage[i]] = grid_count_storage[i] // for all i. Since some grid squares might have been empty, there might // be several trailing elements of grid_idx_storage that are zero. So we // use scatter_if() to ignore those. thrust::fill(grid_offsets.begin(), grid_offsets.end(), 0); thrust::scatter_if( grid_count_storage.begin(), grid_count_storage.end(), grid_idx_storage.begin(), grid_count_storage.begin(), grid_offsets.begin(), IgnoreZeroPredicate()); thrust::exclusive_scan( grid_offsets.begin(), grid_offsets.end(), grid_offsets.begin()); } void simulate_forces(thrust::device_vector<particle_t>& particles, const thrust::device_vector<int>& grid_offsets, const GridMetadata& grid) { // The communication pattern is not simple, so we have to resort to writing // device code ourselves here. 
particle_t* d_particles = thrust::raw_pointer_cast(particles.data()); const int* d_grid_offsets = thrust::raw_pointer_cast(grid_offsets.data()); int num_blocks = div_round_up(grid.count, NUM_THREADS); compute_forces_gpu<<<num_blocks, NUM_THREADS>>>(d_particles, d_grid_offsets, grid); } void simulate_movement(thrust::device_vector<particle_t>& particles, const GridMetadata& grid) { thrust::for_each(particles.begin(), particles.end(), MoveParticle(grid)); } int main( int argc, char **argv ) { // This takes a few seconds to initialize the runtime cudaThreadSynchronize(); if (find_option( argc, argv, "-h" ) >= 0) { printf( "Options:\n" ); printf( "-h to see this help\n" ); printf( "-n <int> to set the number of particles\n" ); printf( "-o <filename> to specify the output file name\n" ); printf( "-no turns off all correctness checks and particle output\n"); return 0; } const int n = read_int( argc, argv, "-n", 1000 ); const bool fast = (find_option( argc, argv, "-no" ) != -1); const char *savename = read_string( argc, argv, "-o", NULL ); const char *sumname = read_string( argc, argv, "-s", NULL ); FILE *fsave = ((!fast) && savename) ? fopen( savename, "w" ) : NULL; const double size = set_size( n ); // Particles are stored in a flattened array of squares. Each square // is large enough that particles can only move across 1 square per // simulated time step, but small enough that the expected number of // particles (and hopefully the maximum number) per square is a small // constant. // Following NVIDIA's example particle simulator, we store the particles // in a flattened array and sort them when we need to change the grid // structure. That is, we use one vector // of size n to store the particles in sorted order (column major by grid // square, with arbitrary order within each square), and a second vector // to store the starting offsets for the // particles contained in each square. Then rebuilding the grid involves // an in-place sort and recomputing the index. const double square_size = cutoff + 0.0001; const int side_count = div_round_up_f(size, square_size); const GridMetadata grid(n, size, square_size, side_count); double init_on_host_time = read_timer(); //TODO: This part of the initialization is serial. It is embarrassingly // parallel and could easily be done on the GPU. particle_t* particles = init_particles(n); thrust::host_vector<particle_t> ps(particles, particles+n); init_on_host_time = read_timer() - init_on_host_time; double init_on_device_time = read_timer(); // Copy the particles to the GPU. thrust::device_vector<particle_t> d_ps = ps; // Allocate the structure that maps grid locations to offsets in // d_ps. Like d_ps, this will be populated inside the simulation loop, // and for now is uninitialized. thrust::device_vector<int> d_grid_offsets(grid.count); // Allocate scratch space that the algorithm will need. thrust::device_vector<int> d_particle_square_idx_storage(grid.num_particles); thrust::device_vector<int> d_grid_idx_storage(grid.count); thrust::device_vector<int> d_grid_count_storage(grid.count); init_on_device_time = read_timer() - init_on_device_time; // // simulate a number of time steps // double simulation_time = read_timer(); for (int step = 0; step < NSTEPS; step++) { // First, we must build the grid. sort_to_bins(d_ps, d_particle_square_idx_storage, d_grid_offsets, d_grid_idx_storage, d_grid_count_storage, grid); // Now we can simulate forces and movement. 
simulate_forces(d_ps, d_grid_offsets, grid); simulate_movement(d_ps, grid); if( fsave && (step%SAVEFREQ) == 0 ) { // Copy the particles back to the CPU. ps = d_ps; save( fsave, n, ps.data()); } } cudaThreadSynchronize(); simulation_time = read_timer( ) - simulation_time; printf( "CPU-GPU copy time = %g seconds\n", init_on_device_time); printf( "n = %d, simulation time = %g seconds\n", n, simulation_time ); free(particles); if( fsave ) fclose( fsave ); return 0; }
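// A minimal, self-contained sketch of an alternative to the offset
// computation in sort_to_bins above: once the particle keys are sorted, a
// single vectorized binary search yields the same starting offsets, and
// empty grid squares fall out naturally (they reuse the next square's
// offset).  The helper name and the assumption that it sees the same
// vectors are for illustration only; this is not part of the original
// program.
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>

void compute_grid_offsets_by_search(
    const thrust::device_vector<int>& particle_square_idx_storage,  // sorted keys
    thrust::device_vector<int>& grid_offsets,                       // size == grid.count
    int grid_count) {
  // grid_offsets[s] = index of the first particle whose square index is >= s.
  thrust::lower_bound(
      particle_square_idx_storage.begin(), particle_square_idx_storage.end(),
      thrust::counting_iterator<int>(0),
      thrust::counting_iterator<int>(grid_count),
      grid_offsets.begin());
}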
ded848d5a0662a59ba3b55da250efe81a7ff924d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // SPDX-FileCopyrightText: 2021 CERN // SPDX-License-Identifier: Apache-2.0 #include "example9.cuh" #include <AdePT/LoopNavigator.h> #include <CopCore/PhysicalConstants.h> #include <G4HepEmGammaManager.hh> #include <G4HepEmTrack.hh> #include <G4HepEmGammaInteractionCompton.hh> #include <G4HepEmGammaInteractionConversion.hh> // Pull in implementation. #include <G4HepEmGammaManager.icc> #include <G4HepEmGammaInteractionCompton.icc> #include <G4HepEmGammaInteractionConversion.icc> __global__ void TransportGammas(Track *gammas, const adept::MParray *active, Secondaries secondaries, adept::MParray *activeQueue, adept::MParray *relocateQueue, GlobalScoring *scoring) { int activeSize = active->size(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) { const int slot = (*active)[i]; Track &currentTrack = gammas[slot]; // Init a track with the needed data to call into G4HepEm. G4HepEmTrack emTrack; emTrack.SetEKin(currentTrack.energy); // For now, just assume a single material. int theMCIndex = 1; emTrack.SetMCIndex(theMCIndex); // Sample the `number-of-interaction-left` and put it into the track. for (int ip = 0; ip < 3; ++ip) { double numIALeft = currentTrack.numIALeft[ip]; if (numIALeft <= 0) { numIALeft = -::log(currentTrack.Uniform()); currentTrack.numIALeft[ip] = numIALeft; } emTrack.SetNumIALeft(numIALeft, ip); } // Call G4HepEm to compute the physics step limit. G4HepEmGammaManager::HowFar(&g4HepEmData, &g4HepEmPars, &emTrack); // Get result into variables. double geometricalStepLengthFromPhysics = emTrack.GetGStepLength(); int winnerProcessIndex = emTrack.GetWinnerProcessIndex(); // Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we // also need to carry them over! // Check if there's a volume boundary in between. vecgeom::NavStateIndex nextState; double geometryStepLength = LoopNavigator::ComputeStepAndNextVolume( currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics, currentTrack.navState, nextState); currentTrack.pos += geometryStepLength * currentTrack.dir; if (nextState.IsOnBoundary()) { emTrack.SetGStepLength(geometryStepLength); emTrack.SetOnBoundary(true); } G4HepEmGammaManager::UpdateNumIALeft(&emTrack); // Save the `number-of-interaction-left` in our track. for (int ip = 0; ip < 3; ++ip) { double numIALeft = emTrack.GetNumIALeft(ip); currentTrack.numIALeft[ip] = numIALeft; } if (nextState.IsOnBoundary()) { // For now, just count that we hit something. atomicAdd(&scoring->hits, 1); // Kill the particle if it left the world. if (nextState.Top() != nullptr) { activeQueue->push_back(slot); relocateQueue->push_back(slot); // Move to the next boundary. currentTrack.navState = nextState; } continue; } else if (winnerProcessIndex < 0) { // No discrete process, move on. activeQueue->push_back(slot); continue; } // Reset number of interaction left for the winner discrete process. // (Will be resampled in the next iteration.) currentTrack.numIALeft[winnerProcessIndex] = -1.0; // Perform the discrete interaction. RanluxppDoubleEngine rnge(&currentTrack.rngState); // We might need one branched RNG state, prepare while threads are synchronized. RanluxppDouble newRNG(currentTrack.rngState.Branch()); const double energy = currentTrack.energy; switch (winnerProcessIndex) { case 0: { // Invoke gamma conversion to e-/e+ pairs, if the energy is above the threshold. 
if (energy < 2 * copcore::units::kElectronMassC2) { activeQueue->push_back(slot); continue; } double logEnergy = ::log(energy); double elKinEnergy, posKinEnergy; G4HepEmGammaInteractionConversion::SampleKinEnergies(&g4HepEmData, energy, logEnergy, theMCIndex, elKinEnergy, posKinEnergy, &rnge); double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()}; double dirSecondaryEl[3], dirSecondaryPos[3]; G4HepEmGammaInteractionConversion::SampleDirections(dirPrimary, dirSecondaryEl, dirSecondaryPos, elKinEnergy, posKinEnergy, &rnge); Track &electron = secondaries.electrons.NextTrack(); Track &positron = secondaries.positrons.NextTrack(); atomicAdd(&scoring->secondaries, 2); electron.InitAsSecondary(/*parent=*/currentTrack); electron.rngState = newRNG; electron.energy = elKinEnergy; electron.dir.Set(dirSecondaryEl[0], dirSecondaryEl[1], dirSecondaryEl[2]); positron.InitAsSecondary(/*parent=*/currentTrack); // Reuse the RNG state of the dying track. positron.rngState = currentTrack.rngState; positron.energy = posKinEnergy; positron.dir.Set(dirSecondaryPos[0], dirSecondaryPos[1], dirSecondaryPos[2]); // The current track is killed by not enqueuing into the next activeQueue. break; } case 1: { // Invoke Compton scattering of gamma. constexpr double LowEnergyThreshold = 100 * copcore::units::eV; if (energy < LowEnergyThreshold) { activeQueue->push_back(slot); continue; } const double origDirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()}; double dirPrimary[3]; const double newEnergyGamma = G4HepEmGammaInteractionCompton::SamplePhotonEnergyAndDirection(energy, dirPrimary, origDirPrimary, &rnge); vecgeom::Vector3D<double> newDirGamma(dirPrimary[0], dirPrimary[1], dirPrimary[2]); const double energyEl = energy - newEnergyGamma; if (energyEl > LowEnergyThreshold) { // Create a secondary electron and sample/compute directions. Track &electron = secondaries.electrons.NextTrack(); atomicAdd(&scoring->secondaries, 1); electron.InitAsSecondary(/*parent=*/currentTrack); electron.rngState = newRNG; electron.energy = energyEl; electron.dir = energy * currentTrack.dir - newEnergyGamma * newDirGamma; electron.dir.Normalize(); } else { atomicAdd(&scoring->energyDeposit, energyEl); } // Check the new gamma energy and deposit if below threshold. if (newEnergyGamma > LowEnergyThreshold) { currentTrack.energy = newEnergyGamma; currentTrack.dir = newDirGamma; // The current track continues to live. activeQueue->push_back(slot); } else { atomicAdd(&scoring->energyDeposit, newEnergyGamma); // The current track is killed by not enqueuing into the next activeQueue. } break; } case 2: { // Invoke photoelectric process: right now only absorb the gamma. atomicAdd(&scoring->energyDeposit, energy); // The current track is killed by not enqueuing into the next activeQueue. break; } } } }
ded848d5a0662a59ba3b55da250efe81a7ff924d.cu
// SPDX-FileCopyrightText: 2021 CERN // SPDX-License-Identifier: Apache-2.0 #include "example9.cuh" #include <AdePT/LoopNavigator.h> #include <CopCore/PhysicalConstants.h> #include <G4HepEmGammaManager.hh> #include <G4HepEmTrack.hh> #include <G4HepEmGammaInteractionCompton.hh> #include <G4HepEmGammaInteractionConversion.hh> // Pull in implementation. #include <G4HepEmGammaManager.icc> #include <G4HepEmGammaInteractionCompton.icc> #include <G4HepEmGammaInteractionConversion.icc> __global__ void TransportGammas(Track *gammas, const adept::MParray *active, Secondaries secondaries, adept::MParray *activeQueue, adept::MParray *relocateQueue, GlobalScoring *scoring) { int activeSize = active->size(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) { const int slot = (*active)[i]; Track &currentTrack = gammas[slot]; // Init a track with the needed data to call into G4HepEm. G4HepEmTrack emTrack; emTrack.SetEKin(currentTrack.energy); // For now, just assume a single material. int theMCIndex = 1; emTrack.SetMCIndex(theMCIndex); // Sample the `number-of-interaction-left` and put it into the track. for (int ip = 0; ip < 3; ++ip) { double numIALeft = currentTrack.numIALeft[ip]; if (numIALeft <= 0) { numIALeft = -std::log(currentTrack.Uniform()); currentTrack.numIALeft[ip] = numIALeft; } emTrack.SetNumIALeft(numIALeft, ip); } // Call G4HepEm to compute the physics step limit. G4HepEmGammaManager::HowFar(&g4HepEmData, &g4HepEmPars, &emTrack); // Get result into variables. double geometricalStepLengthFromPhysics = emTrack.GetGStepLength(); int winnerProcessIndex = emTrack.GetWinnerProcessIndex(); // Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we // also need to carry them over! // Check if there's a volume boundary in between. vecgeom::NavStateIndex nextState; double geometryStepLength = LoopNavigator::ComputeStepAndNextVolume( currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics, currentTrack.navState, nextState); currentTrack.pos += geometryStepLength * currentTrack.dir; if (nextState.IsOnBoundary()) { emTrack.SetGStepLength(geometryStepLength); emTrack.SetOnBoundary(true); } G4HepEmGammaManager::UpdateNumIALeft(&emTrack); // Save the `number-of-interaction-left` in our track. for (int ip = 0; ip < 3; ++ip) { double numIALeft = emTrack.GetNumIALeft(ip); currentTrack.numIALeft[ip] = numIALeft; } if (nextState.IsOnBoundary()) { // For now, just count that we hit something. atomicAdd(&scoring->hits, 1); // Kill the particle if it left the world. if (nextState.Top() != nullptr) { activeQueue->push_back(slot); relocateQueue->push_back(slot); // Move to the next boundary. currentTrack.navState = nextState; } continue; } else if (winnerProcessIndex < 0) { // No discrete process, move on. activeQueue->push_back(slot); continue; } // Reset number of interaction left for the winner discrete process. // (Will be resampled in the next iteration.) currentTrack.numIALeft[winnerProcessIndex] = -1.0; // Perform the discrete interaction. RanluxppDoubleEngine rnge(&currentTrack.rngState); // We might need one branched RNG state, prepare while threads are synchronized. RanluxppDouble newRNG(currentTrack.rngState.Branch()); const double energy = currentTrack.energy; switch (winnerProcessIndex) { case 0: { // Invoke gamma conversion to e-/e+ pairs, if the energy is above the threshold. 
if (energy < 2 * copcore::units::kElectronMassC2) { activeQueue->push_back(slot); continue; } double logEnergy = std::log(energy); double elKinEnergy, posKinEnergy; G4HepEmGammaInteractionConversion::SampleKinEnergies(&g4HepEmData, energy, logEnergy, theMCIndex, elKinEnergy, posKinEnergy, &rnge); double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()}; double dirSecondaryEl[3], dirSecondaryPos[3]; G4HepEmGammaInteractionConversion::SampleDirections(dirPrimary, dirSecondaryEl, dirSecondaryPos, elKinEnergy, posKinEnergy, &rnge); Track &electron = secondaries.electrons.NextTrack(); Track &positron = secondaries.positrons.NextTrack(); atomicAdd(&scoring->secondaries, 2); electron.InitAsSecondary(/*parent=*/currentTrack); electron.rngState = newRNG; electron.energy = elKinEnergy; electron.dir.Set(dirSecondaryEl[0], dirSecondaryEl[1], dirSecondaryEl[2]); positron.InitAsSecondary(/*parent=*/currentTrack); // Reuse the RNG state of the dying track. positron.rngState = currentTrack.rngState; positron.energy = posKinEnergy; positron.dir.Set(dirSecondaryPos[0], dirSecondaryPos[1], dirSecondaryPos[2]); // The current track is killed by not enqueuing into the next activeQueue. break; } case 1: { // Invoke Compton scattering of gamma. constexpr double LowEnergyThreshold = 100 * copcore::units::eV; if (energy < LowEnergyThreshold) { activeQueue->push_back(slot); continue; } const double origDirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()}; double dirPrimary[3]; const double newEnergyGamma = G4HepEmGammaInteractionCompton::SamplePhotonEnergyAndDirection(energy, dirPrimary, origDirPrimary, &rnge); vecgeom::Vector3D<double> newDirGamma(dirPrimary[0], dirPrimary[1], dirPrimary[2]); const double energyEl = energy - newEnergyGamma; if (energyEl > LowEnergyThreshold) { // Create a secondary electron and sample/compute directions. Track &electron = secondaries.electrons.NextTrack(); atomicAdd(&scoring->secondaries, 1); electron.InitAsSecondary(/*parent=*/currentTrack); electron.rngState = newRNG; electron.energy = energyEl; electron.dir = energy * currentTrack.dir - newEnergyGamma * newDirGamma; electron.dir.Normalize(); } else { atomicAdd(&scoring->energyDeposit, energyEl); } // Check the new gamma energy and deposit if below threshold. if (newEnergyGamma > LowEnergyThreshold) { currentTrack.energy = newEnergyGamma; currentTrack.dir = newDirGamma; // The current track continues to live. activeQueue->push_back(slot); } else { atomicAdd(&scoring->energyDeposit, newEnergyGamma); // The current track is killed by not enqueuing into the next activeQueue. } break; } case 2: { // Invoke photoelectric process: right now only absorb the gamma. atomicAdd(&scoring->energyDeposit, energy); // The current track is killed by not enqueuing into the next activeQueue. break; } } } }
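// A minimal host-side sketch of the "number-of-interaction-lengths-left"
// bookkeeping that the kernel above delegates to G4HepEm: the budget is
// drawn once as -log(u), converted to a physical distance with a
// macroscopic cross section, and decremented by the distance actually
// traversed in each step.  All names and the constant cross section are
// illustrative assumptions, not part of G4HepEm's API.
#include <algorithm>
#include <cmath>
#include <random>

struct ToyProcess {
  double numIALeft = -1.0;  // < 0 means "needs to be (re)sampled"
  double macroXsec = 1.0;   // 1 / mean-free-path, assumed constant here
};

// Advances a track by at most stepLength; returns the distance traveled and
// sets 'fires' when the process uses up its interaction budget on this step.
double toy_step(ToyProcess& p, double stepLength, bool& fires, std::mt19937& rng) {
  std::uniform_real_distribution<double> uni(0.0, 1.0);
  if (p.numIALeft <= 0) p.numIALeft = -std::log(1.0 - uni(rng));
  const double distToInteraction = p.numIALeft / p.macroXsec;
  const double traveled = std::min(stepLength, distToInteraction);
  p.numIALeft -= traveled * p.macroXsec;
  fires = (traveled == distToInteraction);
  return traveled;
}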
fe951f6314a2671196ebe45f3fc5cdbed96f2a6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <vector> #include <math.h> #include <stdlib.h> #include <time.h> #include <algorithm> using namespace std; //vector< vector<double> > PointValues; //vector< vector<double> > KCentroids; //vector<int> ClusteringValues; unsigned int total_points, total_values, K, max_iterations; #define THREADS 8 __global__ void updateCentroids(double *PointValues, double *KCentroids, double *ClusteringValues, int total_points, int total_values, int K){ int kevaluada = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int ind = j; float tmp = 0.0; int count = 0; if (j < total_values) { for (int i = 0; i<total_points; ++i, ind = ind + total_values) { //printf("kevaluada: %d \n",kevaluada); if (kevaluada == ClusteringValues[i]) { tmp += PointValues[ind]; ++count; } } //printf("tmp: %d \n",tmp); KCentroids[kevaluada * total_values + j] = tmp/count; } } void printClusters(double *PointValues, double *KCentroids, double *ClusteringValues); //void updateCentroids(double *PointValues, double *KCentroids, // double *ClusteringValues); bool updatePointDistances(); void CheckCudaError(char sms[], int line); int main(int argc, char** argv) { unsigned int numBytesPointValues, numBytesKCentroids, numBytesClustering; unsigned int nBlocksC, nThreadsC; hipEvent_t E1, E2, E3, E4, E5; float TiempoTotal, TiempoUpdateCentroids, TiempoUpdatePointDistances; double *h_PointValues, *h_KCentroids, *h_ClusteringValues; double *d_PointValues, *d_KCentroids, *d_ClusteringValues; cin >> total_points >> total_values >> K >> max_iterations; if(K > total_points) cout << "INPUT ERROR: K CANT BE BIGGER THAN TOTAL POINTS" << endl; //Reservamos el expacio que necesitaremos en memoria numBytesKCentroids = K * total_values * sizeof(double); numBytesPointValues = total_points * total_values * sizeof(double); numBytesClustering = total_points * sizeof(double); //Declaramos los eventos hipEventCreate(&E1); hipEventCreate(&E2); hipEventCreate(&E3); hipEventCreate(&E4); hipEventCreate(&E5); // Obtener Memoria en el host h_PointValues = (double*) malloc(numBytesPointValues); h_KCentroids = (double*) malloc(numBytesKCentroids); h_ClusteringValues = (double*) malloc(numBytesClustering); //Lectura de los valores for(int i = 0; i < total_points; i++) { for(int j = 0; j < total_values; j++) { double value; cin >> value; int ind = i * total_values + j; h_PointValues[ind] = value; } } for (int i = 0; i<total_points; ++i) { h_ClusteringValues[i] = 0; } vector<int> prohibited_indexes; srand(1); for(int i = 0; i < K; i++) { while(true) { int index_point = rand() % total_points; if(find(prohibited_indexes.begin(), prohibited_indexes.end(), index_point) == prohibited_indexes.end()) { cout << "index_point: " << index_point << endl; prohibited_indexes.push_back(index_point); h_ClusteringValues[index_point] = i; break; } } } // Obtener Memoria en el device hipMalloc((double**)&d_PointValues, numBytesPointValues); hipMalloc((double**)&d_KCentroids, numBytesKCentroids); hipMalloc((double**)&d_ClusteringValues, numBytesClustering); CheckCudaError((char *) "Obtener Memoria en el device", __LINE__); // Copiar datos desde el host en el device hipMemcpy(d_PointValues, h_PointValues, numBytesPointValues, hipMemcpyHostToDevice); hipMemcpy(d_KCentroids, h_KCentroids, numBytesKCentroids, hipMemcpyHostToDevice); hipMemcpy(d_ClusteringValues, h_ClusteringValues, numBytesClustering, 
hipMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__); // Ejecutar el kernel nThreadsC = total_values; nBlocksC = (total_values + nThreadsC - 1)/nThreadsC; // Funciona bien en cualquier caso cout << "nBlocksC: " << (total_values + nThreadsC - 1)/nThreadsC << endl; cout << "total_values: " << total_values << endl; cout << "nThreadsC: " << nThreadsC << endl; dim3 dimGridC(nBlocksC, 1, 1); dim3 dimBlockC(nThreadsC, K, 1); printf("\n"); printf("Kernel de su puta madre\n"); printf("Dimension Block: %d x %d x %d (%d) threads\n", dimBlockC.x, dimBlockC.y, dimBlockC.z, dimBlockC.x * dimBlockC.y * dimBlockC.z); printf("Dimension Grid: %d x %d x %d (%d) blocks\n", dimGridC.x, dimGridC.y, dimGridC.z, dimGridC.x * dimGridC.y * dimGridC.z); hipEventRecord(E1, 0); hipEventSynchronize(E1); hipLaunchKernelGGL(( updateCentroids), dim3(dimGridC),dim3(dimBlockC), 0, 0, d_PointValues, d_KCentroids, d_ClusteringValues, total_points, total_values, K); hipEventRecord(E2, 0); hipEventSynchronize(E2); //CheckCudaError((char *) "Invocar Kernel", __LINE__); /*int counter = 0; hipEventRecord(E3, 0); hipEventSynchronize(E3); bool yeray = updatePointDistances(); hipEventRecord(E4, 0); hipEventSynchronize(E4); while (yeray and counter <= max_iterations) { ++counter; updateCentroids(total_values); yeray = updatePointDistances(); } hipEventRecord(E5, 0); hipEventSynchronize(E5);*/ // Obtener el resultado desde el host //hipMemcpy(h_PointValues, d_PointValues, numBytesPointValues, // hipMemcpyDeviceToHost); hipMemcpy(h_KCentroids, d_KCentroids, numBytesKCentroids, hipMemcpyDeviceToHost); //hipMemcpy(h_ClusteringValues, d_ClusteringValues, numBytesClustering, // hipMemcpyDeviceToHost); //CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); cout << "AFTER UPDATING CENTROIDS: " << endl; printClusters(h_PointValues, h_KCentroids, h_ClusteringValues); // Liberar Memoria del device hipFree(d_PointValues); hipFree(d_KCentroids); hipFree(d_ClusteringValues); hipDeviceSynchronize(); hipEventElapsedTime(&TiempoUpdateCentroids, E1, E2); //hipEventElapsedTime(&TiempoUpdatePointDistances, E3, E4); //hipEventElapsedTime(&TiempoTotal, E1, E5); hipEventDestroy(E1); hipEventDestroy(E2); hipEventDestroy(E3); hipEventDestroy(E4); hipEventDestroy(E5); printf("Tiempo UpdateCentroids function: %4.6f milseg\n", TiempoUpdateCentroids); /*printf("Tiempo UpdatePointDistances function: %4.6f milseg\n", TiempoUpdatePointDistances); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);*/ free(h_PointValues); free(h_KCentroids); free(h_ClusteringValues); } /*bool updatePointDistances(){ double sum, min_dist; int min_k; bool change = false; for (int i = 0; i<PointValues.size(); ++i) { min_dist = 0.0; for (int j = 0; j<KCentroids.size(); ++j) { sum = 0.0; for (int k = 0; k<PointValues[i].size(); ++k) { sum += pow(KCentroids[j][k] - PointValues[i][k], 2.0); } if (j == 0) { min_dist = sqrt(sum); min_k = j; } if (min_dist > sqrt(sum)) { min_dist = sqrt(sum); min_k = j; } } if (ClusteringValues[i] != min_k) { ClusteringValues[i] = min_k; change = true; } } return change; }*/ /*void updateCentroids(double *PointValues, double *KCentroids, double *ClusteringValues){ double *updatingK; updatingK.resize(KCentroids.size()); for (int i = 0; i<ClusteringValues.size(); ++i) { vector<double> AddingK; for (int j = 0; j<PointValues[i].size(); ++j) { AddingK.push_back(PointValues[i*total_values+j]);//AddingK.push_back(PointValues[i][j]); } for (int j = 0; j<AddingK.size(); ++j) { 
updatingK[ClusteringValues[i]].push_back(AddingK[j]); } } vector<double> KUpdated(total_values,0); for (int i = 0; i<updatingK.size(); ++i) { vector<double> KUpdated(total_values,0); for (int j = 0; j<updatingK[i].size(); ++j) { KUpdated[j%total_values] += updatingK[i][j]; } if (updatingK[i].size() > 0) { for (int j = 0; j<KUpdated.size(); ++j) { KUpdated[j] /= (updatingK[i].size()/total_values); } KCentroids[i] = KUpdated; } } }*/ void printClusters(double *PointValues, double *KCentroids, double *ClusteringValues) { for (int i = 0; i<K; ++i) { cout << "Centroid " << i << ": "; for (int j = 0; j<total_values; ++j) { int ind = i * total_values + j; cout << KCentroids[ind] << " "; } cout << endl; } for (int i = 0; i<total_points; ++i) { cout << "Point " << i << ": "; for (int j = 0; j<total_values; ++j) { int ind = i * total_values + j; cout << PointValues[ind] << " "; } cout << "is located on cluster: " << ClusteringValues[i] << endl; } } int error(float a, float b) { if (abs (a - b) / a > 0.000001) return 1; else return 0; } void CheckCudaError(char sms[], int line) { hipError_t error; error = hipGetLastError(); if (error) { printf("(ERROR) %s - %s in %s at line %d\n", sms, hipGetErrorString(error), __FILE__, line); exit(EXIT_FAILURE); } }
fe951f6314a2671196ebe45f3fc5cdbed96f2a6f.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <vector> #include <math.h> #include <stdlib.h> #include <time.h> #include <algorithm> using namespace std; //vector< vector<double> > PointValues; //vector< vector<double> > KCentroids; //vector<int> ClusteringValues; unsigned int total_points, total_values, K, max_iterations; #define THREADS 8 __global__ void updateCentroids(double *PointValues, double *KCentroids, double *ClusteringValues, int total_points, int total_values, int K){ int kevaluada = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; int ind = j; float tmp = 0.0; int count = 0; if (j < total_values) { for (int i = 0; i<total_points; ++i, ind = ind + total_values) { //printf("kevaluada: %d \n",kevaluada); if (kevaluada == ClusteringValues[i]) { tmp += PointValues[ind]; ++count; } } //printf("tmp: %d \n",tmp); KCentroids[kevaluada * total_values + j] = tmp/count; } } void printClusters(double *PointValues, double *KCentroids, double *ClusteringValues); //void updateCentroids(double *PointValues, double *KCentroids, // double *ClusteringValues); bool updatePointDistances(); void CheckCudaError(char sms[], int line); int main(int argc, char** argv) { unsigned int numBytesPointValues, numBytesKCentroids, numBytesClustering; unsigned int nBlocksC, nThreadsC; cudaEvent_t E1, E2, E3, E4, E5; float TiempoTotal, TiempoUpdateCentroids, TiempoUpdatePointDistances; double *h_PointValues, *h_KCentroids, *h_ClusteringValues; double *d_PointValues, *d_KCentroids, *d_ClusteringValues; cin >> total_points >> total_values >> K >> max_iterations; if(K > total_points) cout << "INPUT ERROR: K CANT BE BIGGER THAN TOTAL POINTS" << endl; //Reservamos el expacio que necesitaremos en memoria numBytesKCentroids = K * total_values * sizeof(double); numBytesPointValues = total_points * total_values * sizeof(double); numBytesClustering = total_points * sizeof(double); //Declaramos los eventos cudaEventCreate(&E1); cudaEventCreate(&E2); cudaEventCreate(&E3); cudaEventCreate(&E4); cudaEventCreate(&E5); // Obtener Memoria en el host h_PointValues = (double*) malloc(numBytesPointValues); h_KCentroids = (double*) malloc(numBytesKCentroids); h_ClusteringValues = (double*) malloc(numBytesClustering); //Lectura de los valores for(int i = 0; i < total_points; i++) { for(int j = 0; j < total_values; j++) { double value; cin >> value; int ind = i * total_values + j; h_PointValues[ind] = value; } } for (int i = 0; i<total_points; ++i) { h_ClusteringValues[i] = 0; } vector<int> prohibited_indexes; srand(1); for(int i = 0; i < K; i++) { while(true) { int index_point = rand() % total_points; if(find(prohibited_indexes.begin(), prohibited_indexes.end(), index_point) == prohibited_indexes.end()) { cout << "index_point: " << index_point << endl; prohibited_indexes.push_back(index_point); h_ClusteringValues[index_point] = i; break; } } } // Obtener Memoria en el device cudaMalloc((double**)&d_PointValues, numBytesPointValues); cudaMalloc((double**)&d_KCentroids, numBytesKCentroids); cudaMalloc((double**)&d_ClusteringValues, numBytesClustering); CheckCudaError((char *) "Obtener Memoria en el device", __LINE__); // Copiar datos desde el host en el device cudaMemcpy(d_PointValues, h_PointValues, numBytesPointValues, cudaMemcpyHostToDevice); cudaMemcpy(d_KCentroids, h_KCentroids, numBytesKCentroids, cudaMemcpyHostToDevice); cudaMemcpy(d_ClusteringValues, h_ClusteringValues, numBytesClustering, cudaMemcpyHostToDevice); CheckCudaError((char *) "Copiar Datos Host --> Device", 
__LINE__); // Ejecutar el kernel nThreadsC = total_values; nBlocksC = (total_values + nThreadsC - 1)/nThreadsC; // Funciona bien en cualquier caso cout << "nBlocksC: " << (total_values + nThreadsC - 1)/nThreadsC << endl; cout << "total_values: " << total_values << endl; cout << "nThreadsC: " << nThreadsC << endl; dim3 dimGridC(nBlocksC, 1, 1); dim3 dimBlockC(nThreadsC, K, 1); printf("\n"); printf("Kernel de su puta madre\n"); printf("Dimension Block: %d x %d x %d (%d) threads\n", dimBlockC.x, dimBlockC.y, dimBlockC.z, dimBlockC.x * dimBlockC.y * dimBlockC.z); printf("Dimension Grid: %d x %d x %d (%d) blocks\n", dimGridC.x, dimGridC.y, dimGridC.z, dimGridC.x * dimGridC.y * dimGridC.z); cudaEventRecord(E1, 0); cudaEventSynchronize(E1); updateCentroids<<<dimGridC,dimBlockC>>>(d_PointValues, d_KCentroids, d_ClusteringValues, total_points, total_values, K); cudaEventRecord(E2, 0); cudaEventSynchronize(E2); //CheckCudaError((char *) "Invocar Kernel", __LINE__); /*int counter = 0; cudaEventRecord(E3, 0); cudaEventSynchronize(E3); bool yeray = updatePointDistances(); cudaEventRecord(E4, 0); cudaEventSynchronize(E4); while (yeray and counter <= max_iterations) { ++counter; updateCentroids(total_values); yeray = updatePointDistances(); } cudaEventRecord(E5, 0); cudaEventSynchronize(E5);*/ // Obtener el resultado desde el host //cudaMemcpy(h_PointValues, d_PointValues, numBytesPointValues, // cudaMemcpyDeviceToHost); cudaMemcpy(h_KCentroids, d_KCentroids, numBytesKCentroids, cudaMemcpyDeviceToHost); //cudaMemcpy(h_ClusteringValues, d_ClusteringValues, numBytesClustering, // cudaMemcpyDeviceToHost); //CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__); cout << "AFTER UPDATING CENTROIDS: " << endl; printClusters(h_PointValues, h_KCentroids, h_ClusteringValues); // Liberar Memoria del device cudaFree(d_PointValues); cudaFree(d_KCentroids); cudaFree(d_ClusteringValues); cudaDeviceSynchronize(); cudaEventElapsedTime(&TiempoUpdateCentroids, E1, E2); //cudaEventElapsedTime(&TiempoUpdatePointDistances, E3, E4); //cudaEventElapsedTime(&TiempoTotal, E1, E5); cudaEventDestroy(E1); cudaEventDestroy(E2); cudaEventDestroy(E3); cudaEventDestroy(E4); cudaEventDestroy(E5); printf("Tiempo UpdateCentroids function: %4.6f milseg\n", TiempoUpdateCentroids); /*printf("Tiempo UpdatePointDistances function: %4.6f milseg\n", TiempoUpdatePointDistances); printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);*/ free(h_PointValues); free(h_KCentroids); free(h_ClusteringValues); } /*bool updatePointDistances(){ double sum, min_dist; int min_k; bool change = false; for (int i = 0; i<PointValues.size(); ++i) { min_dist = 0.0; for (int j = 0; j<KCentroids.size(); ++j) { sum = 0.0; for (int k = 0; k<PointValues[i].size(); ++k) { sum += pow(KCentroids[j][k] - PointValues[i][k], 2.0); } if (j == 0) { min_dist = sqrt(sum); min_k = j; } if (min_dist > sqrt(sum)) { min_dist = sqrt(sum); min_k = j; } } if (ClusteringValues[i] != min_k) { ClusteringValues[i] = min_k; change = true; } } return change; }*/ /*void updateCentroids(double *PointValues, double *KCentroids, double *ClusteringValues){ double *updatingK; updatingK.resize(KCentroids.size()); for (int i = 0; i<ClusteringValues.size(); ++i) { vector<double> AddingK; for (int j = 0; j<PointValues[i].size(); ++j) { AddingK.push_back(PointValues[i*total_values+j]);//AddingK.push_back(PointValues[i][j]); } for (int j = 0; j<AddingK.size(); ++j) { updatingK[ClusteringValues[i]].push_back(AddingK[j]); } } vector<double> KUpdated(total_values,0); for (int i = 0; 
i<updatingK.size(); ++i) { vector<double> KUpdated(total_values,0); for (int j = 0; j<updatingK[i].size(); ++j) { KUpdated[j%total_values] += updatingK[i][j]; } if (updatingK[i].size() > 0) { for (int j = 0; j<KUpdated.size(); ++j) { KUpdated[j] /= (updatingK[i].size()/total_values); } KCentroids[i] = KUpdated; } } }*/ void printClusters(double *PointValues, double *KCentroids, double *ClusteringValues) { for (int i = 0; i<K; ++i) { cout << "Centroid " << i << ": "; for (int j = 0; j<total_values; ++j) { int ind = i * total_values + j; cout << KCentroids[ind] << " "; } cout << endl; } for (int i = 0; i<total_points; ++i) { cout << "Point " << i << ": "; for (int j = 0; j<total_values; ++j) { int ind = i * total_values + j; cout << PointValues[ind] << " "; } cout << "is located on cluster: " << ClusteringValues[i] << endl; } } int error(float a, float b) { if (abs (a - b) / a > 0.000001) return 1; else return 0; } void CheckCudaError(char sms[], int line) { cudaError_t error; error = cudaGetLastError(); if (error) { printf("(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(error), __FILE__, line); exit(EXIT_FAILURE); } }
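// A small host-side reference for the updateCentroids kernel above, in the
// spirit of the commented-out validation code: it mirrors the kernel's
// flattened indexing (KCentroids[k*total_values + j] and
// PointValues[i*total_values + j]) so the two results can be compared
// element by element, up to float-vs-double rounding.  The function name is
// an assumption; it is not part of the original program.
void updateCentroidsHost(const double* PointValues, double* KCentroids,
                         const double* ClusteringValues,
                         int total_points, int total_values, int K) {
  for (int k = 0; k < K; ++k) {
    for (int j = 0; j < total_values; ++j) {
      double sum = 0.0;
      int count = 0;
      for (int i = 0; i < total_points; ++i) {
        if ((int)ClusteringValues[i] == k) {
          sum += PointValues[i * total_values + j];
          ++count;
        }
      }
      // Like the kernel, this divides even when a cluster is empty
      // (count == 0), which yields inf/NaN; real code should guard this.
      KCentroids[k * total_values + j] = sum / count;
    }
  }
}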
cb68398b532bb0f1ab8eb93d681f34a39b6e0830.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define MAX_ERR 1e-6 // Random array generator (generates float between 0 - 256 for each entry in the array) float* RandArray(const int size) { srand((unsigned)time(NULL)); static float* r; r = NULL; r = (float*)malloc(size * sizeof(float)); for(int i=0; i<size; i++) // r[i] = (float)rand()/(float)(RAND_MAX/256); r[i] = 1; return r; } __global__ void conv_layer(float *output_fm_g, float *input_fm_g, float *filter_kernel_g, int N, int M, int F, int E, int R, int S, int C, int H, int W, int U) { int start_idx_x= blockIdx.x*(blockDim.x)+ threadIdx.x; //Output X and Output Y and Output Z for 1 batch int start_idx_y= blockIdx.y*(blockDim.y)+ threadIdx.y; int start_idx_z= blockIdx.z; //N is for Batch //M is for Output Filter Map channel (Z dimension) //F is for Output Y dimension //E is for Output X dimension //R and S are for filter //C is for input striding int m=start_idx_z; int f=start_idx_y; int e=start_idx_x; float temp_output =0; for(int n=0;n<N;n++) { if((m<M)&&(f<F)&&(e<E)) { temp_output=1; //bias for(int i=0;i<R;i++) { for(int j=0;j<S;j++) { for(int k=0;k<C;k++) { temp_output += input_fm_g[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel_g[C*R*S*m + R*S*k + S*i + j]; } } } output_fm_g[M*F*E*n + F*E*m + E*f + e]=temp_output; } } } int main( int argc, char *argv[] ) { int N ; // input batch size int M ; // num of filters int C ; // num of channels int H ; // input height int W ; // input height and weight int R ; // kernel height int S ; // kernel weight int E ; // output FMAP height int F ; // output FMAP weight int U ; // convolution stride float *input_fm_g; float *filter_kernel_g; float *output_fm_g; //CHANGE BATCH SIZE int layer_num; if(argc == 1) printf("Error No Parameters passed\n"); N=atoi(argv[1]); printf("N(Number of Batches) = %d\n ",N); layer_num=atoi(argv[2]); printf("Layer= %d\n",layer_num); if (layer_num==1) { //FIRST LAYER M=96,C=3,H=227,W=227,R=11,S=11,E=55,F=55,U=4; //printf("First Layer\n"); } else if (layer_num==2) { //SECOND LAYER M=256,C=96,H=31,W=31,R=5,S=5,E=27,F=27,U=1; //printf("Second Layer\n"); } else if (layer_num==3) { //THIRD LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 254, U = 1; //printf("Third Layer\n"); } else if (layer_num==4) { //FOURTH LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fourth Layer\n"); } else if (layer_num==5) { //FIFTH LAYER M = 256, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fifth Layer\n"); } else printf("Invalid Layer Number Input\n"); //Nth LAYER //Allocating CPU and GPU Memory hipMallocManaged((void**)&input_fm_g, sizeof(float) * N*C*H*W); hipMallocManaged((void**)&filter_kernel_g, sizeof(float) * M*C*R*S); hipMallocManaged((void**)&output_fm_g, sizeof(float) * N*M*E*F); //Assigning Input Image data for(int n=0;n<N;n++) { for(int c=0;c<C;c++) { for(int h=0;h<H;h++) { for(int w=0;w<W;w++) { input_fm_g[C*H*W*n + H*W*c + W*h + w] = 1.00f; //(float)rand()/(float)(RAND_MAX/256); } } } } //Assigning Kernel Data for(int m=0;m<M;m++) { for(int c=0;c<C;c++) { for(int r=0;r<R;r++) { for(int s=0;s<S;s++) { // filter_kernel[M*m + C*c + R*r + S*s] = 1.00f; filter_kernel_g[C*R*S*m + R*S*c + S*r + s] = 1.00f; //(float)rand()/(float)(RAND_MAX/256); } } } } dim3 block_2d_dimension(16,16,1); int ceil1_E = ceil((double)E/16.0); int 
ceil1_F = ceil((double)F/16.0); int ceil1_M = ceil((double)M); dim3 grid_3d_dimension(ceil1_E,ceil1_F,ceil1_M); //printf("Dimensions are %d %d %d \n",ceil1_E,ceil1_F,ceil1_M); // Launch kernel hipLaunchKernelGGL(( conv_layer), dim3(grid_3d_dimension), dim3(block_2d_dimension) , 0, 0, output_fm_g, input_fm_g, filter_kernel_g, N, M, F, E, R, S, C, H, W, U); hipDeviceSynchronize(); printf("%f \n",output_fm_g[0]); //Done with Kernel //Freeing the Kernel hipFree(input_fm_g); hipFree(output_fm_g),hipFree(filter_kernel_g); // END OF Nth LAYER //VALIDATION CODE // float *output_fm_v; // output_fm_v = (float*)malloc(sizeof(float)*(N*M*E*F)); // //VALIDATION CODE // for(int n=0;n<N;n++) // { // for(int m=0;m<M;m++) { // for(int f=0;f<F;f++) { // for(int e=0;e<E;e++) { // //output_fm[N*n + M*m + F*f + E*e]=0; // output_fm_v[M*F*E*n + F*E*m + E*f + e]=0; // for(int i=0;i<R;i++) { // for(int j=0;j<S;j++) { // for(int k=0;k<C;k++) { // output_fm_v[M*F*E*n + F*E*m + E*f + e] += input_fm[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel[C*R*S*m + R*S*k + S*i + j]; // // printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // //printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // } // // Verification // for(int i = 0; i < N*M*F*E ; i++) // { // assert(fabs(output_fm_v[i] - output_fm[i] ) < MAX_ERR); // } // printf("output_fm_v[0] = %f\n", output_fm_v[0]); // printf("PASSED\n"); // //int op_index; // // // //Saving To File // int op_index; // FILE *file1 = fopen("Output_Toeplitz.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file1,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file1,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f + e; // // int output=(int)output_fm[op_index]; // fprintf(file1,"%f ",output_fm[op_index]); // } // fprintf(file1,"\n"); // } // } // } // FILE *file2 = fopen("Output_Toeplitz_v.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file2,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file2,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f+e; // //int output=(int)output_fm_v[op_index]; // fprintf(file2,"%f ",output_fm_v[op_index]); // } // fprintf(file2,"\n"); // } // } // } return 0; }
cb68398b532bb0f1ab8eb93d681f34a39b6e0830.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda.h> #define MAX_ERR 1e-6 // Random array generator (generates float between 0 - 256 for each entry in the array) float* RandArray(const int size) { srand((unsigned)time(NULL)); static float* r; r = NULL; r = (float*)malloc(size * sizeof(float)); for(int i=0; i<size; i++) // r[i] = (float)rand()/(float)(RAND_MAX/256); r[i] = 1; return r; } __global__ void conv_layer(float *output_fm_g, float *input_fm_g, float *filter_kernel_g, int N, int M, int F, int E, int R, int S, int C, int H, int W, int U) { int start_idx_x= blockIdx.x*(blockDim.x)+ threadIdx.x; //Output X and Output Y and Output Z for 1 batch int start_idx_y= blockIdx.y*(blockDim.y)+ threadIdx.y; int start_idx_z= blockIdx.z; //N is for Batch //M is for Output Filter Map channel (Z dimension) //F is for Output Y dimension //E is for Output X dimension //R and S are for filter //C is for input striding int m=start_idx_z; int f=start_idx_y; int e=start_idx_x; float temp_output =0; for(int n=0;n<N;n++) { if((m<M)&&(f<F)&&(e<E)) { temp_output=1; //bias for(int i=0;i<R;i++) { for(int j=0;j<S;j++) { for(int k=0;k<C;k++) { temp_output += input_fm_g[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel_g[C*R*S*m + R*S*k + S*i + j]; } } } output_fm_g[M*F*E*n + F*E*m + E*f + e]=temp_output; } } } int main( int argc, char *argv[] ) { int N ; // input batch size int M ; // num of filters int C ; // num of channels int H ; // input height int W ; // input height and weight int R ; // kernel height int S ; // kernel weight int E ; // output FMAP height int F ; // output FMAP weight int U ; // convolution stride float *input_fm_g; float *filter_kernel_g; float *output_fm_g; //CHANGE BATCH SIZE int layer_num; if(argc == 1) printf("Error No Parameters passed\n"); N=atoi(argv[1]); printf("N(Number of Batches) = %d\n ",N); layer_num=atoi(argv[2]); printf("Layer= %d\n",layer_num); if (layer_num==1) { //FIRST LAYER M=96,C=3,H=227,W=227,R=11,S=11,E=55,F=55,U=4; //printf("First Layer\n"); } else if (layer_num==2) { //SECOND LAYER M=256,C=96,H=31,W=31,R=5,S=5,E=27,F=27,U=1; //printf("Second Layer\n"); } else if (layer_num==3) { //THIRD LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 254, U = 1; //printf("Third Layer\n"); } else if (layer_num==4) { //FOURTH LAYER M = 384, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fourth Layer\n"); } else if (layer_num==5) { //FIFTH LAYER M = 256, F = 13, E = 13, R = 3, S = 3, H = 15, W = 15, C = 384, U = 1; //printf("Fifth Layer\n"); } else printf("Invalid Layer Number Input\n"); //Nth LAYER //Allocating CPU and GPU Memory cudaMallocManaged((void**)&input_fm_g, sizeof(float) * N*C*H*W); cudaMallocManaged((void**)&filter_kernel_g, sizeof(float) * M*C*R*S); cudaMallocManaged((void**)&output_fm_g, sizeof(float) * N*M*E*F); //Assigning Input Image data for(int n=0;n<N;n++) { for(int c=0;c<C;c++) { for(int h=0;h<H;h++) { for(int w=0;w<W;w++) { input_fm_g[C*H*W*n + H*W*c + W*h + w] = 1.00f; //(float)rand()/(float)(RAND_MAX/256); } } } } //Assigning Kernel Data for(int m=0;m<M;m++) { for(int c=0;c<C;c++) { for(int r=0;r<R;r++) { for(int s=0;s<S;s++) { // filter_kernel[M*m + C*c + R*r + S*s] = 1.00f; filter_kernel_g[C*R*S*m + R*S*c + S*r + s] = 1.00f; //(float)rand()/(float)(RAND_MAX/256); } } } } dim3 block_2d_dimension(16,16,1); int ceil1_E = ceil((double)E/16.0); int ceil1_F = ceil((double)F/16.0); int ceil1_M = ceil((double)M); dim3 
grid_3d_dimension(ceil1_E,ceil1_F,ceil1_M); //printf("Dimensions are %d %d %d \n",ceil1_E,ceil1_F,ceil1_M); // Launch kernel conv_layer<<< grid_3d_dimension, block_2d_dimension >>>( output_fm_g, input_fm_g, filter_kernel_g, N, M, F, E, R, S, C, H, W, U); cudaDeviceSynchronize(); printf("%f \n",output_fm_g[0]); //Done with Kernel //Freeing the Kernel cudaFree(input_fm_g); cudaFree(output_fm_g),cudaFree(filter_kernel_g); // END OF Nth LAYER //VALIDATION CODE // float *output_fm_v; // output_fm_v = (float*)malloc(sizeof(float)*(N*M*E*F)); // //VALIDATION CODE // for(int n=0;n<N;n++) // { // for(int m=0;m<M;m++) { // for(int f=0;f<F;f++) { // for(int e=0;e<E;e++) { // //output_fm[N*n + M*m + F*f + E*e]=0; // output_fm_v[M*F*E*n + F*E*m + E*f + e]=0; // for(int i=0;i<R;i++) { // for(int j=0;j<S;j++) { // for(int k=0;k<C;k++) { // output_fm_v[M*F*E*n + F*E*m + E*f + e] += input_fm[C*H*W*n + H*W*k + (U*f+i)*W + (U*e+j)] * filter_kernel[C*R*S*m + R*S*k + S*i + j]; // // printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // //printf("%f ",output_fm_v[M*F*E*n + F*E*m + E*f + e]); // } // } // } // } // // Verification // for(int i = 0; i < N*M*F*E ; i++) // { // assert(fabs(output_fm_v[i] - output_fm[i] ) < MAX_ERR); // } // printf("output_fm_v[0] = %f\n", output_fm_v[0]); // printf("PASSED\n"); // //int op_index; // // // //Saving To File // int op_index; // FILE *file1 = fopen("Output_Toeplitz.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file1,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file1,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f + e; // // int output=(int)output_fm[op_index]; // fprintf(file1,"%f ",output_fm[op_index]); // } // fprintf(file1,"\n"); // } // } // } // FILE *file2 = fopen("Output_Toeplitz_v.txt","wb"); // for(int n=0;n<N;n++){ // fprintf(file2,"%d Output Batch\n",n); // for(int m=0;m<M;m++){ // fprintf(file2,"%d Output Channel\n",m); // for(int f=0;f<F;f++){ // for(int e=0;e<E;e++){ // op_index=M*F*E*n + F*E*m + E*f+e; // //int output=(int)output_fm_v[op_index]; // fprintf(file2,"%f ",output_fm_v[op_index]); // } // fprintf(file2,"\n"); // } // } // } return 0; }
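// A short sketch relating the hardcoded AlexNet layer tables above to the
// usual valid-convolution size formula: every (H, R, U, E) combination in
// the five layers satisfies E = (H - R) / U + 1 (and F = (W - S) / U + 1).
// A helper like the one below could replace the literals; it is an
// illustration only, not part of the original program.
static inline int conv_output_dim(int in_dim, int kernel_dim, int stride) {
  return (in_dim - kernel_dim) / stride + 1;
}
// e.g. layer 1: conv_output_dim(227, 11, 4) == 55
//      layer 2: conv_output_dim(31, 5, 1)   == 27
//      layer 5: conv_output_dim(15, 3, 1)   == 13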
ea399fe4f4d056f979463b94500c1cdba2e63624.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Triangle/triangle intersection test routine, * by Tomas Moller, 1997. * See article "A Fast Triangle-Triangle Intersection Test", * Journal of Graphics Tools, 2(2), 1997 * * Updated June 1999: removed the divisions -- a little faster now! * Updated October 1999: added {} to CROSS and SUB macros * * int NoDivTriTriIsect(float V0[3],float V1[3],float V2[3], * float U0[3],float U1[3],float U2[3]) * * parameters: vertices of triangle 1: V0,V1,V2 * vertices of triangle 2: U0,U1,U2 * result : returns 1 if the triangles intersect, otherwise 0 * */ #include <math.h> #include <device_launch_parameters.h> #include "TriangleTriangleIntersect.cuh" #define FABS(x) (float(fabs(x))) /* implement as is fastest on your machine */ /* if USE_EPSILON_TEST is true then we do a check: if |dv|<EPSILON then dv=0.0; else no check is done (which is less robust) */ #define USE_EPSILON_TEST TRUE #define EPSILON 0.000001 /* some macros */ #define CROSS(dest,v1,v2){ \ dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \ dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \ dest[2]=v1[0]*v2[1]-v1[1]*v2[0];} #define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]) #define SUB(dest,v1,v2){ \ dest[0]=v1[0]-v2[0]; \ dest[1]=v1[1]-v2[1]; \ dest[2]=v1[2]-v2[2];} /* sort so that a<=b */ #define SORT(a,b) \ if(a>b) \ { \ float c; \ c=a; \ a=b; \ b=c; \ } /* this edge to edge test is based on Franlin Antonio's gem: "Faster Line Segment Intersection", in Graphics Gems III, pp. 199-202 */ #define EDGE_EDGE_TEST(V0,U0,U1) \ Bx=U0[i0]-U1[i0]; \ By=U0[i1]-U1[i1]; \ Cx=V0[i0]-U0[i0]; \ Cy=V0[i1]-U0[i1]; \ f=Ay*Bx-Ax*By; \ d=By*Cx-Bx*Cy; \ if((f>0 && d>=0 && d<=f) || (f<0 && d<=0 && d>=f)) \ { \ e=Ax*Cy-Ay*Cx; \ if(f>0) \ { \ if(e>=0 && e<=f) return 1; \ } \ else \ { \ if(e<=0 && e>=f) return 1; \ } \ } #define EDGE_AGAINST_TRI_EDGES(V0,V1,U0,U1,U2) \ { \ float Ax,Ay,Bx,By,Cx,Cy,e,d,f; \ Ax=V1[i0]-V0[i0]; \ Ay=V1[i1]-V0[i1]; \ /* test edge U0,U1 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U0,U1); \ /* test edge U1,U2 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U1,U2); \ /* test edge U2,U1 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U2,U0); \ } #define POINT_IN_TRI(V0,U0,U1,U2) \ { \ float a,b,c,d0,d1,d2; \ /* is T1 completly inside T2? 
*/ \ /* check if V0 is inside tri(U0,U1,U2) */ \ a=U1[i1]-U0[i1]; \ b=-(U1[i0]-U0[i0]); \ c=-a*U0[i0]-b*U0[i1]; \ d0=a*V0[i0]+b*V0[i1]+c; \ \ a=U2[i1]-U1[i1]; \ b=-(U2[i0]-U1[i0]); \ c=-a*U1[i0]-b*U1[i1]; \ d1=a*V0[i0]+b*V0[i1]+c; \ \ a=U0[i1]-U2[i1]; \ b=-(U0[i0]-U2[i0]); \ c=-a*U2[i0]-b*U2[i1]; \ d2=a*V0[i0]+b*V0[i1]+c; \ if(d0*d1>0.0) \ { \ if(d0*d2>0.0) return 1; \ } \ } #define NEWCOMPUTE_INTERVALS(VV0,VV1,VV2,D0,D1,D2,D0D1,D0D2,A,B,C,X0,X1) \ { \ if(D0D1>0.0f) \ { \ /* here we know that D0D2<=0.0 */ \ /* that is D0, D1 are on the same side, D2 on the other or on the plane */ \ A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \ } \ else if(D0D2>0.0f)\ { \ /* here we know that d0d1<=0.0 */ \ A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \ } \ else if(D1*D2>0.0f || D0!=0.0f) \ { \ /* here we know that d0d1<=0.0 or that D0!=0.0 */ \ A=VV0; B=(VV1-VV0)*D0; C=(VV2-VV0)*D0; X0=D0-D1; X1=D0-D2; \ } \ else if(D1!=0.0f) \ { \ A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \ } \ else if(D2!=0.0f) \ { \ A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \ } \ else \ { \ /* triangles are coplanar */ \ return coplanar_tri_tri(N1,V0,V1,V2,U0,U1,U2); \ } \ } __host__ __device__ int NoDivTriTriIsect(float V0[3], float V1[3], float V2[3], float U0[3], float U1[3], float U2[3]) { float E1[3], E2[3]; float N1[3], N2[3], d1, d2; float du0, du1, du2, dv0, dv1, dv2; float D[3]; float isect1[2], isect2[2]; float du0du1, du0du2, dv0dv1, dv0dv2; short index; float vp0, vp1, vp2; float up0, up1, up2; float bb, cc, max; /* compute plane equation of triangle(V0,V1,V2) */ SUB(E1, V1, V0); SUB(E2, V2, V0); CROSS(N1, E1, E2); d1 = -DOT(N1, V0); /* plane equation 1: N1.X+d1=0 */ /* put U0,U1,U2 into plane equation 1 to compute signed distances to the plane*/ du0 = DOT(N1, U0) + d1; du1 = DOT(N1, U1) + d1; du2 = DOT(N1, U2) + d1; /* coplanarity robustness check */ #if USE_EPSILON_TEST==TRUE if (FABS(du0) < EPSILON) du0 = 0.0; if (FABS(du1) < EPSILON) du1 = 0.0; if (FABS(du2) < EPSILON) du2 = 0.0; #endif du0du1 = du0 * du1; du0du2 = du0 * du2; if (du0du1 > 0.0f && du0du2 > 0.0f) /* same sign on all of them + not equal 0 ? */ return 0; /* no intersection occurs */ /* compute plane of triangle (U0,U1,U2) */ SUB(E1, U1, U0); SUB(E2, U2, U0); CROSS(N2, E1, E2); d2 = -DOT(N2, U0); /* plane equation 2: N2.X+d2=0 */ /* put V0,V1,V2 into plane equation 2 */ dv0 = DOT(N2, V0) + d2; dv1 = DOT(N2, V1) + d2; dv2 = DOT(N2, V2) + d2; #if USE_EPSILON_TEST==TRUE if (FABS(dv0) < EPSILON) dv0 = 0.0; if (FABS(dv1) < EPSILON) dv1 = 0.0; if (FABS(dv2) < EPSILON) dv2 = 0.0; #endif dv0dv1 = dv0 * dv1; dv0dv2 = dv0 * dv2; if (dv0dv1 > 0.0f && dv0dv2 > 0.0f) /* same sign on all of them + not equal 0 ? 
*/ return 0; /* no intersection occurs */ /* compute direction of intersection line */ CROSS(D, N1, N2); /* compute and index to the largest component of D */ max = (float)FABS(D[0]); index = 0; bb = (float)FABS(D[1]); cc = (float)FABS(D[2]); if (bb > max) max = bb, index = 1; if (cc > max) max = cc, index = 2; /* this is the simplified projection onto L*/ vp0 = V0[index]; vp1 = V1[index]; vp2 = V2[index]; up0 = U0[index]; up1 = U1[index]; up2 = U2[index]; /* compute interval for triangle 1 */ float a, b, c, x0, x1; NEWCOMPUTE_INTERVALS(vp0, vp1, vp2, dv0, dv1, dv2, dv0dv1, dv0dv2, a, b, c, x0, x1); /* compute interval for triangle 2 */ float d, e, f, y0, y1; NEWCOMPUTE_INTERVALS(up0, up1, up2, du0, du1, du2, du0du1, du0du2, d, e, f, y0, y1); float xx, yy, xxyy, tmp; xx = x0 * x1; yy = y0 * y1; xxyy = xx * yy; tmp = a * xxyy; isect1[0] = tmp + b * x1 * yy; isect1[1] = tmp + c * x0 * yy; tmp = d * xxyy; isect2[0] = tmp + e * xx * y1; isect2[1] = tmp + f * xx * y0; SORT(isect1[0], isect1[1]); SORT(isect2[0], isect2[1]); if (isect1[1] < isect2[0] || isect2[1] < isect1[0]) return 0; return 1; } __host__ __device__ int coplanar_tri_tri(float N[3], float V0[3], float V1[3], float V2[3], float U0[3], float U1[3], float U2[3]) { float A[3]; short i0, i1; /* first project onto an axis-aligned plane, that maximizes the area */ /* of the triangles, compute indices: i0,i1. */ A[0] = FABS(N[0]); A[1] = FABS(N[1]); A[2] = FABS(N[2]); if (A[0] > A[1]) { if (A[0] > A[2]) { i0 = 1; /* A[0] is greatest */ i1 = 2; } else { i0 = 0; /* A[2] is greatest */ i1 = 1; } } else /* A[0]<=A[1] */ { if (A[2] > A[1]) { i0 = 0; /* A[2] is greatest */ i1 = 1; } else { i0 = 0; /* A[1] is greatest */ i1 = 2; } } /* test all edges of triangle 1 against the edges of triangle 2 */ EDGE_AGAINST_TRI_EDGES(V0, V1, U0, U1, U2); EDGE_AGAINST_TRI_EDGES(V1, V2, U0, U1, U2); EDGE_AGAINST_TRI_EDGES(V2, V0, U0, U1, U2); /* finally, test if tri1 is totally contained in tri2 or vice versa */ POINT_IN_TRI(V0, U0, U1, U2); POINT_IN_TRI(U0, V0, V1, V2); return 0; } //block per (inner) triangle (with Broad Phase Collision Detection) __global__ void triangle_triangle_GPU_BPCD( int3* cudaInsideTriangles, float3* cudaInsideVertices, int3* cudaOutsideTriangles, float3* cudaOutsideVertices, bool* inside, int numberOfInsideTriangles, int numberOfOutsideTriangles, float2* cudaOutsideTriangleIntervals) { // , int* cudaIntersectionsPerInsideTriangle int threadidx = threadIdx.x; float vert1_1[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].z }; float vert1_2[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].z }; float vert1_3[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].z }; float max_temp = (vert1_1[0] < vert1_2[0]) ? vert1_2[0] : vert1_1[0]; float max = ((max_temp < vert1_3[0]) ? vert1_3[0] : max_temp); float min_temp = (vert1_1[0] > vert1_2[0]) ? vert1_2[0] : vert1_1[0]; float min = ((min_temp > vert1_3[0]) ? 
vert1_3[0] : min_temp); //int numberOfIntersections = 0; while (threadidx < numberOfOutsideTriangles && *inside) { float vert2_1[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].z }; float vert2_2[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].z }; float vert2_3[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].z }; if (cudaOutsideTriangleIntervals[threadidx].x <= max && cudaOutsideTriangleIntervals[threadidx].y >= min) // Broad Phase Collision Detection (x = min, y = max) { if (NoDivTriTriIsect(vert1_1, vert1_2, vert1_3, vert2_1, vert2_2, vert2_3) == 1) { //numberOfIntersections++; *inside = false; return; //cudaIntersectionsPerInsideTriangle[tid] = 1; // Sneller als je dit weg laat in het geval de meshes elkaar niet sijden ==> dit zorgt er voor dat het trager wordt als de meshes in elkaar liggen } //if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Sneller als je dit weg laat in het geval de meshes elkaar niet sijden } threadidx += 128; //if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Sneller als je dit weg laat in het geval de meshes elkaar niet sijden } //printf("numberOfIntersections = %d\n", numberOfIntersections); //cudaIntersectionsPerInsideTriangle[tid] = numberOfIntersections; } //block per (inner) triangle (without Broad Phase Collision Detection) __global__ void triangle_triangle_GPU(int3* cudaInsideTriangles, float3* cudaInsideVertices, int3* cudaOutsideTriangles, float3* cudaOutsideVertices, bool* inside, int numberOfInsideTriangles, int numberOfOutsideTriangles) { // , int* cudaIntersectionsPerInsideTriangle int threadidx = threadIdx.x; float vert1_1[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].z }; float vert1_2[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].z }; float vert1_3[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].z }; //int numberOfIntersections = 0; while (threadidx < numberOfOutsideTriangles && *inside){ float vert2_1[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].z }; float vert2_2[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].z }; float vert2_3[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].z }; if (NoDivTriTriIsect(vert1_1, vert1_2, vert1_3, vert2_1, vert2_2, vert2_3) == 1) { //numberOfIntersections++; *inside = false; return; //cudaIntersectionsPerInsideTriangle[tid] = 1; // Sneller als je dit weg laat in het geval de meshes elkaar niet sijden ==> dit zorgt er voor dat het trager 
wordt als de meshes in elkaar liggen } threadidx += 128; //if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Sneller als je dit weg laat in het geval de meshes elkaar niet sijden } //printf("numberOfIntersections = %d\n", numberOfIntersections); //cudaIntersectionsPerInsideTriangle[tid] = numberOfIntersections; }
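// A hypothetical host-side precomputation of the per-triangle x-intervals
// consumed by triangle_triangle_GPU_BPCD above (float2.x = smallest x,
// float2.y = largest x of a triangle's three vertices, matching the
// "(x = min, y = max)" comment in the kernel).  The original precomputation
// is not shown in this file, so the function name and the std::vector
// interface are assumptions for illustration.
#include <algorithm>
#include <vector>
#include <vector_types.h>

std::vector<float2> compute_x_intervals(const std::vector<int3>& triangles,
                                        const std::vector<float3>& vertices) {
  std::vector<float2> intervals(triangles.size());
  for (size_t t = 0; t < triangles.size(); ++t) {
    const float x0 = vertices[triangles[t].x].x;
    const float x1 = vertices[triangles[t].y].x;
    const float x2 = vertices[triangles[t].z].x;
    intervals[t].x = std::min(x0, std::min(x1, x2));  // min x of the triangle
    intervals[t].y = std::max(x0, std::max(x1, x2));  // max x of the triangle
  }
  return intervals;
}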
ea399fe4f4d056f979463b94500c1cdba2e63624.cu
/* Triangle/triangle intersection test routine, * by Tomas Moller, 1997. * See article "A Fast Triangle-Triangle Intersection Test", * Journal of Graphics Tools, 2(2), 1997 * * Updated June 1999: removed the divisions -- a little faster now! * Updated October 1999: added {} to CROSS and SUB macros * * int NoDivTriTriIsect(float V0[3],float V1[3],float V2[3], * float U0[3],float U1[3],float U2[3]) * * parameters: vertices of triangle 1: V0,V1,V2 * vertices of triangle 2: U0,U1,U2 * result : returns 1 if the triangles intersect, otherwise 0 * */ #include <math.h> #include <device_launch_parameters.h> #include "TriangleTriangleIntersect.cuh" #define FABS(x) (float(fabs(x))) /* implement as is fastest on your machine */ /* if USE_EPSILON_TEST is true then we do a check: if |dv|<EPSILON then dv=0.0; else no check is done (which is less robust) */ #define USE_EPSILON_TEST TRUE #define EPSILON 0.000001 /* some macros */ #define CROSS(dest,v1,v2){ \ dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \ dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \ dest[2]=v1[0]*v2[1]-v1[1]*v2[0];} #define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]) #define SUB(dest,v1,v2){ \ dest[0]=v1[0]-v2[0]; \ dest[1]=v1[1]-v2[1]; \ dest[2]=v1[2]-v2[2];} /* sort so that a<=b */ #define SORT(a,b) \ if(a>b) \ { \ float c; \ c=a; \ a=b; \ b=c; \ } /* this edge to edge test is based on Franlin Antonio's gem: "Faster Line Segment Intersection", in Graphics Gems III, pp. 199-202 */ #define EDGE_EDGE_TEST(V0,U0,U1) \ Bx=U0[i0]-U1[i0]; \ By=U0[i1]-U1[i1]; \ Cx=V0[i0]-U0[i0]; \ Cy=V0[i1]-U0[i1]; \ f=Ay*Bx-Ax*By; \ d=By*Cx-Bx*Cy; \ if((f>0 && d>=0 && d<=f) || (f<0 && d<=0 && d>=f)) \ { \ e=Ax*Cy-Ay*Cx; \ if(f>0) \ { \ if(e>=0 && e<=f) return 1; \ } \ else \ { \ if(e<=0 && e>=f) return 1; \ } \ } #define EDGE_AGAINST_TRI_EDGES(V0,V1,U0,U1,U2) \ { \ float Ax,Ay,Bx,By,Cx,Cy,e,d,f; \ Ax=V1[i0]-V0[i0]; \ Ay=V1[i1]-V0[i1]; \ /* test edge U0,U1 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U0,U1); \ /* test edge U1,U2 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U1,U2); \ /* test edge U2,U1 against V0,V1 */ \ EDGE_EDGE_TEST(V0,U2,U0); \ } #define POINT_IN_TRI(V0,U0,U1,U2) \ { \ float a,b,c,d0,d1,d2; \ /* is T1 completly inside T2? 
*/ \ /* check if V0 is inside tri(U0,U1,U2) */ \ a=U1[i1]-U0[i1]; \ b=-(U1[i0]-U0[i0]); \ c=-a*U0[i0]-b*U0[i1]; \ d0=a*V0[i0]+b*V0[i1]+c; \ \ a=U2[i1]-U1[i1]; \ b=-(U2[i0]-U1[i0]); \ c=-a*U1[i0]-b*U1[i1]; \ d1=a*V0[i0]+b*V0[i1]+c; \ \ a=U0[i1]-U2[i1]; \ b=-(U0[i0]-U2[i0]); \ c=-a*U2[i0]-b*U2[i1]; \ d2=a*V0[i0]+b*V0[i1]+c; \ if(d0*d1>0.0) \ { \ if(d0*d2>0.0) return 1; \ } \ } #define NEWCOMPUTE_INTERVALS(VV0,VV1,VV2,D0,D1,D2,D0D1,D0D2,A,B,C,X0,X1) \ { \ if(D0D1>0.0f) \ { \ /* here we know that D0D2<=0.0 */ \ /* that is D0, D1 are on the same side, D2 on the other or on the plane */ \ A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \ } \ else if(D0D2>0.0f)\ { \ /* here we know that d0d1<=0.0 */ \ A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \ } \ else if(D1*D2>0.0f || D0!=0.0f) \ { \ /* here we know that d0d1<=0.0 or that D0!=0.0 */ \ A=VV0; B=(VV1-VV0)*D0; C=(VV2-VV0)*D0; X0=D0-D1; X1=D0-D2; \ } \ else if(D1!=0.0f) \ { \ A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \ } \ else if(D2!=0.0f) \ { \ A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \ } \ else \ { \ /* triangles are coplanar */ \ return coplanar_tri_tri(N1,V0,V1,V2,U0,U1,U2); \ } \ } __host__ __device__ int NoDivTriTriIsect(float V0[3], float V1[3], float V2[3], float U0[3], float U1[3], float U2[3]) { float E1[3], E2[3]; float N1[3], N2[3], d1, d2; float du0, du1, du2, dv0, dv1, dv2; float D[3]; float isect1[2], isect2[2]; float du0du1, du0du2, dv0dv1, dv0dv2; short index; float vp0, vp1, vp2; float up0, up1, up2; float bb, cc, max; /* compute plane equation of triangle(V0,V1,V2) */ SUB(E1, V1, V0); SUB(E2, V2, V0); CROSS(N1, E1, E2); d1 = -DOT(N1, V0); /* plane equation 1: N1.X+d1=0 */ /* put U0,U1,U2 into plane equation 1 to compute signed distances to the plane*/ du0 = DOT(N1, U0) + d1; du1 = DOT(N1, U1) + d1; du2 = DOT(N1, U2) + d1; /* coplanarity robustness check */ #if USE_EPSILON_TEST==TRUE if (FABS(du0) < EPSILON) du0 = 0.0; if (FABS(du1) < EPSILON) du1 = 0.0; if (FABS(du2) < EPSILON) du2 = 0.0; #endif du0du1 = du0 * du1; du0du2 = du0 * du2; if (du0du1 > 0.0f && du0du2 > 0.0f) /* same sign on all of them + not equal 0 ? */ return 0; /* no intersection occurs */ /* compute plane of triangle (U0,U1,U2) */ SUB(E1, U1, U0); SUB(E2, U2, U0); CROSS(N2, E1, E2); d2 = -DOT(N2, U0); /* plane equation 2: N2.X+d2=0 */ /* put V0,V1,V2 into plane equation 2 */ dv0 = DOT(N2, V0) + d2; dv1 = DOT(N2, V1) + d2; dv2 = DOT(N2, V2) + d2; #if USE_EPSILON_TEST==TRUE if (FABS(dv0) < EPSILON) dv0 = 0.0; if (FABS(dv1) < EPSILON) dv1 = 0.0; if (FABS(dv2) < EPSILON) dv2 = 0.0; #endif dv0dv1 = dv0 * dv1; dv0dv2 = dv0 * dv2; if (dv0dv1 > 0.0f && dv0dv2 > 0.0f) /* same sign on all of them + not equal 0 ? 
*/ return 0; /* no intersection occurs */ /* compute direction of intersection line */ CROSS(D, N1, N2); /* compute and index to the largest component of D */ max = (float)FABS(D[0]); index = 0; bb = (float)FABS(D[1]); cc = (float)FABS(D[2]); if (bb > max) max = bb, index = 1; if (cc > max) max = cc, index = 2; /* this is the simplified projection onto L*/ vp0 = V0[index]; vp1 = V1[index]; vp2 = V2[index]; up0 = U0[index]; up1 = U1[index]; up2 = U2[index]; /* compute interval for triangle 1 */ float a, b, c, x0, x1; NEWCOMPUTE_INTERVALS(vp0, vp1, vp2, dv0, dv1, dv2, dv0dv1, dv0dv2, a, b, c, x0, x1); /* compute interval for triangle 2 */ float d, e, f, y0, y1; NEWCOMPUTE_INTERVALS(up0, up1, up2, du0, du1, du2, du0du1, du0du2, d, e, f, y0, y1); float xx, yy, xxyy, tmp; xx = x0 * x1; yy = y0 * y1; xxyy = xx * yy; tmp = a * xxyy; isect1[0] = tmp + b * x1 * yy; isect1[1] = tmp + c * x0 * yy; tmp = d * xxyy; isect2[0] = tmp + e * xx * y1; isect2[1] = tmp + f * xx * y0; SORT(isect1[0], isect1[1]); SORT(isect2[0], isect2[1]); if (isect1[1] < isect2[0] || isect2[1] < isect1[0]) return 0; return 1; } __host__ __device__ int coplanar_tri_tri(float N[3], float V0[3], float V1[3], float V2[3], float U0[3], float U1[3], float U2[3]) { float A[3]; short i0, i1; /* first project onto an axis-aligned plane, that maximizes the area */ /* of the triangles, compute indices: i0,i1. */ A[0] = FABS(N[0]); A[1] = FABS(N[1]); A[2] = FABS(N[2]); if (A[0] > A[1]) { if (A[0] > A[2]) { i0 = 1; /* A[0] is greatest */ i1 = 2; } else { i0 = 0; /* A[2] is greatest */ i1 = 1; } } else /* A[0]<=A[1] */ { if (A[2] > A[1]) { i0 = 0; /* A[2] is greatest */ i1 = 1; } else { i0 = 0; /* A[1] is greatest */ i1 = 2; } } /* test all edges of triangle 1 against the edges of triangle 2 */ EDGE_AGAINST_TRI_EDGES(V0, V1, U0, U1, U2); EDGE_AGAINST_TRI_EDGES(V1, V2, U0, U1, U2); EDGE_AGAINST_TRI_EDGES(V2, V0, U0, U1, U2); /* finally, test if tri1 is totally contained in tri2 or vice versa */ POINT_IN_TRI(V0, U0, U1, U2); POINT_IN_TRI(U0, V0, V1, V2); return 0; } //block per (inner) triangle (with Broad Phase Collision Detection) __global__ void triangle_triangle_GPU_BPCD( int3* cudaInsideTriangles, float3* cudaInsideVertices, int3* cudaOutsideTriangles, float3* cudaOutsideVertices, bool* inside, int numberOfInsideTriangles, int numberOfOutsideTriangles, float2* cudaOutsideTriangleIntervals) { // , int* cudaIntersectionsPerInsideTriangle int threadidx = threadIdx.x; float vert1_1[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].z }; float vert1_2[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].z }; float vert1_3[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].z }; float max_temp = (vert1_1[0] < vert1_2[0]) ? vert1_2[0] : vert1_1[0]; float max = ((max_temp < vert1_3[0]) ? vert1_3[0] : max_temp); float min_temp = (vert1_1[0] > vert1_2[0]) ? vert1_2[0] : vert1_1[0]; float min = ((min_temp > vert1_3[0]) ? 
vert1_3[0] : min_temp);
//int numberOfIntersections = 0;
while (threadidx < numberOfOutsideTriangles && *inside) {
float vert2_1[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].z };
float vert2_2[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].z };
float vert2_3[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].z };
if (cudaOutsideTriangleIntervals[threadidx].x <= max && cudaOutsideTriangleIntervals[threadidx].y >= min) // Broad Phase Collision Detection (x = min, y = max)
{
if (NoDivTriTriIsect(vert1_1, vert1_2, vert1_3, vert2_1, vert2_2, vert2_3) == 1) {
//numberOfIntersections++;
*inside = false;
return;
//cudaIntersectionsPerInsideTriangle[tid] = 1; // Faster if you leave this out when the meshes do not intersect ==> it makes things slower when the meshes lie inside each other
}
//if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Faster if you leave this out when the meshes do not intersect
}
threadidx += 128;
//if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Faster if you leave this out when the meshes do not intersect
}
//printf("numberOfIntersections = %d\n", numberOfIntersections);
//cudaIntersectionsPerInsideTriangle[tid] = numberOfIntersections;
}
//block per (inner) triangle (without Broad Phase Collision Detection)
__global__ void triangle_triangle_GPU(int3* cudaInsideTriangles, float3* cudaInsideVertices, int3* cudaOutsideTriangles, float3* cudaOutsideVertices, bool* inside, int numberOfInsideTriangles, int numberOfOutsideTriangles) { // , int* cudaIntersectionsPerInsideTriangle
int threadidx = threadIdx.x;
float vert1_1[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].x].z };
float vert1_2[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].y].z };
float vert1_3[3] = { cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].x, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].y, cudaInsideVertices[cudaInsideTriangles[blockIdx.x].z].z };
//int numberOfIntersections = 0;
while (threadidx < numberOfOutsideTriangles && *inside){
float vert2_1[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].x].z };
float vert2_2[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].y].z };
float vert2_3[3] = { cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].x, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].y, cudaOutsideVertices[cudaOutsideTriangles[threadidx].z].z };
if (NoDivTriTriIsect(vert1_1, vert1_2, vert1_3, vert2_1, vert2_2, vert2_3) == 1) {
//numberOfIntersections++;
*inside = false;
return;
//cudaIntersectionsPerInsideTriangle[tid] = 1; // Faster if you leave this out when the meshes do not intersect ==> it makes things slower when the meshes lie inside each other
}
threadidx += 128;
//if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Faster if you leave this out when the meshes do not intersect
}
//printf("numberOfIntersections = %d\n", numberOfIntersections);
//cudaIntersectionsPerInsideTriangle[tid] = numberOfIntersections;
}
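Both kernels above answer the containment query by brute force: one block per inner triangle, threads striding over the outer triangles in steps of 128, and a single shared `inside` flag that is cleared as soon as any pair of triangles intersects. A minimal host-side driver is sketched below; it assumes the kernel is declared in TriangleTriangleIntersect.cuh, and the buffer names, mesh containers, and per-triangle x-interval array are illustrative, not part of the original project. The block size of 128 matches the hard-coded `threadidx += 128` stride in the kernels.

// Hypothetical host-side driver for triangle_triangle_GPU_BPCD (sketch only).
#include <cuda_runtime.h>
#include <vector>
#include "TriangleTriangleIntersect.cuh"

bool meshStaysInside(const std::vector<int3>& inTris, const std::vector<float3>& inVerts,
                     const std::vector<int3>& outTris, const std::vector<float3>& outVerts,
                     const std::vector<float2>& outIntervals) // per outer triangle: x = min x, y = max x
{
    int3 *dInTris, *dOutTris; float3 *dInVerts, *dOutVerts; float2 *dIntervals; bool *dInside;
    cudaMalloc(&dInTris, inTris.size() * sizeof(int3));
    cudaMalloc(&dInVerts, inVerts.size() * sizeof(float3));
    cudaMalloc(&dOutTris, outTris.size() * sizeof(int3));
    cudaMalloc(&dOutVerts, outVerts.size() * sizeof(float3));
    cudaMalloc(&dIntervals, outIntervals.size() * sizeof(float2));
    cudaMalloc(&dInside, sizeof(bool));
    cudaMemcpy(dInTris, inTris.data(), inTris.size() * sizeof(int3), cudaMemcpyHostToDevice);
    cudaMemcpy(dInVerts, inVerts.data(), inVerts.size() * sizeof(float3), cudaMemcpyHostToDevice);
    cudaMemcpy(dOutTris, outTris.data(), outTris.size() * sizeof(int3), cudaMemcpyHostToDevice);
    cudaMemcpy(dOutVerts, outVerts.data(), outVerts.size() * sizeof(float3), cudaMemcpyHostToDevice);
    cudaMemcpy(dIntervals, outIntervals.data(), outIntervals.size() * sizeof(float2), cudaMemcpyHostToDevice);
    bool inside = true; // assume no intersection until a thread finds one
    cudaMemcpy(dInside, &inside, sizeof(bool), cudaMemcpyHostToDevice);
    // One block per inner triangle; 128 threads to match the kernel's fixed stride.
    triangle_triangle_GPU_BPCD<<<(int)inTris.size(), 128>>>(dInTris, dInVerts, dOutTris, dOutVerts, dInside,
                                                            (int)inTris.size(), (int)outTris.size(), dIntervals);
    cudaMemcpy(&inside, dInside, sizeof(bool), cudaMemcpyDeviceToHost);
    cudaFree(dInTris); cudaFree(dInVerts); cudaFree(dOutTris); cudaFree(dOutVerts); cudaFree(dIntervals); cudaFree(dInside);
    return inside; // false as soon as any inner triangle intersected an outer one
}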
847e9b963561cb9c5f55da1290d6d5f12a2578e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif } __global__ void uplo_axpby_no_transp (const int sd, const int unit, const int bottom, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, const REAL beta, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { const int ia = offset_a + gid_0 + gid_1 * ld_a; const int ib = offset_b + gid_0 + gid_1 * ld_b; b[ib] = alpha * a[ia] + beta * b[ib]; } }
847e9b963561cb9c5f55da1290d6d5f12a2578e8.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif } __global__ void uplo_axpby_no_transp (const int sd, const int unit, const int bottom, const REAL alpha, const REAL* a, const int offset_a, const int ld_a, const REAL beta, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { const int ia = offset_a + gid_0 + gid_1 * ld_a; const int ib = offset_b + gid_0 + gid_1 * ld_b; b[ib] = alpha * a[ia] + beta * b[ib]; } }
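uplo_axpby_no_transp applies b = alpha*a + beta*b to one triangle of an sd-by-sd column-major block: with bottom = 1 the guard keeps gid_0 >= gid_1 (the lower triangle), with bottom = -1 the upper triangle, and unit == 132 makes the comparison strict so the diagonal is left untouched. The launch sketch below is an illustration only; the 16x16 block shape and the concrete unit/bottom values are read off the kernel's guard, not taken from a documented API.

// Hypothetical launch wrapper for uplo_axpby_no_transp (sketch; REAL is float here).
#include <cuda_runtime.h>

void lower_triangle_axpby(int sd, const REAL* d_a, int ld_a, REAL* d_b, int ld_b, REAL alpha, REAL beta)
{
    dim3 block(16, 16);
    dim3 grid((sd + block.x - 1) / block.x, (sd + block.y - 1) / block.y);
    // unit = 132 -> strict comparison (diagonal untouched); bottom = 1 -> lower triangle.
    uplo_axpby_no_transp<<<grid, block>>>(sd, 132, 1, alpha, d_a, 0, ld_a, beta, d_b, 0, ld_b);
}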
79bb0c3fa12e4ca5b29643fcd594cbe451f7be5b.hip
// !!! This is a file automatically generated by hipify!!! //#define GRB_USE_SEQUENTIAL #define GRB_USE_APSPIE //#define private public #include <iostream> #include <random> #include <algorithm> #include <cstdio> #include <cstdlib> #include <hip/hip_runtime_api.h> #include "graphblas/graphblas.hpp" #define BOOST_TEST_MAIN #define BOOST_TEST_MODULE matrix_suite #include <boost/test/included/unit_test.hpp> #include "test/test.hpp" BOOST_AUTO_TEST_SUITE(matrix_suite) BOOST_AUTO_TEST_CASE( matrix1 ) { std::vector<graphblas::Index> row_indices = {0, 1, 2}; std::vector<graphblas::Index> col_indices = {1, 1, 1}; std::vector<float> values = {1.0, 2.0, 3.0}; graphblas::Matrix<float> a(3, 3); a.build( row_indices, col_indices, values, 3 ); std::vector<graphblas::Index> row; std::vector<graphblas::Index> col; std::vector<float> val; a.extractTuples( row, col, val ); BOOST_ASSERT_LIST( row_indices, row ); BOOST_ASSERT_LIST( col_indices, col ); BOOST_ASSERT_LIST( values, val ); } // SpGEMM unit test // Assert: error: out of dimension tuple passed into build BOOST_AUTO_TEST_CASE( matrix2 ) { std::vector<graphblas::Index> row_indices = {2, 3, 4, 1, 3, 5, 4, 5, 6, 6, 7, 3, 6, 7, 7, 9, 10, 11, 10, 11}; std::vector<graphblas::Index> col_indices = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 9}; std::vector<float> values (20, 1.0); graphblas::Matrix<float> a(11, 11); graphblas::Matrix<float> b(11, 11); graphblas::Info err = a.build( row_indices, col_indices, values, 20 ); BOOST_ASSERT( err == graphblas::GrB_INDEX_OUT_OF_BOUNDS ); } // SpMM unit test // Assert: error out of dimension tuple passed into build BOOST_AUTO_TEST_CASE( matrix3 ) { std::vector<graphblas::Index> row_indices = {2, 3, 4, 1, 3, 5, 4, 5, 6, 6, 7, 3, 6, 7, 7, 9, 10, 11, 10, 11}; std::vector<graphblas::Index> col_indices = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 9}; std::vector<float> values (20, 1.0); graphblas::Matrix<float> a(11, 11); graphblas::Matrix<float> b(11, 11); graphblas::Info err = a.build( row_indices, col_indices, values, 20 ); BOOST_ASSERT( err == graphblas::GrB_INDEX_OUT_OF_BOUNDS ); } BOOST_AUTO_TEST_SUITE_END()
79bb0c3fa12e4ca5b29643fcd594cbe451f7be5b.cu
//#define GRB_USE_SEQUENTIAL #define GRB_USE_APSPIE //#define private public #include <iostream> #include <random> #include <algorithm> #include <cstdio> #include <cstdlib> #include <cuda_profiler_api.h> #include "graphblas/graphblas.hpp" #define BOOST_TEST_MAIN #define BOOST_TEST_MODULE matrix_suite #include <boost/test/included/unit_test.hpp> #include "test/test.hpp" BOOST_AUTO_TEST_SUITE(matrix_suite) BOOST_AUTO_TEST_CASE( matrix1 ) { std::vector<graphblas::Index> row_indices = {0, 1, 2}; std::vector<graphblas::Index> col_indices = {1, 1, 1}; std::vector<float> values = {1.0, 2.0, 3.0}; graphblas::Matrix<float> a(3, 3); a.build( row_indices, col_indices, values, 3 ); std::vector<graphblas::Index> row; std::vector<graphblas::Index> col; std::vector<float> val; a.extractTuples( row, col, val ); BOOST_ASSERT_LIST( row_indices, row ); BOOST_ASSERT_LIST( col_indices, col ); BOOST_ASSERT_LIST( values, val ); } // SpGEMM unit test // Assert: error: out of dimension tuple passed into build BOOST_AUTO_TEST_CASE( matrix2 ) { std::vector<graphblas::Index> row_indices = {2, 3, 4, 1, 3, 5, 4, 5, 6, 6, 7, 3, 6, 7, 7, 9, 10, 11, 10, 11}; std::vector<graphblas::Index> col_indices = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 9}; std::vector<float> values (20, 1.0); graphblas::Matrix<float> a(11, 11); graphblas::Matrix<float> b(11, 11); graphblas::Info err = a.build( row_indices, col_indices, values, 20 ); BOOST_ASSERT( err == graphblas::GrB_INDEX_OUT_OF_BOUNDS ); } // SpMM unit test // Assert: error out of dimension tuple passed into build BOOST_AUTO_TEST_CASE( matrix3 ) { std::vector<graphblas::Index> row_indices = {2, 3, 4, 1, 3, 5, 4, 5, 6, 6, 7, 3, 6, 7, 7, 9, 10, 11, 10, 11}; std::vector<graphblas::Index> col_indices = {1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 9}; std::vector<float> values (20, 1.0); graphblas::Matrix<float> a(11, 11); graphblas::Matrix<float> b(11, 11); graphblas::Info err = a.build( row_indices, col_indices, values, 20 ); BOOST_ASSERT( err == graphblas::GrB_INDEX_OUT_OF_BOUNDS ); } BOOST_AUTO_TEST_SUITE_END()
e2799892e7472755d2ffe988ccbcc8e0d4398441.hip
// !!! This is a file automatically generated by hipify!!!
#define FP float
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE blockDim.x
// Note: Blocks should be square.
__global__ void gpu_matrixmult(FP *a,FP *b, FP *c, int n) {
int m, e;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
extern __shared__ FP s[];
FP *A_tile = s;
FP *B_tile = &A_tile[blockDim.x * blockDim.y];
FP c_value = 0;
if(col < n && row < n) { // Make sure that we have a value of `c` to set.
// TODO make sure this works
// m is the column of A and row of B.
for(m = 0; m < n; m+= blockDim.x) {
A_tile[BLOCK_SIZE*threadIdx.x + threadIdx.y] = a[row * n + (m+threadIdx.y)];
B_tile[BLOCK_SIZE*threadIdx.x + threadIdx.y] = b[(m+threadIdx.y) * n + col];
__syncthreads();
for(e=0; e<blockDim.x; ++e) {
c_value += A_tile[BLOCK_SIZE*threadIdx.x + e] * B_tile[BLOCK_SIZE*e + threadIdx.y];
}
__syncthreads();
}
c[row*n + col] = c_value;
}
}
void cpu_matrixmult(FP *a,FP *b, FP *c, int n) {
int index, indexa, indexb;
FP cvalue;
for(int col=0;col < n; col++)
for(int row=0;row < n; row++) {
indexb = col;
index = row * n + col;
cvalue = 0.;
for (indexa = row*n; indexa < (row*n + n); indexa++, indexb+=n)
cvalue += a[indexa]*b[indexb];
c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations.
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int gpunum = 0; // Device number to use
int Grid_Dim = 1; //Grid dimension, x and y, square
int Block_Dim = 1; //Block dimension, x and y, square
int n; // matrix dimension
FP *a,*b,*c;
FP *dev_a, *dev_b, *dev_c;
int size; // number of bytes in arrays
int tile_size; // Number of bytes in a tile
hipEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
hipError_t errorcode;
// --------------------SET PARAMETERS AND DATA -----------------------
errorcode = hipGetDeviceCount(&gpucount);
if (errorcode == hipErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
else {
printf("Device count = %d\n",gpucount);
}
if (argc!=4) {
printf("Usage: matmul <matrix dim> <block dim> <grid dim>\n");
exit (-1);
}
n = atoi(argv[1]);
Block_Dim = atoi(argv[2]); // Square block
if (Block_Dim*Block_Dim > 1024) {
printf("Error, too many threads in block\n");
exit (-1);
}
Grid_Dim = atoi(argv[3]); // Square grid
if (Grid_Dim*Block_Dim < n) {
printf("Error, number of threads in x/y dimensions less than number of array elements\n");
exit (-1);
}
hipSetDevice(gpunum);
printf("Using device %d\n",gpunum);
printf("Matrix Dimension = %d\n",n);
printf("Block_Dim = %d, Grid_Dim = %d\n",Block_Dim,Grid_Dim);
dim3 Grid(Grid_Dim, Grid_Dim); //Grid structure
dim3 Block(Block_Dim, Block_Dim); //Block structure
size = n * n * sizeof(FP); // number of bytes in total in arrays
tile_size = Block_Dim * Block_Dim * sizeof(FP);
a = (FP*) malloc(size); // dynamically allocated memory for arrays on host
b = (FP*) malloc(size);
c = (FP*) malloc(size); // results from GPU
srand(12345);
int p = n; //Used here only to illustrate proper initialization for non-square case
for(i=0;i < n;i++)
for(j=0;j < p;j++) {
a[i * p + j] = (FP) rand() / (FP) RAND_MAX;
// a[i * p + j] = (FP) i+j; // may be helpful for debugging
}
for(i=0;i < p;i++)
for(j=0;j < n;j++) {
b[i * n + j] = (FP) rand() / (FP) RAND_MAX;
// b[i * n + j] = (FP) i+j; // may be helpful for debugging
}
// ------------- COMPUTATION
DONE ON GPU ---------------------------- hipMalloc((void**)&dev_a, size); // allocate memory on device hipMalloc((void**)&dev_b, size); hipMalloc((void**)&dev_c, size); hipMemcpy(dev_a, a , size ,hipMemcpyHostToDevice); hipMemcpy(dev_b, b , size ,hipMemcpyHostToDevice); hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); // hipEventSynchronize(start); // not needed hipLaunchKernelGGL(( gpu_matrixmult), dim3(Grid),dim3(Block), 2*tile_size, 0, dev_a,dev_b,dev_c,n); hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); hipMemcpy(c,dev_c, size ,hipMemcpyDeviceToHost); printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) hipEventRecord(start, 0); // use same timing // hipEventSynchronize(start); // not needed cpu_matrixmult(a,b,c, n); // do calculation on host (NOTE: This computes the diff with GPU result.) hipEventRecord(stop, 0); // instrument code to measue end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0;i < n*n;i++) { ai = (double) a[i]; bi = (double) b[i]; ci = (double) c[i]; suma += ai*ai; sumb += bi*bi; sumc += ci*ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(n*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); // -------------- clean up --------------------------------------- free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
e2799892e7472755d2ffe988ccbcc8e0d4398441.cu
#define FP float
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <math.h>
#define BLOCK_SIZE blockDim.x
// Note: Blocks should be square.
__global__ void gpu_matrixmult(FP *a,FP *b, FP *c, int n) {
int m, e;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
extern __shared__ FP s[];
FP *A_tile = s;
FP *B_tile = &A_tile[blockDim.x * blockDim.y];
FP c_value = 0;
if(col < n && row < n) { // Make sure that we have a value of `c` to set.
// TODO make sure this works
// m is the column of A and row of B.
for(m = 0; m < n; m+= blockDim.x) {
A_tile[BLOCK_SIZE*threadIdx.x + threadIdx.y] = a[row * n + (m+threadIdx.y)];
B_tile[BLOCK_SIZE*threadIdx.x + threadIdx.y] = b[(m+threadIdx.y) * n + col];
__syncthreads();
for(e=0; e<blockDim.x; ++e) {
c_value += A_tile[BLOCK_SIZE*threadIdx.x + e] * B_tile[BLOCK_SIZE*e + threadIdx.y];
}
__syncthreads();
}
c[row*n + col] = c_value;
}
}
void cpu_matrixmult(FP *a,FP *b, FP *c, int n) {
int index, indexa, indexb;
FP cvalue;
for(int col=0;col < n; col++)
for(int row=0;row < n; row++) {
indexb = col;
index = row * n + col;
cvalue = 0.;
for (indexa = row*n; indexa < (row*n + n); indexa++, indexb+=n)
cvalue += a[indexa]*b[indexb];
c[index] -= cvalue; //NOTE: This calculates the diff between CPU and GPU computations.
}
}
int main(int argc, char *argv[]) {
int i, j; // loop counters
int gpucount = 0; // Count of available GPUs
int gpunum = 0; // Device number to use
int Grid_Dim = 1; //Grid dimension, x and y, square
int Block_Dim = 1; //Block dimension, x and y, square
int n; // matrix dimension
FP *a,*b,*c;
FP *dev_a, *dev_b, *dev_c;
int size; // number of bytes in arrays
int tile_size; // Number of bytes in a tile
cudaEvent_t start, stop; // using cuda events to measure time
float elapsed_time_ms; // which is applicable for asynchronous code also
cudaError_t errorcode;
// --------------------SET PARAMETERS AND DATA -----------------------
errorcode = cudaGetDeviceCount(&gpucount);
if (errorcode == cudaErrorNoDevice) {
printf("No GPUs are visible\n");
exit(-1);
}
else {
printf("Device count = %d\n",gpucount);
}
if (argc!=4) {
printf("Usage: matmul <matrix dim> <block dim> <grid dim>\n");
exit (-1);
}
n = atoi(argv[1]);
Block_Dim = atoi(argv[2]); // Square block
if (Block_Dim*Block_Dim > 1024) {
printf("Error, too many threads in block\n");
exit (-1);
}
Grid_Dim = atoi(argv[3]); // Square grid
if (Grid_Dim*Block_Dim < n) {
printf("Error, number of threads in x/y dimensions less than number of array elements\n");
exit (-1);
}
cudaSetDevice(gpunum);
printf("Using device %d\n",gpunum);
printf("Matrix Dimension = %d\n",n);
printf("Block_Dim = %d, Grid_Dim = %d\n",Block_Dim,Grid_Dim);
dim3 Grid(Grid_Dim, Grid_Dim); //Grid structure
dim3 Block(Block_Dim, Block_Dim); //Block structure
size = n * n * sizeof(FP); // number of bytes in total in arrays
tile_size = Block_Dim * Block_Dim * sizeof(FP);
a = (FP*) malloc(size); // dynamically allocated memory for arrays on host
b = (FP*) malloc(size);
c = (FP*) malloc(size); // results from GPU
srand(12345);
int p = n; //Used here only to illustrate proper initialization for non-square case
for(i=0;i < n;i++)
for(j=0;j < p;j++) {
a[i * p + j] = (FP) rand() / (FP) RAND_MAX;
// a[i * p + j] = (FP) i+j; // may be helpful for debugging
}
for(i=0;i < p;i++)
for(j=0;j < n;j++) {
b[i * n + j] = (FP) rand() / (FP) RAND_MAX;
// b[i * n + j] = (FP) i+j; // may be helpful for debugging
}
// ------------- COMPUTATION DONE ON GPU ----------------------------
cudaMalloc((void**)&dev_a,
size); // allocate memory on device cudaMalloc((void**)&dev_b, size); cudaMalloc((void**)&dev_c, size); cudaMemcpy(dev_a, a , size ,cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b , size ,cudaMemcpyHostToDevice); cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); // cudaEventSynchronize(start); // not needed gpu_matrixmult<<<Grid,Block, 2*tile_size>>>(dev_a,dev_b,dev_c,n); cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); cudaMemcpy(c,dev_c, size ,cudaMemcpyDeviceToHost); printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // exec. time // ------------- COMPUTATION DONE ON HOST CPU ---------------------------- // DEBUGGING USE ONLY (AND FOR LIMITED NUMBERS OF TIMING RUNS) cudaEventRecord(start, 0); // use same timing // cudaEventSynchronize(start); // not needed cpu_matrixmult(a,b,c, n); // do calculation on host (NOTE: This computes the diff with GPU result.) cudaEventRecord(stop, 0); // instrument code to measue end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exec. time // ------------------- check device creates correct results ----------------- double error, suma, sumb, sumc, ai, bi, ci; suma = 0.; sumb = 0; sumc = 0; for(i=0;i < n*n;i++) { ai = (double) a[i]; bi = (double) b[i]; ci = (double) c[i]; suma += ai*ai; sumb += bi*bi; sumc += ci*ci; } suma = sqrt(suma); sumb = sqrt(sumb); sumc = sqrt(sumc); error = sumc/(n*suma*sumb); printf("Scaled error between GPU and CPU: %e\n", error); // -------------- clean up --------------------------------------- free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
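The gpu_matrixmult kernel above stages one BLOCK_SIZE-by-BLOCK_SIZE tile of A and one of B in the dynamically sized shared-memory buffer (the 2*tile_size bytes passed at launch). Note that the `if(col < n && row < n)` guard encloses a loop containing __syncthreads() and the tile loads are not bounds-checked, so the kernel is only safe when n is an exact multiple of the block edge and every thread in the block stays active. For comparison, a conventional square-tiled kernel is sketched below; it is a generic formulation with a fixed TILE width, not the assignment's reference solution.

// Generic square-tiled matrix multiply (sketch). Launch with dim3(TILE, TILE) blocks and
// a grid of ceil(n/TILE) x ceil(n/TILE); edge loads are zero-padded so every thread
// reaches __syncthreads() even when n is not a multiple of TILE.
#define TILE 16

__global__ void tiled_matrixmult(const float* a, const float* b, float* c, int n)
{
    __shared__ float As[TILE][TILE];
    __shared__ float Bs[TILE][TILE];
    int row = blockIdx.y * TILE + threadIdx.y; // output row computed by this thread
    int col = blockIdx.x * TILE + threadIdx.x; // output column computed by this thread
    float acc = 0.0f;
    for (int m = 0; m < n; m += TILE) {
        As[threadIdx.y][threadIdx.x] = (row < n && m + threadIdx.x < n) ? a[row * n + m + threadIdx.x] : 0.0f;
        Bs[threadIdx.y][threadIdx.x] = (m + threadIdx.y < n && col < n) ? b[(m + threadIdx.y) * n + col] : 0.0f;
        __syncthreads();
        for (int e = 0; e < TILE; ++e)
            acc += As[threadIdx.y][e] * Bs[e][threadIdx.x];
        __syncthreads();
    }
    if (row < n && col < n)
        c[row * n + col] = acc;
}

Keeping the barrier outside any divergent branch and padding the edge loads with zeros is what lets the same kernel handle matrix sizes that are not tile multiples.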
a037ddb6c7c3ff623810b4d8926c4b485bfdfa79.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <time.h> #include <math.h> #include <omp.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define V 8 #define E 11 #define MAX_WEIGHT 1000000 #define TRUE 1 #define FALSE 0 typedef int boolean; //Representiation of edges in the graph with both the end points typedef struct { int u; int v; } Edge; //Representation of the vertices in the graph with the node id and a boolean visited value for tracking the nodes //Represents a Vertex typedef struct { int title; boolean visited; } Vertex; //Used to find the weight of the path from u to v __device__ __host__ int findEdge(Vertex u, Vertex v, Edge *edges, int *weights) { int i; for(i = 0; i < E; i++) { if(edges[i].u == u.title && edges[i].v == v.title) { return weights[i]; } } return MAX_WEIGHT; } //Finds the branches of the vertex __global__ void Find_Vertex(Vertex *vertices, Edge *edges, int *weights, int *length, int *updateLength) { int u = threadIdx.x; if(vertices[u].visited == FALSE) { vertices[u].visited = TRUE; int v; for(v = 0; v < V; v++) { //Find the weight of the edge int weight = findEdge(vertices[u], vertices[v], edges, weights); //Checks if the weight is a candidate if(weight < MAX_WEIGHT) { //If the weight is shorter than the current weight, replace it if(updateLength[v] > length[u] + weight) { updateLength[v] = length[u] + weight; } } } } } //Updates the shortest path array (length) __global__ void Update_Paths(Vertex *vertices, int *length, int *updateLength) { int u = threadIdx.x; if(length[u] > updateLength[u]) { length[u] = updateLength[u]; vertices[u].visited = FALSE; } updateLength[u] = length[u]; } //Prints the an array of elements void printArray(int *array) { int i; for(i = 0; i < V; i++) { printf("Shortest Path to Vertex: %d is %d\n", i, array[i]); } } //Runs the program int main(void) { //Variables for the Host Device Vertex *vertices; Edge *edges; //Weight of the paths int *weights; //Len is the shortest path and updateLength is a special array for modifying updates to the shortest path int *len, *updateLength; //Pointers for the CUDA device Vertex *d_V; Edge *d_E; int *d_W; int *d_L; int *d_C; //Sizes used for allocation int sizeV = sizeof(Vertex) * V; int sizeE = sizeof(Edge) * E; int size = V * sizeof(int); //Timer initialization float runningTime; hipEvent_t timeStart, timeEnd; //Creates the timers hipEventCreate(&timeStart); hipEventCreate(&timeEnd); //Allocates space for the variables vertices = (Vertex *)malloc(sizeV); edges = (Edge *)malloc(sizeE); weights = (int *)malloc(E* sizeof(int)); len = (int *)malloc(size); updateLength = (int *)malloc(size); //----------------------------------Graph Base Test-------------------------------------// /* Edge ed[E] = {{0, 4}, {0, 6}, {0,2}, {4,6}, {4,7}, {0, 7}, {7, 3}, {3, 1}, {2,5}, {2, 1}, {5,3}}; int w[E] = {10, 90, 30, 20, 20, 50, 10, 20, 10, 10, 10}; int i = 0; for(i = 0; i < V; i++) { Vertex a = { .title =i , .visited=FALSE}; vertices[i] = a; } for(i = 0; i < E; i++) { edges[i] = ed[i]; weights[i] = w[i]; } //----------------------------------Graph Base Test-------------------------------------// //--------------------------------Graph Randomizer-----------------------------------// */ srand(19); int i = 0; for(i = 0; i < V; i++) { Vertex a = { .title =(int) i, .visited=FALSE}; vertices[i] = a; } for(i = 0; i < E; i++) { Edge e = {.u = (int) rand()%V , .v = rand()%V}; edges[i] = e; weights[i] = rand()%100; } //--------------------------------Graph 
Randomizer-----------------------------------// //Allocate space on the device hipMalloc((void**)&d_V, sizeV); hipMalloc((void**)&d_E, sizeE); hipMalloc((void**)&d_W, E * sizeof(int)); hipMalloc((void**)&d_L, size); hipMalloc((void**)&d_C, size); //Initial Node Vertex root = {0, FALSE}; //--------------------------------------Dijkstra's Algorithm--------------------------------------// root.visited = TRUE; len[root.title] = 0; updateLength[root.title] = 0; //Copy variables to the Device hipMemcpy(d_V, vertices, sizeV, hipMemcpyHostToDevice); hipMemcpy(d_E, edges, sizeE, hipMemcpyHostToDevice); hipMemcpy(d_W, weights, E * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_L, len, size, hipMemcpyHostToDevice); hipMemcpy(d_C, updateLength, size, hipMemcpyHostToDevice); int j; //Loop finds the initial paths from the node 's' for(i = 0; i < V;i++) { if(vertices[i].title != root.title) { len[(int)vertices[i].title] = findEdge(root, vertices[i], edges, weights); updateLength[vertices[i].title] = len[(int)vertices[i].title]; } else{ vertices[i].visited = TRUE; } } //Start the timer hipEventRecord(timeStart, 0); //Recopy the variables hipMemcpy(d_L, len, size, hipMemcpyHostToDevice); hipMemcpy(d_C, updateLength, size, hipMemcpyHostToDevice); //Parallelization for(i = 0; i < V; i++){ int numBlocks, numThreads; if(V>1024) { numBlocks = V/1024; numThreads = 1024 ; } else { numBlocks = 1 ; numThreads = V ; } hipLaunchKernelGGL(( Find_Vertex), dim3(numBlocks), dim3(numThreads), 0, 0, d_V, d_E, d_W, d_L, d_C); for(j = 0; j < V; j++) { hipLaunchKernelGGL(( Update_Paths), dim3(1),dim3(V), 0, 0, d_V, d_L, d_C); } } //Timing Events hipEventRecord(timeEnd, 0); hipEventSynchronize(timeEnd); hipEventElapsedTime(&runningTime, timeStart, timeEnd); //Copies the results back hipMemcpy(len, d_L, size, hipMemcpyDeviceToHost); printArray(len); //Running Time printf("Running Time: %f ms\n", runningTime); //--------------------------------------Dijkstra's Algorithm--------------------------------------// //Free up the space free(vertices); free(edges); free(weights); free(len); free(updateLength); hipFree(d_V); hipFree(d_E); hipFree(d_W); hipFree(d_L); hipFree(d_C); hipEventDestroy(timeStart); hipEventDestroy(timeEnd); }
a037ddb6c7c3ff623810b4d8926c4b485bfdfa79.cu
#include <stdio.h> #include <time.h> #include <math.h> #include <omp.h> #include <cuda.h> #include <cuda_runtime.h> #define V 8 #define E 11 #define MAX_WEIGHT 1000000 #define TRUE 1 #define FALSE 0 typedef int boolean; //Representiation of edges in the graph with both the end points typedef struct { int u; int v; } Edge; //Representation of the vertices in the graph with the node id and a boolean visited value for tracking the nodes //Represents a Vertex typedef struct { int title; boolean visited; } Vertex; //Used to find the weight of the path from u to v __device__ __host__ int findEdge(Vertex u, Vertex v, Edge *edges, int *weights) { int i; for(i = 0; i < E; i++) { if(edges[i].u == u.title && edges[i].v == v.title) { return weights[i]; } } return MAX_WEIGHT; } //Finds the branches of the vertex __global__ void Find_Vertex(Vertex *vertices, Edge *edges, int *weights, int *length, int *updateLength) { int u = threadIdx.x; if(vertices[u].visited == FALSE) { vertices[u].visited = TRUE; int v; for(v = 0; v < V; v++) { //Find the weight of the edge int weight = findEdge(vertices[u], vertices[v], edges, weights); //Checks if the weight is a candidate if(weight < MAX_WEIGHT) { //If the weight is shorter than the current weight, replace it if(updateLength[v] > length[u] + weight) { updateLength[v] = length[u] + weight; } } } } } //Updates the shortest path array (length) __global__ void Update_Paths(Vertex *vertices, int *length, int *updateLength) { int u = threadIdx.x; if(length[u] > updateLength[u]) { length[u] = updateLength[u]; vertices[u].visited = FALSE; } updateLength[u] = length[u]; } //Prints the an array of elements void printArray(int *array) { int i; for(i = 0; i < V; i++) { printf("Shortest Path to Vertex: %d is %d\n", i, array[i]); } } //Runs the program int main(void) { //Variables for the Host Device Vertex *vertices; Edge *edges; //Weight of the paths int *weights; //Len is the shortest path and updateLength is a special array for modifying updates to the shortest path int *len, *updateLength; //Pointers for the CUDA device Vertex *d_V; Edge *d_E; int *d_W; int *d_L; int *d_C; //Sizes used for allocation int sizeV = sizeof(Vertex) * V; int sizeE = sizeof(Edge) * E; int size = V * sizeof(int); //Timer initialization float runningTime; cudaEvent_t timeStart, timeEnd; //Creates the timers cudaEventCreate(&timeStart); cudaEventCreate(&timeEnd); //Allocates space for the variables vertices = (Vertex *)malloc(sizeV); edges = (Edge *)malloc(sizeE); weights = (int *)malloc(E* sizeof(int)); len = (int *)malloc(size); updateLength = (int *)malloc(size); //----------------------------------Graph Base Test-------------------------------------// /* Edge ed[E] = {{0, 4}, {0, 6}, {0,2}, {4,6}, {4,7}, {0, 7}, {7, 3}, {3, 1}, {2,5}, {2, 1}, {5,3}}; int w[E] = {10, 90, 30, 20, 20, 50, 10, 20, 10, 10, 10}; int i = 0; for(i = 0; i < V; i++) { Vertex a = { .title =i , .visited=FALSE}; vertices[i] = a; } for(i = 0; i < E; i++) { edges[i] = ed[i]; weights[i] = w[i]; } //----------------------------------Graph Base Test-------------------------------------// //--------------------------------Graph Randomizer-----------------------------------// */ srand(19); int i = 0; for(i = 0; i < V; i++) { Vertex a = { .title =(int) i, .visited=FALSE}; vertices[i] = a; } for(i = 0; i < E; i++) { Edge e = {.u = (int) rand()%V , .v = rand()%V}; edges[i] = e; weights[i] = rand()%100; } //--------------------------------Graph Randomizer-----------------------------------// //Allocate space on the device 
cudaMalloc((void**)&d_V, sizeV); cudaMalloc((void**)&d_E, sizeE); cudaMalloc((void**)&d_W, E * sizeof(int)); cudaMalloc((void**)&d_L, size); cudaMalloc((void**)&d_C, size); //Initial Node Vertex root = {0, FALSE}; //--------------------------------------Dijkstra's Algorithm--------------------------------------// root.visited = TRUE; len[root.title] = 0; updateLength[root.title] = 0; //Copy variables to the Device cudaMemcpy(d_V, vertices, sizeV, cudaMemcpyHostToDevice); cudaMemcpy(d_E, edges, sizeE, cudaMemcpyHostToDevice); cudaMemcpy(d_W, weights, E * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_L, len, size, cudaMemcpyHostToDevice); cudaMemcpy(d_C, updateLength, size, cudaMemcpyHostToDevice); int j; //Loop finds the initial paths from the node 's' for(i = 0; i < V;i++) { if(vertices[i].title != root.title) { len[(int)vertices[i].title] = findEdge(root, vertices[i], edges, weights); updateLength[vertices[i].title] = len[(int)vertices[i].title]; } else{ vertices[i].visited = TRUE; } } //Start the timer cudaEventRecord(timeStart, 0); //Recopy the variables cudaMemcpy(d_L, len, size, cudaMemcpyHostToDevice); cudaMemcpy(d_C, updateLength, size, cudaMemcpyHostToDevice); //Parallelization for(i = 0; i < V; i++){ int numBlocks, numThreads; if(V>1024) { numBlocks = V/1024; numThreads = 1024 ; } else { numBlocks = 1 ; numThreads = V ; } Find_Vertex<<<numBlocks, numThreads>>>(d_V, d_E, d_W, d_L, d_C); for(j = 0; j < V; j++) { Update_Paths<<<1,V>>>(d_V, d_L, d_C); } } //Timing Events cudaEventRecord(timeEnd, 0); cudaEventSynchronize(timeEnd); cudaEventElapsedTime(&runningTime, timeStart, timeEnd); //Copies the results back cudaMemcpy(len, d_L, size, cudaMemcpyDeviceToHost); printArray(len); //Running Time printf("Running Time: %f ms\n", runningTime); //--------------------------------------Dijkstra's Algorithm--------------------------------------// //Free up the space free(vertices); free(edges); free(weights); free(len); free(updateLength); cudaFree(d_V); cudaFree(d_E); cudaFree(d_W); cudaFree(d_L); cudaFree(d_C); cudaEventDestroy(timeStart); cudaEventDestroy(timeEnd); }
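The GPU loop above alternates two kernels V times: Find_Vertex relaxes every edge out of the currently open vertices (visited == FALSE) into updateLength, and Update_Paths commits any improvement into length and re-opens the improved vertex. For the small randomly generated graphs used here, the len array copied back can be validated against a plain host-side Dijkstra over the same Vertex/Edge/weight arrays; the reference below is a sketch added for that purpose and is not part of the original program.

// Hypothetical O(V^2) host reference using the same data layout (findEdge is __host__ as well).
void cpuDijkstra(Vertex* vertices, Edge* edges, int* weights, int source, int* dist)
{
    bool done[V];
    for (int i = 0; i < V; i++) { dist[i] = MAX_WEIGHT; done[i] = false; }
    dist[source] = 0;
    for (int iter = 0; iter < V; iter++) {
        int u = -1;
        for (int i = 0; i < V; i++) // pick the closest unsettled vertex
            if (!done[i] && (u == -1 || dist[i] < dist[u])) u = i;
        if (u == -1 || dist[u] == MAX_WEIGHT) break;
        done[u] = true;
        for (int v = 0; v < V; v++) { // relax every edge leaving u
            int w = findEdge(vertices[u], vertices[v], edges, weights);
            if (w < MAX_WEIGHT && dist[u] + w < dist[v]) dist[v] = dist[u] + w;
        }
    }
}

Comparing dist with the len array copied back after the kernel loop gives a quick correctness check for small V.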
c05f0b0e009d05d25682e616dcbe213478090ac2.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include "common.h" #include "mlp.h" #include "device_launch_parameters.h" #include <thrust/device_vector.h> #include <thrust/fill.h> #include <fstream> #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) #define blockSize 128 int number_of_instances; int number_of_features; int hidden_layer_size; int number_of_classes; float* learning_rate; float *weight_input_hidden; float* weight_hidden_output; float *weight_input_hidden_gradient; float* weight_hidden_output_gradient; float* dev_input; float* dev_true_labels; float* dev_test_input; float* output; float* hidden; float* output_non_linear; float* hidden_non_linear; float* temp_loss; float* loss_per_epoch; float* all_losses; //Print matrix A(nr_rows_A, nr_cols_A) storage in column-major format void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) { for (int i = 0; i < nr_rows_A; ++i) { for (int j = 0; j < nr_cols_A; ++j) { printf("%f ", A[j * nr_rows_A + i]); } printf("\n"); } printf("\n"); } //Print matrix A(nr_rows_A, nr_cols_A) storage in column-major format void print_matrix_to_file(const float *A, int nr_rows_A, int nr_cols_A, std::string filename) { std::ofstream f(filename); if (f.is_open()) { for (int i = 0; i < nr_rows_A; ++i) { for (int j = 0; j < nr_cols_A; ++j) { //printf("%f ", A[j * nr_rows_A + i]); f << A[j * nr_rows_A + i] << " "; } f << "\n"; } f << "\n"; } f.close(); } // REF: https://solarianprogrammer.com/2012/05/31/matrix-multiplication-cuda-cublas-hiprand-thrust/ // Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) { // Create a pseudo-random number generator } // Multiply the arrays A and B on GPU and save the result in C // C(m,n) = A(m,k) * B(k,n) void gpu_blas_mmul(hipblasHandle_t &handle, const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda = m, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Do the actual multiplication hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); } __global__ void sigmoid(int n, float const *input, float *output) { /* Computes the value of the sigmoid function f(x) = 1/(1 + e^-x). 
Inputs: input: array output: array, the results of the computation are to be stored here */ const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } output[index] = 1.0 / (1.0 + ::exp(-input[index])); } __global__ void exponential(int n, float* input, float* output) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } output[index] = ::exp(input[index]); } __global__ void upSweepOptimized(int n, int d, float* A) { int index = threadIdx.x + (blockIdx.x * blockDim.x); int other_index = 1 << d; int stride = other_index * 2; int new_index = stride * index; if (new_index >= n) { return; } A[new_index + stride - 1] += A[new_index + other_index - 1]; } __global__ void intermediate_calculation(int n, float* temp, float* h) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } temp[index] *= (h[index] * (1 - h[index])); } __global__ void matrix_subtraction(int n, float* A, float* B, float* C) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } C[index] = A[index] - B[index]; } __global__ void multiply_by_constant(int n, float* A, float* x) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } A[index] *= (*x); } void getArraySum(int n, float* input, float* sum) { float* padded_idata; int padded_size = 1 << (ilog2ceil(n)); hipMalloc((void**)&padded_idata, padded_size * sizeof(float)); checkCUDAErrorWithLine("hipMalloc padded_idata failed!"); hipMemset(padded_idata, 0, padded_size * sizeof(float)); hipMemcpy(padded_idata, input, sizeof(float) * n, hipMemcpyDeviceToDevice); int iterations = ilog2(padded_size); int number_of_threads = padded_size; for (int d = 0; d < iterations; d++) { number_of_threads /= 2; dim3 fullBlocksPerGridUpSweep((number_of_threads + blockSize - 1) / blockSize); upSweepOptimized << <fullBlocksPerGridUpSweep, blockSize >> > (padded_size, d, padded_idata); } hipMemcpy(sum, padded_idata + padded_size - 1, sizeof(float), hipMemcpyDeviceToDevice); hipFree(padded_idata); } __global__ void normalize(int n, float* input, float* sum) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } /*if(index == 1) printf("Sum: %f \n", *sum);*/ input[index] /= (*sum); } __global__ void cross_entropy_loss(int n, float* true_label, float* predicted, float* temp) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } temp[index] = -1 * (true_label[index] * ::log(predicted[index])); } __global__ void add(float* a, float* b) { *a = (*a) + (*b); } __global__ void matrix_normalization(int n, float* A, float alpha, float beta) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= n) { return; } A[index] = (A[index] * alpha) - beta; } void softmax(int n, float* input, float* softmax_output) { dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); exponential<< <fullBlocksPerGrid, blockSize >> >(n, input, softmax_output); float* sum; hipMalloc((void**)&sum, sizeof(float)); checkCUDAErrorWithLine("hipMalloc sum failed!"); getArraySum(n, softmax_output, sum); normalize << <fullBlocksPerGrid, blockSize >> > (n, softmax_output, sum); } namespace CharacterRecognition { using Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } //Performs one complete forward pass. 
Updates arrays - hidden and output void forward_pass(hipblasHandle_t &handle, float* input, int instance_number) { //Compute hidden layer float* current_input; hipMalloc((void**)&current_input, number_of_features * sizeof(float)); checkCUDAErrorWithLine("hipMalloc current_input failed!"); hipMemcpy(current_input, input + (instance_number * number_of_features), sizeof(float) * number_of_features, hipMemcpyDeviceToDevice); /*float* output_to_print = (float *)malloc(number_of_features * sizeof(float)); hipMemcpy(output_to_print, current_input, sizeof(float) * number_of_features, hipMemcpyDeviceToHost); printf("Current Input: \n"); print_matrix(output_to_print, number_of_features, 1);*/ gpu_blas_mmul(handle, current_input, weight_input_hidden, hidden, 1, number_of_features, hidden_layer_size); /*float* output_to_print1 = (float *)malloc(hidden_layer_size * sizeof(float)); hipMemcpy(output_to_print1, hidden, sizeof(float) * hidden_layer_size, hipMemcpyDeviceToHost); printf("Hidden layer: \n"); print_matrix(output_to_print1, hidden_layer_size, 1);*/ //Compute sigmoid if hidden layer dim3 fullBlocksPerGrid((hidden_layer_size + blockSize - 1) / blockSize); sigmoid << <fullBlocksPerGrid, blockSize >> > (hidden_layer_size, hidden, hidden_non_linear); /*float* output_to_print2 = (float *)malloc(hidden_layer_size * sizeof(float)); hipMemcpy(output_to_print2, hidden_non_linear, sizeof(float) * hidden_layer_size, hipMemcpyDeviceToHost); printf("Hidden layer after sigmoid: \n"); print_matrix(output_to_print2, hidden_layer_size, 1);*/ //Compute output layer gpu_blas_mmul(handle, hidden_non_linear, weight_hidden_output, output, 1, hidden_layer_size, number_of_classes); /*float* output_to_print3 = (float *)malloc(number_of_classes * sizeof(float)); hipMemcpy(output_to_print3, output, sizeof(float) * number_of_classes, hipMemcpyDeviceToHost); printf("Output layer: \n"); print_matrix(output_to_print3, number_of_classes, 1);*/ //Compute softmax of output layer softmax(number_of_classes, output, output_non_linear); /*float* output_to_print4 = (float *)malloc(number_of_classes * sizeof(float)); hipMemcpy(output_to_print4, output_non_linear, sizeof(float) * number_of_classes, hipMemcpyDeviceToHost); printf("After Softmax: \n"); print_matrix(output_to_print4, number_of_classes, 1);*/ } //Returns the loss computed for the given iteration void compute_loss(float* true_output, float* loss) { float* temp; hipMalloc((void**)&temp, number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc temp failed!"); dim3 fullBlocksPerGridUpSweep((number_of_classes + blockSize - 1) / blockSize); cross_entropy_loss << <fullBlocksPerGridUpSweep, blockSize >> > (number_of_classes, true_output, output_non_linear, temp); getArraySum(number_of_classes, temp, loss); hipFree(temp); } //Computes the gradient for the current pass. 
Updates - weight_input_hidden_gradient and weight_hidden_output_gradient void compute_gradients(hipblasHandle_t &handle, float* true_output, float* input, int instance_number) { float* current_input; hipMalloc((void**)&current_input, number_of_features * sizeof(float)); checkCUDAErrorWithLine("hipMalloc current_input failed!"); hipMemcpy(current_input, input + (instance_number * number_of_features), sizeof(float) * number_of_features, hipMemcpyDeviceToDevice); float* current_output; hipMalloc((void**)&current_output, number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc current_output failed!"); hipMemcpy(current_output, true_output + (instance_number * number_of_classes), sizeof(float) * number_of_classes, hipMemcpyDeviceToDevice); //Compute gradient w.r.t weights between hidden and output layer float* temp; hipMalloc((void**)&temp, number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc temp failed!"); dim3 fullBlocksPerGridUpSweep((number_of_classes + blockSize - 1) / blockSize); matrix_subtraction << <fullBlocksPerGridUpSweep, blockSize >> > (number_of_classes, output_non_linear, current_output, temp); gpu_blas_mmul(handle, hidden_non_linear, temp, weight_hidden_output_gradient, hidden_layer_size, 1, number_of_classes); //Compute gradient w.r.t. weights between input and hidden layer float* temp1; hipMalloc((void**)&temp1, hidden_layer_size * sizeof(float)); checkCUDAErrorWithLine("hipMalloc temp1 failed!"); gpu_blas_mmul(handle, weight_hidden_output, temp, temp1, hidden_layer_size, number_of_classes, 1); dim3 fullBlocksPerGrid((hidden_layer_size + blockSize - 1) / blockSize); intermediate_calculation << <fullBlocksPerGrid, blockSize >> > (hidden_layer_size, temp1, hidden_non_linear); gpu_blas_mmul(handle, current_input, temp1, weight_input_hidden_gradient, number_of_features, 1, hidden_layer_size); //Compute loss hipMalloc((void**)&temp_loss, sizeof(float)); checkCUDAErrorWithLine("hipMalloc loss failed!"); compute_loss(current_output, temp_loss); hipFree(temp); hipFree(temp1); hipFree(current_input); hipFree(current_output); } //Updates the weights according to the learning rate. Updates - weight_input_hidden and weight_hidden_output void update_weights() { dim3 fullBlocksPerGridUpSweep(((hidden_layer_size * number_of_classes) + blockSize - 1) / blockSize); multiply_by_constant << <fullBlocksPerGridUpSweep, blockSize >> > (hidden_layer_size * number_of_classes, weight_hidden_output_gradient, learning_rate); matrix_subtraction << <fullBlocksPerGridUpSweep, blockSize >> > (hidden_layer_size * number_of_classes, weight_hidden_output, weight_hidden_output_gradient, weight_hidden_output); dim3 fullBlocksPerGrid(((number_of_features * hidden_layer_size) + blockSize - 1) / blockSize); multiply_by_constant << <fullBlocksPerGrid, blockSize >> > (number_of_features * hidden_layer_size, weight_input_hidden_gradient, learning_rate); matrix_subtraction << <fullBlocksPerGrid, blockSize >> > (number_of_features * hidden_layer_size, weight_input_hidden, weight_input_hidden_gradient, weight_input_hidden); } //To initialize network parameters like size of hidden and output layers and initialize weight matrices. 
void initialize_network(int instances, int features, int classes, int hidden_size, float lr) { number_of_instances = instances; number_of_features = features; number_of_classes = classes; hidden_layer_size = hidden_size; printf("%d %d %d \n", number_of_classes, hidden_layer_size, learning_rate); //Allocate memory for weight matrices on device hipMalloc((void**)&weight_input_hidden, number_of_features * hidden_layer_size * sizeof(float)); checkCUDAErrorWithLine("hipMalloc weight_input_hidden failed!"); hipMalloc((void**)&weight_hidden_output, hidden_layer_size * number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc weight_hidden_output failed!"); //Randomnly initialize weights hiprandGenerator_t prng; hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT); // Set the seed for the random number generator using the system clock //hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); hiprandSetPseudoRandomGeneratorSeed(prng, 7); // Fill the array with random numbers on the device hiprandGenerateUniform(prng, weight_input_hidden, number_of_features * hidden_layer_size); hiprandGenerateUniform(prng, weight_hidden_output, hidden_layer_size * number_of_classes); dim3 fullBlocksPerGridUpSweep((number_of_features * hidden_layer_size + blockSize - 1) / blockSize); matrix_normalization << <fullBlocksPerGridUpSweep, blockSize >> > (number_of_features * hidden_layer_size, weight_input_hidden, 2, 1); dim3 fullBlocksPerGrid((hidden_layer_size * number_of_classes + blockSize - 1) / blockSize); matrix_normalization << <fullBlocksPerGrid, blockSize >> > (hidden_layer_size * number_of_classes, weight_hidden_output, 2, 1); //Allocate memory for hidden layer and output on device hipMalloc((void**)&hidden, hidden_layer_size * sizeof(float)); checkCUDAErrorWithLine("hipMalloc hidden failed!"); hipMalloc((void**)&output, number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc output failed!"); //Allocate memory for output of non-linear functions on device hipMalloc((void**)&hidden_non_linear, hidden_layer_size * sizeof(float)); checkCUDAErrorWithLine("hipMalloc hidden_non_linear failed!"); hipMalloc((void**)&output_non_linear, number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc output_non_linear failed!"); //Allocate memory for gradients on device hipMalloc((void**)&weight_input_hidden_gradient, number_of_features * hidden_layer_size * sizeof(float)); checkCUDAErrorWithLine("hipMalloc weight_input_hidden_gradient failed!"); hipMalloc((void**)&weight_hidden_output_gradient, hidden_layer_size * number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc weight_hidden_output_gradient failed!"); hipMalloc((void**)&learning_rate, sizeof(float)); checkCUDAErrorWithLine("hipMalloc sum failed!"); thrust::device_ptr<float> lr_ptr(learning_rate); thrust::fill(lr_ptr, lr_ptr+1, lr); //hipMemset(learning_rate, lr, sizeof(float)); } void print_predicted_label(int true_label) { float* predicted_probabilities = (float *)malloc(number_of_classes * sizeof(float)); hipMemcpy(predicted_probabilities, output_non_linear, sizeof(float) * number_of_classes, hipMemcpyDeviceToHost); //print_matrix(predicted_probabilities, number_of_classes, 1); float max = 0; int argmax = -1; for (int i = 0; i < number_of_classes; i++) { if (predicted_probabilities[i] > max) { max = predicted_probabilities[i]; argmax = i+1; } } printf("True label - %d, Predicted label: %d with probability %f\n", true_label, argmax, max); } //Returns training accuracy void train(float* input, 
float* true_labels, int number_of_epochs) { //Allocate memory for input and copy data on device hipMalloc((void**)&dev_input, number_of_instances * number_of_features * sizeof(float)); checkCUDAErrorWithLine("hipMalloc dev_input failed!"); hipMemcpy(dev_input, input, sizeof(float) * number_of_instances * number_of_features, hipMemcpyHostToDevice); //Allocate memory for true labels and copy data on device hipMalloc((void**)&dev_true_labels, number_of_instances * number_of_classes * sizeof(float)); checkCUDAErrorWithLine("hipMalloc dev_true_labels failed!"); hipMemcpy(dev_true_labels, true_labels, sizeof(float) * number_of_instances * number_of_classes, hipMemcpyHostToDevice); // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); hipMalloc((void**)&loss_per_epoch, sizeof(float)); checkCUDAErrorWithLine("hipMalloc loss_per_epoch failed!"); hipMalloc((void**)&all_losses, number_of_epochs * sizeof(float)); checkCUDAErrorWithLine("hipMalloc loss_per_epoch failed!"); for (int i = 0; i < number_of_epochs; i++) { thrust::device_ptr<float> loss_ptr(loss_per_epoch); thrust::fill(loss_ptr, loss_ptr + 1, 0); for (int j = 0; j < number_of_instances; j++) { //1. Forward Pass through network forward_pass(handle, dev_input, j); //3. Compute Gradients for all weight matrices compute_gradients(handle, dev_true_labels, dev_input, j); //Update loss for the epoch add << <1,1 >> > (loss_per_epoch, temp_loss); //4. Update weights update_weights(); //forward_pass(handle, dev_input, j); } //Print loss after each epoch float* loss_print = (float *)malloc(sizeof(float)); hipMemcpy(loss_print, loss_per_epoch, sizeof(float), hipMemcpyDeviceToHost); printf("EPOCH %d LOSS: %f \n", i, *loss_print/52); hipMemcpy(all_losses + i, loss_per_epoch, sizeof(float), hipMemcpyDeviceToDevice); } //Print weight matrices //float* output_to_print4 = (float *)malloc(number_of_features * hidden_layer_size * sizeof(float)); //hipMemcpy(output_to_print4, weight_input_hidden, sizeof(float) * number_of_features * hidden_layer_size, hipMemcpyDeviceToHost); //printf("Weights [Input - Hidden]: \n"); ////print_matrix_to_file(output_to_print4, number_of_features, hidden_layer_size, "C:\\Users\\saketk\\Project2-Number-Algorithms\\Project2-Character-Recognition\\data-set\\weights_input_hidden.xlsx"); //float* output_to_print5 = (float *)malloc(hidden_layer_size * number_of_classes * sizeof(float)); //hipMemcpy(output_to_print5, weight_hidden_output, sizeof(float) * hidden_layer_size * number_of_classes, hipMemcpyDeviceToHost); //printf("Weights [Hidden - Output]: \n"); ////print_matrix_to_file(output_to_print5, hidden_layer_size, number_of_classes, "C:\\Users\\saketk\\Project2-Number-Algorithms\\Project2-Character-Recognition\\data-set\\weights_hidden_output.xlsx"); float* all_losses_print = (float *)malloc(number_of_epochs * sizeof(float)); hipMemcpy(all_losses_print, all_losses, sizeof(float) * number_of_epochs, hipMemcpyDeviceToHost); printf("All losses \n"); print_matrix(all_losses_print, number_of_epochs, 1); // Destroy the handle hipblasDestroy(handle); } //Returns test acccuracy void test(float* test_input) { //Allocate memory for input and copy data on device hipMalloc((void**)&dev_test_input, number_of_instances * number_of_features * sizeof(float)); checkCUDAErrorWithLine("hipMalloc dev_test_input failed!"); hipMemcpy(dev_test_input, test_input, sizeof(float) * number_of_instances * number_of_features, hipMemcpyHostToDevice); // Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); for (int j = 
number_of_instances-1; j >= 0; j--) { //1. Forward Pass through network forward_pass(handle, dev_test_input, j); print_predicted_label(j+1); } } // TODO: __global__ /** * Example of use case (follow how you did it in stream compaction) */ /*void scan(int n, int *odata, const int *idata) { timer().startGpuTimer(); // TODO timer().endGpuTimer(); } */ // TODO: implement required elements for MLP sections 1 and 2 here }
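The training loop above processes one instance at a time: a forward pass (input-to-hidden GEMM plus sigmoid, hidden-to-output GEMM plus softmax), a cross-entropy loss, gradients that follow the standard softmax/cross-entropy backpropagation (output error p - y, hidden-output gradient h*(p - y)^T, input-hidden gradient x*[(W_ho*(p - y)) .* h*(1 - h)]^T), and an SGD weight update. A cheap way to sanity-check the softmax and cross-entropy kernels is a host-side reference on a tiny vector; the sketch below uses made-up three-class values and is not part of the file.

// Host-side reference for the exponential/normalize/cross_entropy_loss kernels
// (sketch for small-input validation; the 3-class numbers are made up).
#include <cmath>
#include <cstdio>

void softmax_xent_cpu(const float* logits, const float* onehot, int k, float* probs, float* loss)
{
    float sum = 0.0f;
    for (int i = 0; i < k; i++) { probs[i] = std::exp(logits[i]); sum += probs[i]; } // exponential + sum
    *loss = 0.0f;
    for (int i = 0; i < k; i++) {
        probs[i] /= sum;                          // what the normalize kernel does
        *loss += -onehot[i] * std::log(probs[i]); // cross_entropy_loss summed over classes
    }
}

int main()
{
    float logits[3] = { 1.0f, 2.0f, 0.5f }, onehot[3] = { 0.0f, 1.0f, 0.0f };
    float probs[3], loss;
    softmax_xent_cpu(logits, onehot, 3, probs, &loss);
    printf("p = [%f %f %f], loss = %f\n", probs[0], probs[1], probs[2], loss);
    // The gradient of this loss w.r.t. the logits is probs - onehot, which is exactly
    // the output-layer error term compute_gradients builds with matrix_subtraction.
    return 0;
}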
c05f0b0e009d05d25682e616dcbe213478090ac2.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <curand.h>
#include "common.h"
#include "mlp.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <fstream>

#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
#define blockSize 128

//Network dimensions (set once in initialize_network)
int number_of_instances;
int number_of_features;
int hidden_layer_size;
int number_of_classes;
float* learning_rate;

//Device-side weight matrices and their gradients
float *weight_input_hidden;
float* weight_hidden_output;
float *weight_input_hidden_gradient;
float* weight_hidden_output_gradient;

//Device-side copies of the data and intermediate activations
float* dev_input;
float* dev_true_labels;
float* dev_test_input;
float* output;
float* hidden;
float* output_non_linear;
float* hidden_non_linear;
float* temp_loss;
float* loss_per_epoch;
float* all_losses;

//Print matrix A(nr_rows_A, nr_cols_A) stored in column-major format
void print_matrix(const float *A, int nr_rows_A, int nr_cols_A) {
	for (int i = 0; i < nr_rows_A; ++i) {
		for (int j = 0; j < nr_cols_A; ++j) {
			printf("%f ", A[j * nr_rows_A + i]);
		}
		printf("\n");
	}
	printf("\n");
}

//Print matrix A(nr_rows_A, nr_cols_A) stored in column-major format to a file
void print_matrix_to_file(const float *A, int nr_rows_A, int nr_cols_A, std::string filename) {
	std::ofstream f(filename);
	if (f.is_open()) {
		for (int i = 0; i < nr_rows_A; ++i) {
			for (int j = 0; j < nr_cols_A; ++j) {
				//printf("%f ", A[j * nr_rows_A + i]);
				f << A[j * nr_rows_A + i] << " ";
			}
			f << "\n";
		}
		f << "\n";
	}
	f.close();
}

// REF: https://solarianprogrammer.com/2012/05/31/matrix-multiplication-cuda-cublas-curand-thrust/
// Fill the array A(nr_rows_A, nr_cols_A) with random numbers on GPU
// (left unimplemented and unused; initialize_network below calls cuRAND directly instead)
void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) {
	// Create a pseudo-random number generator
}

// Multiply the arrays A and B on GPU and save the result in C
// C(m,n) = A(m,k) * B(k,n)
void gpu_blas_mmul(cublasHandle_t &handle, const float *A, const float *B, float *C, const int m, const int k, const int n) {
	int lda = m, ldb = k, ldc = m;
	const float alf = 1;
	const float bet = 0;
	const float *alpha = &alf;
	const float *beta = &bet;

	// Do the actual multiplication
	cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
}

__global__ void sigmoid(int n, float const *input, float *output) {
	/* Computes the value of the sigmoid function f(x) = 1/(1 + e^-x).
	   Inputs:
	     input: array
	     output: array, the results of the computation are to be stored here
	*/
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	output[index] = 1.0 / (1.0 + std::exp(-input[index]));
}

//Element-wise exponential, used as the first step of the softmax
__global__ void exponential(int n, float* input, float* output) {
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	output[index] = std::exp(input[index]);
}

//One level of a work-efficient scan up-sweep; used here only to reduce (sum) an array
__global__ void upSweepOptimized(int n, int d, float* A) {
	int index = threadIdx.x + (blockIdx.x * blockDim.x);
	int other_index = 1 << d;
	int stride = other_index * 2;
	int new_index = stride * index;
	if (new_index >= n) {
		return;
	}
	A[new_index + stride - 1] += A[new_index + other_index - 1];
}

//temp *= h * (1 - h), i.e. multiply by the derivative of the sigmoid
__global__ void intermediate_calculation(int n, float* temp, float* h) {
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	temp[index] *= (h[index] * (1 - h[index]));
}

//Element-wise C = A - B
__global__ void matrix_subtraction(int n, float* A, float* B, float* C) {
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	C[index] = A[index] - B[index];
}

//Multiply every element of A by the scalar stored on the device at x
__global__ void multiply_by_constant(int n, float* A, float* x) {
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	A[index] *= (*x);
}

//Sum of a device array, computed with repeated up-sweeps over a power-of-two padded copy
void getArraySum(int n, float* input, float* sum) {
	float* padded_idata;
	int padded_size = 1 << (ilog2ceil(n));

	cudaMalloc((void**)&padded_idata, padded_size * sizeof(float));
	checkCUDAErrorWithLine("cudaMalloc padded_idata failed!");
	cudaMemset(padded_idata, 0, padded_size * sizeof(float));
	cudaMemcpy(padded_idata, input, sizeof(float) * n, cudaMemcpyDeviceToDevice);

	int iterations = ilog2(padded_size);
	int number_of_threads = padded_size;
	for (int d = 0; d < iterations; d++) {
		number_of_threads /= 2;
		dim3 fullBlocksPerGridUpSweep((number_of_threads + blockSize - 1) / blockSize);
		upSweepOptimized << <fullBlocksPerGridUpSweep, blockSize >> > (padded_size, d, padded_idata);
	}
	cudaMemcpy(sum, padded_idata + padded_size - 1, sizeof(float), cudaMemcpyDeviceToDevice);
	cudaFree(padded_idata);
}

//Divide every element by the (device-resident) sum, completing the softmax
__global__ void normalize(int n, float* input, float* sum) {
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	/*if(index == 1)
		printf("Sum: %f \n", *sum);*/
	input[index] /= (*sum);
}

//Per-class terms of the cross-entropy loss: -y * log(y_hat)
__global__ void cross_entropy_loss(int n, float* true_label, float* predicted, float* temp) {
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	temp[index] = -1 * (true_label[index] * std::log(predicted[index]));
}

//Single-thread accumulation of two device scalars: a += b
__global__ void add(float* a, float* b) {
	*a = (*a) + (*b);
}

//A = A * alpha - beta; used to rescale uniform [0,1) weights to [-1,1)
__global__ void matrix_normalization(int n, float* A, float alpha, float beta) {
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= n) {
		return;
	}
	A[index] = (A[index] * alpha) - beta;
}

//Softmax over a device array: exponentiate, sum, then normalize
void softmax(int n, float* input, float* softmax_output) {
	dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
	exponential << <fullBlocksPerGrid, blockSize >> > (n, input, softmax_output);

	float* sum;
	cudaMalloc((void**)&sum, sizeof(float));
	checkCUDAErrorWithLine("cudaMalloc sum failed!");
	getArraySum(n, softmax_output, sum);
	normalize << <fullBlocksPerGrid, blockSize >> > (n, softmax_output, sum);
}

namespace CharacterRecognition {
	using Common::PerformanceTimer;
	PerformanceTimer& timer() {
		static PerformanceTimer timer;
		return timer;
	}

	//Performs one complete forward pass. Updates arrays - hidden and output
	void forward_pass(cublasHandle_t &handle, float* input, int instance_number) {
		//Compute hidden layer
		float* current_input;
		cudaMalloc((void**)&current_input, number_of_features * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc current_input failed!");
		cudaMemcpy(current_input, input + (instance_number * number_of_features), sizeof(float) * number_of_features, cudaMemcpyDeviceToDevice);

		/*float* output_to_print = (float *)malloc(number_of_features * sizeof(float));
		cudaMemcpy(output_to_print, current_input, sizeof(float) * number_of_features, cudaMemcpyDeviceToHost);
		printf("Current Input: \n");
		print_matrix(output_to_print, number_of_features, 1);*/

		gpu_blas_mmul(handle, current_input, weight_input_hidden, hidden, 1, number_of_features, hidden_layer_size);

		/*float* output_to_print1 = (float *)malloc(hidden_layer_size * sizeof(float));
		cudaMemcpy(output_to_print1, hidden, sizeof(float) * hidden_layer_size, cudaMemcpyDeviceToHost);
		printf("Hidden layer: \n");
		print_matrix(output_to_print1, hidden_layer_size, 1);*/

		//Compute sigmoid of hidden layer
		dim3 fullBlocksPerGrid((hidden_layer_size + blockSize - 1) / blockSize);
		sigmoid << <fullBlocksPerGrid, blockSize >> > (hidden_layer_size, hidden, hidden_non_linear);

		/*float* output_to_print2 = (float *)malloc(hidden_layer_size * sizeof(float));
		cudaMemcpy(output_to_print2, hidden_non_linear, sizeof(float) * hidden_layer_size, cudaMemcpyDeviceToHost);
		printf("Hidden layer after sigmoid: \n");
		print_matrix(output_to_print2, hidden_layer_size, 1);*/

		//Compute output layer
		gpu_blas_mmul(handle, hidden_non_linear, weight_hidden_output, output, 1, hidden_layer_size, number_of_classes);

		/*float* output_to_print3 = (float *)malloc(number_of_classes * sizeof(float));
		cudaMemcpy(output_to_print3, output, sizeof(float) * number_of_classes, cudaMemcpyDeviceToHost);
		printf("Output layer: \n");
		print_matrix(output_to_print3, number_of_classes, 1);*/

		//Compute softmax of output layer
		softmax(number_of_classes, output, output_non_linear);

		/*float* output_to_print4 = (float *)malloc(number_of_classes * sizeof(float));
		cudaMemcpy(output_to_print4, output_non_linear, sizeof(float) * number_of_classes, cudaMemcpyDeviceToHost);
		printf("After Softmax: \n");
		print_matrix(output_to_print4, number_of_classes, 1);*/
	}

	//Returns (in *loss) the cross-entropy loss computed for the given iteration
	void compute_loss(float* true_output, float* loss) {
		float* temp;
		cudaMalloc((void**)&temp, number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc temp failed!");

		dim3 fullBlocksPerGridUpSweep((number_of_classes + blockSize - 1) / blockSize);
		cross_entropy_loss << <fullBlocksPerGridUpSweep, blockSize >> > (number_of_classes, true_output, output_non_linear, temp);
		getArraySum(number_of_classes, temp, loss);
		cudaFree(temp);
	}

	//Computes the gradient for the current pass. Updates - weight_input_hidden_gradient and weight_hidden_output_gradient
	void compute_gradients(cublasHandle_t &handle, float* true_output, float* input, int instance_number) {
		float* current_input;
		cudaMalloc((void**)&current_input, number_of_features * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc current_input failed!");
		cudaMemcpy(current_input, input + (instance_number * number_of_features), sizeof(float) * number_of_features, cudaMemcpyDeviceToDevice);

		float* current_output;
		cudaMalloc((void**)&current_output, number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc current_output failed!");
		cudaMemcpy(current_output, true_output + (instance_number * number_of_classes), sizeof(float) * number_of_classes, cudaMemcpyDeviceToDevice);

		//Compute gradient w.r.t. weights between hidden and output layer
		//(outer product of the hidden activations and the output error y_hat - y)
		float* temp;
		cudaMalloc((void**)&temp, number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc temp failed!");
		dim3 fullBlocksPerGridUpSweep((number_of_classes + blockSize - 1) / blockSize);
		matrix_subtraction << <fullBlocksPerGridUpSweep, blockSize >> > (number_of_classes, output_non_linear, current_output, temp);
		gpu_blas_mmul(handle, hidden_non_linear, temp, weight_hidden_output_gradient, hidden_layer_size, 1, number_of_classes);

		//Compute gradient w.r.t. weights between input and hidden layer
		float* temp1;
		cudaMalloc((void**)&temp1, hidden_layer_size * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc temp1 failed!");
		gpu_blas_mmul(handle, weight_hidden_output, temp, temp1, hidden_layer_size, number_of_classes, 1);
		dim3 fullBlocksPerGrid((hidden_layer_size + blockSize - 1) / blockSize);
		intermediate_calculation << <fullBlocksPerGrid, blockSize >> > (hidden_layer_size, temp1, hidden_non_linear);
		gpu_blas_mmul(handle, current_input, temp1, weight_input_hidden_gradient, number_of_features, 1, hidden_layer_size);

		//Compute loss for this instance
		cudaMalloc((void**)&temp_loss, sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc temp_loss failed!");
		compute_loss(current_output, temp_loss);

		cudaFree(temp);
		cudaFree(temp1);
		cudaFree(current_input);
		cudaFree(current_output);
	}

	//Updates the weights according to the learning rate. Updates - weight_input_hidden and weight_hidden_output
	void update_weights() {
		dim3 fullBlocksPerGridUpSweep(((hidden_layer_size * number_of_classes) + blockSize - 1) / blockSize);
		multiply_by_constant << <fullBlocksPerGridUpSweep, blockSize >> > (hidden_layer_size * number_of_classes, weight_hidden_output_gradient, learning_rate);
		matrix_subtraction << <fullBlocksPerGridUpSweep, blockSize >> > (hidden_layer_size * number_of_classes, weight_hidden_output, weight_hidden_output_gradient, weight_hidden_output);

		dim3 fullBlocksPerGrid(((number_of_features * hidden_layer_size) + blockSize - 1) / blockSize);
		multiply_by_constant << <fullBlocksPerGrid, blockSize >> > (number_of_features * hidden_layer_size, weight_input_hidden_gradient, learning_rate);
		matrix_subtraction << <fullBlocksPerGrid, blockSize >> > (number_of_features * hidden_layer_size, weight_input_hidden, weight_input_hidden_gradient, weight_input_hidden);
	}

	//Initializes the network parameters (sizes of the hidden and output layers) and the weight matrices.
	void initialize_network(int instances, int features, int classes, int hidden_size, float lr) {
		number_of_instances = instances;
		number_of_features = features;
		number_of_classes = classes;
		hidden_layer_size = hidden_size;
		//Log the configuration (classes, hidden size, learning rate)
		printf("%d %d %f \n", number_of_classes, hidden_layer_size, lr);

		//Allocate memory for weight matrices on device
		cudaMalloc((void**)&weight_input_hidden, number_of_features * hidden_layer_size * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc weight_input_hidden failed!");
		cudaMalloc((void**)&weight_hidden_output, hidden_layer_size * number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc weight_hidden_output failed!");

		//Randomly initialize weights
		curandGenerator_t prng;
		curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
		// Set the seed for the random number generator using the system clock
		//curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());
		curandSetPseudoRandomGeneratorSeed(prng, 7);
		// Fill the arrays with random numbers on the device
		curandGenerateUniform(prng, weight_input_hidden, number_of_features * hidden_layer_size);
		curandGenerateUniform(prng, weight_hidden_output, hidden_layer_size * number_of_classes);

		//Rescale the uniform [0,1) weights to [-1,1)
		dim3 fullBlocksPerGridUpSweep((number_of_features * hidden_layer_size + blockSize - 1) / blockSize);
		matrix_normalization << <fullBlocksPerGridUpSweep, blockSize >> > (number_of_features * hidden_layer_size, weight_input_hidden, 2, 1);
		dim3 fullBlocksPerGrid((hidden_layer_size * number_of_classes + blockSize - 1) / blockSize);
		matrix_normalization << <fullBlocksPerGrid, blockSize >> > (hidden_layer_size * number_of_classes, weight_hidden_output, 2, 1);

		//Allocate memory for hidden layer and output on device
		cudaMalloc((void**)&hidden, hidden_layer_size * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc hidden failed!");
		cudaMalloc((void**)&output, number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc output failed!");

		//Allocate memory for output of non-linear functions on device
		cudaMalloc((void**)&hidden_non_linear, hidden_layer_size * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc hidden_non_linear failed!");
		cudaMalloc((void**)&output_non_linear, number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc output_non_linear failed!");

		//Allocate memory for gradients on device
		cudaMalloc((void**)&weight_input_hidden_gradient, number_of_features * hidden_layer_size * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc weight_input_hidden_gradient failed!");
		cudaMalloc((void**)&weight_hidden_output_gradient, hidden_layer_size * number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc weight_hidden_output_gradient failed!");

		//Store the learning rate on the device so kernels can read it
		cudaMalloc((void**)&learning_rate, sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc learning_rate failed!");
		thrust::device_ptr<float> lr_ptr(learning_rate);
		thrust::fill(lr_ptr, lr_ptr + 1, lr);
		//cudaMemset(learning_rate, lr, sizeof(float));
	}

	//Prints the argmax of the softmax output next to the expected label
	void print_predicted_label(int true_label) {
		float* predicted_probabilities = (float *)malloc(number_of_classes * sizeof(float));
		cudaMemcpy(predicted_probabilities, output_non_linear, sizeof(float) * number_of_classes, cudaMemcpyDeviceToHost);
		//print_matrix(predicted_probabilities, number_of_classes, 1);
		float max = 0;
		int argmax = -1;
		for (int i = 0; i < number_of_classes; i++) {
			if (predicted_probabilities[i] > max) {
				max = predicted_probabilities[i];
				argmax = i + 1;
			}
		}
		printf("True label - %d, Predicted label: %d with probability %f\n", true_label, argmax, max);
	}

	//Trains the network for the given number of epochs, printing the average loss after each epoch
	void train(float* input, float* true_labels, int number_of_epochs) {
		//Allocate memory for input and copy data on device
		cudaMalloc((void**)&dev_input, number_of_instances * number_of_features * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc dev_input failed!");
		cudaMemcpy(dev_input, input, sizeof(float) * number_of_instances * number_of_features, cudaMemcpyHostToDevice);

		//Allocate memory for true labels and copy data on device
		cudaMalloc((void**)&dev_true_labels, number_of_instances * number_of_classes * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc dev_true_labels failed!");
		cudaMemcpy(dev_true_labels, true_labels, sizeof(float) * number_of_instances * number_of_classes, cudaMemcpyHostToDevice);

		// Create a handle for CUBLAS
		cublasHandle_t handle;
		cublasCreate(&handle);

		cudaMalloc((void**)&loss_per_epoch, sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc loss_per_epoch failed!");
		cudaMalloc((void**)&all_losses, number_of_epochs * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc all_losses failed!");

		for (int i = 0; i < number_of_epochs; i++) {
			//Reset the accumulated loss for this epoch
			thrust::device_ptr<float> loss_ptr(loss_per_epoch);
			thrust::fill(loss_ptr, loss_ptr + 1, 0);
			for (int j = 0; j < number_of_instances; j++) {
				//1. Forward Pass through network
				forward_pass(handle, dev_input, j);
				//2. Compute Gradients for all weight matrices (also computes the per-instance loss)
				compute_gradients(handle, dev_true_labels, dev_input, j);
				//Update loss for the epoch
				add << <1, 1 >> > (loss_per_epoch, temp_loss);
				//3. Update weights
				update_weights();
				//forward_pass(handle, dev_input, j);
			}
			//Print loss after each epoch (dividing by 52 assumes the 52-image training set)
			float* loss_print = (float *)malloc(sizeof(float));
			cudaMemcpy(loss_print, loss_per_epoch, sizeof(float), cudaMemcpyDeviceToHost);
			printf("EPOCH %d LOSS: %f \n", i, *loss_print / 52);
			cudaMemcpy(all_losses + i, loss_per_epoch, sizeof(float), cudaMemcpyDeviceToDevice);
		}

		//Print weight matrices
		//float* output_to_print4 = (float *)malloc(number_of_features * hidden_layer_size * sizeof(float));
		//cudaMemcpy(output_to_print4, weight_input_hidden, sizeof(float) * number_of_features * hidden_layer_size, cudaMemcpyDeviceToHost);
		//printf("Weights [Input - Hidden]: \n");
		////print_matrix_to_file(output_to_print4, number_of_features, hidden_layer_size, "C:\\Users\\saketk\\Project2-Number-Algorithms\\Project2-Character-Recognition\\data-set\\weights_input_hidden.xlsx");

		//float* output_to_print5 = (float *)malloc(hidden_layer_size * number_of_classes * sizeof(float));
		//cudaMemcpy(output_to_print5, weight_hidden_output, sizeof(float) * hidden_layer_size * number_of_classes, cudaMemcpyDeviceToHost);
		//printf("Weights [Hidden - Output]: \n");
		////print_matrix_to_file(output_to_print5, hidden_layer_size, number_of_classes, "C:\\Users\\saketk\\Project2-Number-Algorithms\\Project2-Character-Recognition\\data-set\\weights_hidden_output.xlsx");

		float* all_losses_print = (float *)malloc(number_of_epochs * sizeof(float));
		cudaMemcpy(all_losses_print, all_losses, sizeof(float) * number_of_epochs, cudaMemcpyDeviceToHost);
		printf("All losses \n");
		print_matrix(all_losses_print, number_of_epochs, 1);

		// Destroy the handle
		cublasDestroy(handle);
	}

	//Runs a forward pass over every test instance and prints the predicted label
	void test(float* test_input) {
		//Allocate memory for input and copy data on device
		cudaMalloc((void**)&dev_test_input, number_of_instances * number_of_features * sizeof(float));
		checkCUDAErrorWithLine("cudaMalloc dev_test_input failed!");
		cudaMemcpy(dev_test_input, test_input, sizeof(float) * number_of_instances * number_of_features, cudaMemcpyHostToDevice);

		// Create a handle for CUBLAS
		cublasHandle_t handle;
		cublasCreate(&handle);

		//Iterate over the test instances in reverse; instance j is presumably labeled j+1
		for (int j = number_of_instances - 1; j >= 0; j--) {
			//1. Forward Pass through network
			forward_pass(handle, dev_test_input, j);
			print_predicted_label(j + 1);
		}
	}

	// TODO: __global__

	/**
	 * Example of use case (follow how you did it in stream compaction)
	 */
	/*void scan(int n, int *odata, const int *idata) {
		timer().startGpuTimer();
		// TODO
		timer().endGpuTimer();
	}
	*/

	// TODO: implement required elements for MLP sections 1 and 2 here
}
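// --- Usage sketch (not part of the original file) ----------------------------
// Minimal illustration of how the CharacterRecognition API above might be driven
// from host code. The image dimensions, hidden-layer size, learning rate, epoch
// count, and the placeholder data are assumptions made for illustration; the real
// project loads the 52-image character data set and its one-hot labels from disk.
/*
#include <vector>
#include "mlp.h"

int main() {
	const int instances = 52;        // assumed: one training image per character class
	const int features = 101 * 101;  // assumed: 101 x 101 pixel images
	const int classes = 52;
	const int hidden_size = 10;      // assumed hidden-layer width
	const float lr = 0.5f;           // assumed learning rate

	// Placeholder host-side data (row-major, one instance per row)
	std::vector<float> input(instances * features, 0.5f);
	std::vector<float> labels(instances * classes, 0.0f);
	for (int i = 0; i < instances; i++) {
		labels[i * classes + i] = 1.0f;   // one-hot label for instance i
	}

	CharacterRecognition::initialize_network(instances, features, classes, hidden_size, lr);
	CharacterRecognition::train(input.data(), labels.data(), 100);  // 100 epochs
	CharacterRecognition::test(input.data());                       // prints predicted labels
	return 0;
}
*/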