Dataset columns (all string-valued), with the range of string lengths per column:

- hip_filename: 5 to 84 characters
- hip_content: 79 to 9.69M characters
- cuda_filename: 4 to 83 characters
- cuda_content: 19 to 9.69M characters
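Each row pairs a hipify-generated HIP translation (`hip_filename`, `hip_content`) with the original CUDA source it was produced from (`cuda_filename`, `cuda_content`). Below is a minimal sketch of how a dataset with these four columns could be loaded and inspected using the Hugging Face `datasets` library; the repository id `user/hip-cuda-pairs` is a placeholder, not the actual dataset name.

```python
# Minimal sketch: load a HIP/CUDA pair dataset and peek at one row.
# Assumption: the data is published as a Hugging Face dataset with the four
# columns listed above; "user/hip-cuda-pairs" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("user/hip-cuda-pairs", split="train")

row = ds[0]
print(row["cuda_filename"], "->", row["hip_filename"])
print(row["cuda_content"][:200])   # first 200 characters of the CUDA source
print(row["hip_content"][:200])    # first 200 characters of the HIP translation
```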
09208855c5cf11975f66bb26a77bcde80a8373fc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __global__ void testKernel(int *in, int *out, int size) { bool oddeven=true; __shared__ bool swappedodd; __shared__ bool swappedeven; int temp,i,rem1; swappedodd=true; swappedeven=true; while(true) { if(oddeven==true) { __syncthreads(); swappedodd=false; __syncthreads(); if (threadIdx.y == 0) { int idx=threadIdx.x; if(idx<(size/2)) { if (in[2*idx]>in[2*idx+1]) { // swap(in[],in[2*idx+1]); temp= in[2*idx]; in[2*idx]=in[2*idx+1]; in[2*idx+1]=temp; swappedodd=true; } } } __syncthreads(); } else { __syncthreads(); swappedeven=false; __syncthreads(); if (threadIdx.y == 0) { int idx=threadIdx.x; if(idx<(size/2)-1) { if (in[2*idx+1]>in[2*idx+2]) { // swap(in[2*idx+1],in[2*idx+2]); temp= in[2*idx+1]; in[2*idx+1]=in[2*idx+2]; in[2*idx+2]=temp; swappedeven=true; } } } __syncthreads(); } if(!(swappedodd||swappedeven)) break; oddeven=!oddeven;//switch mode of sorting } __syncthreads(); int idx=threadIdx.x; if ( idx <size ) out[idx]=in[idx]; } int main(void) { int *a,*a_sorted,i; int *d_a,*d_sorted; int n=20; int size = sizeof(int)*n; hipMalloc((void**)&d_a,size); hipMalloc( (void**)&d_sorted, size); a=(int*)malloc(size); a_sorted=(int*)malloc(size); hipMalloc((void**)&d_sorted, size); printf("enter the unsorted numbers\n"); for(i=0;i<n;i++) { scanf("%d",&a[i]); } hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( testKernel), dim3(1),dim3(n), 0, 0, d_a, d_sorted,n); hipMemcpy(a, d_a, size, hipMemcpyDeviceToHost); hipMemcpy(a_sorted, d_sorted, size, hipMemcpyDeviceToHost); for (i=0;i<n;i++) { printf("%d",a_sorted[i]); printf("\t"); } free(a); free(a_sorted); hipFree(d_sorted); hipFree(d_a); }
09208855c5cf11975f66bb26a77bcde80a8373fc.cu
#include <stdio.h> #include <cuda.h> __global__ void testKernel(int *in, int *out, int size) { bool oddeven=true; __shared__ bool swappedodd; __shared__ bool swappedeven; int temp,i,rem1; swappedodd=true; swappedeven=true; while(true) { if(oddeven==true) { __syncthreads(); swappedodd=false; __syncthreads(); if (threadIdx.y == 0) { int idx=threadIdx.x; if(idx<(size/2)) { if (in[2*idx]>in[2*idx+1]) { // swap(in[],in[2*idx+1]); temp= in[2*idx]; in[2*idx]=in[2*idx+1]; in[2*idx+1]=temp; swappedodd=true; } } } __syncthreads(); } else { __syncthreads(); swappedeven=false; __syncthreads(); if (threadIdx.y == 0) { int idx=threadIdx.x; if(idx<(size/2)-1) { if (in[2*idx+1]>in[2*idx+2]) { // swap(in[2*idx+1],in[2*idx+2]); temp= in[2*idx+1]; in[2*idx+1]=in[2*idx+2]; in[2*idx+2]=temp; swappedeven=true; } } } __syncthreads(); } if(!(swappedodd||swappedeven)) break; oddeven=!oddeven;//switch mode of sorting } __syncthreads(); int idx=threadIdx.x; if ( idx <size ) out[idx]=in[idx]; } int main(void) { int *a,*a_sorted,i; int *d_a,*d_sorted; int n=20; int size = sizeof(int)*n; cudaMalloc((void**)&d_a,size); cudaMalloc( (void**)&d_sorted, size); a=(int*)malloc(size); a_sorted=(int*)malloc(size); cudaMalloc((void**)&d_sorted, size); printf("enter the unsorted numbers\n"); for(i=0;i<n;i++) { scanf("%d",&a[i]); } cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); testKernel<<<1,n>>>(d_a, d_sorted,n); cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost); cudaMemcpy(a_sorted, d_sorted, size, cudaMemcpyDeviceToHost); for (i=0;i<n;i++) { printf("%d",a_sorted[i]); printf("\t"); } free(a); free(a_sorted); cudaFree(d_sorted); cudaFree(d_a); }
62a01b044f47e022504fa1433876d4fe8d2a8e0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <rtabmap/core/odometry/cutil_math.h> #include <rtabmap/core/odometry/cutil_math2.h> #include <rtabmap/core/odometry/cov.h> __device__ float3 d_mean(const float3* pts, const int n){ float3 m = make_float3(0,0,0); for (int i = 0; i < n; ++i) { m += pts[i]; } m /= (n+0.0f); return m; } __device__ void d_cov(const float3* pts, float* C, const int n){ const float3 m = d_mean(pts,n); for (int i = 0; i < n; ++i) { float3 diff = pts[i]-m; cuda::outerAdd(diff,C); } float fac=1.0f/(n-1.0f); for (int i = 0; i < 9; ++i) { C[i] *=fac; } } __global__ void mean(const float* pts, float* m, const int n){ const float3 *pts1= (float3*) pts; //float3 *m3 = (float3*) m; float3 mm = d_mean(pts1,n); m[0]=mm.x; m[1]=mm.y; m[2]=mm.z; } // float C[9]={0,0,0, 0,0,0, 0,0,0}; __global__ void cov(const float* pts, float* C, const int n){ const float3 *pts1= (float3*) pts; d_cov(pts1,C,n); }
62a01b044f47e022504fa1433876d4fe8d2a8e0e.cu
#include <math.h> #include <rtabmap/core/odometry/cutil_math.h> #include <rtabmap/core/odometry/cutil_math2.h> #include <rtabmap/core/odometry/cov.h> __device__ float3 d_mean(const float3* pts, const int n){ float3 m = make_float3(0,0,0); for (int i = 0; i < n; ++i) { m += pts[i]; } m /= (n+0.0f); return m; } __device__ void d_cov(const float3* pts, float* C, const int n){ const float3 m = d_mean(pts,n); for (int i = 0; i < n; ++i) { float3 diff = pts[i]-m; cuda::outerAdd(diff,C); } float fac=1.0f/(n-1.0f); for (int i = 0; i < 9; ++i) { C[i] *=fac; } } __global__ void mean(const float* pts, float* m, const int n){ const float3 *pts1= (float3*) pts; //float3 *m3 = (float3*) m; float3 mm = d_mean(pts1,n); m[0]=mm.x; m[1]=mm.y; m[2]=mm.z; } // float C[9]={0,0,0, 0,0,0, 0,0,0}; __global__ void cov(const float* pts, float* C, const int n){ const float3 *pts1= (float3*) pts; d_cov(pts1,C,n); }
731eb5e6b0e596aa71013f4b6b625ba74b6cc062.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // PFOR and PFOR-DELTA Compression and decompression routines #include <stdio.h> #include <fstream> #include <iomanip> #include <exception> #include <thrust/device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/extrema.h> #include "cm.h" using namespace std; unsigned long long int* raw_decomp = NULL; unsigned int raw_decomp_length = 0; std::map<string, unsigned int> cnt_counts; string curr_file; struct bool_to_int { __host__ __device__ unsigned int operator()(const bool x) { return (unsigned int)x; } }; struct ui_to_ll { __host__ __device__ long long int operator()(const unsigned int x) { return (long long int)x; } }; struct compress_functor_int { const int_type * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_int(const int_type * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val = source[i] - start_val[0]; unsigned int shifted = vals[2] - vals[0] - (i%vals[1])*vals[0]; dest[i] = val << shifted; } }; struct compress_functor_float { const long long int * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_float(const long long int * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val; unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; val = source[i] - start_val[0]; unsigned int z = i%fit_count; unsigned int shifted = int_sz - bits - z*bits; dest[i] = val << shifted; } }; struct decompress_functor_int { const unsigned long long int * source; int_type * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_int(const unsigned long long int * _source, int_type * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; struct decompress_functor_float { const unsigned long long int * source; 
long long int * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_float(const unsigned long long int * _source, long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; struct decompress_functor_str { const unsigned long long * source; unsigned int * dest; const unsigned int * vals; decompress_functor_str(const unsigned long long int * _source, unsigned int * _dest, const unsigned int * _vals): source(_source), dest(_dest), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = 64; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = ((fit_count-src_loc)-1)*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp; } }; unsigned int pfor_decompress(void* destination, void* host, void* d_v, void* s_v) { unsigned int bits, cnt, fit_count, orig_recCount; long long int orig_lower_val; unsigned int bit_count = 64; unsigned int comp_type; long long int start_val; cnt = ((unsigned int*)host)[0]; orig_recCount = ((unsigned int*)host + cnt*2)[7]; bits = ((unsigned int*)host + cnt*2)[8]; orig_lower_val = ((long long int*)((unsigned int*)host + cnt*2 + 9))[0]; fit_count = ((unsigned int*)host + cnt*2)[11]; start_val = ((long long int*)((unsigned int*)host + cnt*2 + 12))[0]; comp_type = ((unsigned int*)host + cnt*2)[14]; //*mRecCount = orig_recCount; //cout << "Decomp Header " << orig_recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl; if(raw_decomp_length < cnt*8) { if(raw_decomp != NULL) { hipFree(raw_decomp); }; hipMalloc((void **) &raw_decomp, cnt*8); raw_decomp_length = cnt*8; }; hipMemcpy( (void*)raw_decomp, (void*)((unsigned int*)host + 5), cnt*8, hipMemcpyHostToDevice); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); thrust::device_ptr<long long int> dd_sv((long long int*)s_v); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; thrust::counting_iterator<unsigned int> begin(0); decompress_functor_int ff1(raw_decomp,(int_type*)destination, (long long int*)s_v, (unsigned int*)d_v); thrust::for_each(begin, begin + orig_recCount, ff1); if(comp_type == 1) { thrust::device_ptr<int_type> d_int((int_type*)destination); d_int[0] = start_val; thrust::inclusive_scan(d_int, d_int + orig_recCount, d_int); }; return orig_recCount; } template< typename T> unsigned long long int pfor_delta_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp, unsigned long long int sz) { long long int orig_lower_val, 
orig_upper_val, start_val, real_lower, real_upper; unsigned int bits, recCount; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 1; // FOR-DELTA if(tp == 0) recCount = source_len/int_size; else recCount = source_len/float_size; void* ss; CUDA_SAFE_CALL(hipMalloc((void **) &ss, recCount*float_size)); if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); thrust::device_ptr<int_type> d_ss((int_type*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values " << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; } else { thrust::device_ptr<long long int> s((long long int*)source); thrust::device_ptr<long long int> d_ss((long long int*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values" << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; }; thrust::counting_iterator<unsigned int> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(hipMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(hipMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; //void* d; //CUDA_SAFE_CALL(hipMalloc((void **) &d, recCount*float_size)); thrust::device_ptr<char> dd((char*)source); thrust::fill(dd, dd+source_len,0); //cout << "FF " << orig_lower_val << " " << bits << " " << fit_count << " " << bit_count << endl; if (tp == 0) { compress_functor_int ff((int_type*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)source); // make an addition sequence thrust::device_ptr<unsigned long long int> add_seq((unsigned long long int*)ss); thrust::constant_iterator<unsigned long long int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned long long int>()); unsigned int cnt = (recCount)/fit_count; if (recCount%fit_count > 0) cnt++; thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::reduce_by_key(add_seq, 
add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); //for(int i = 0; i < 10;i++) // cout << "FIN " << fin_seq[i] << endl; // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); if(file_name) { hipMemcpy( host.data(), (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&real_lower, 8); binary_file.write((char *)&real_upper, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; //resize_compressed(host, sz, cnt*8 + 15*4, 0); host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = real_lower; ((long long int*)(hh+12))[0] = real_upper; hipMemcpy( hh + 20, (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)((char*)hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)((char*)hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(fin_seq); hipFree(ss); hipFree(d_v1); hipFree(s_v1); return sz + cnt + 8; } template< typename T> unsigned long long int pfor_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp, unsigned long long int sz) { unsigned int recCount; long long int orig_lower_val; long long int orig_upper_val; unsigned int bits; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 0; // FOR long long int start_val = 0; bool sorted = 0; // check if sorted if (tp == 0) { recCount = source_len/int_size; thrust::device_ptr<int_type> s((int_type*)source); sorted = thrust::is_sorted(s, s+recCount-1); } else { recCount = source_len/float_size; thrust::device_ptr<long long int> s((long long int*)source); sorted = thrust::is_sorted(s, s+recCount); }; //cout << "file " << file_name << " is sorted " << sorted << endl; if(sorted) return pfor_delta_compress(source, source_len, file_name, host, tp, sz); // sort the sequence if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); } else { thrust::device_ptr<long long int> s((long long int*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned 
int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); }; thrust::counting_iterator<unsigned int> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(hipMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(hipMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; void* d; CUDA_SAFE_CALL(hipMalloc((void **) &d, recCount*float_size)); thrust::device_ptr<char> dd((char*)d); thrust::fill(dd, dd+source_len,0); if (tp == 0) { compress_functor_int ff((int_type*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d); // make an addition sequence thrust::device_ptr<unsigned int> add_seq = thrust::device_malloc<unsigned int>(recCount); thrust::constant_iterator<unsigned int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned int>()); unsigned int cnt = (recCount)/fit_count; if(cnt == 0) cnt = 1; // need at least 1 if (recCount%fit_count > 0) cnt++; //thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::device_ptr<unsigned long long int> fin_seq((unsigned long long int*)source); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); //cout << file_name << " CNT " << cnt << endl; if(file_name) { hipMemcpy( host.data(), (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&orig_upper_val, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; // resize host to sz + cnt*8 + 15 host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = orig_lower_val; ((long long int*)(hh+12))[0] = orig_upper_val; hipMemcpy( hh + 20, (void *)raw_src, cnt*8, hipMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)(hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)(hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(add_seq); hipFree(d); hipFree(d_v1); hipFree(s_v1); return sz + cnt + 8; }
731eb5e6b0e596aa71013f4b6b625ba74b6cc062.cu
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // PFOR and PFOR-DELTA Compression and decompression routines #include <stdio.h> #include <fstream> #include <iomanip> #include <exception> #include <thrust/device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/extrema.h> #include "cm.h" using namespace std; unsigned long long int* raw_decomp = NULL; unsigned int raw_decomp_length = 0; std::map<string, unsigned int> cnt_counts; string curr_file; struct bool_to_int { __host__ __device__ unsigned int operator()(const bool x) { return (unsigned int)x; } }; struct ui_to_ll { __host__ __device__ long long int operator()(const unsigned int x) { return (long long int)x; } }; struct compress_functor_int { const int_type * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_int(const int_type * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val = source[i] - start_val[0]; unsigned int shifted = vals[2] - vals[0] - (i%vals[1])*vals[0]; dest[i] = val << shifted; } }; struct compress_functor_float { const long long int * source; unsigned long long int * dest; const long long int * start_val; const unsigned int * vals; compress_functor_float(const long long int * _source, unsigned long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { long long int val; unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; val = source[i] - start_val[0]; unsigned int z = i%fit_count; unsigned int shifted = int_sz - bits - z*bits; dest[i] = val << shifted; } }; struct decompress_functor_int { const unsigned long long int * source; int_type * dest; const long long int * start_val; const unsigned int * vals; decompress_functor_int(const unsigned long long int * _source, int_type * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; struct decompress_functor_float { const unsigned long long int * source; long long int * dest; const long long int * start_val; const 
unsigned int * vals; decompress_functor_float(const unsigned long long int * _source, long long int * _dest, const long long int * _start_val, const unsigned int * _vals): source(_source), dest(_dest), start_val(_start_val), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = vals[2]; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = int_sz - bits - src_loc*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp + start_val[0]; } }; struct decompress_functor_str { const unsigned long long * source; unsigned int * dest; const unsigned int * vals; decompress_functor_str(const unsigned long long int * _source, unsigned int * _dest, const unsigned int * _vals): source(_source), dest(_dest), vals(_vals) {} template <typename IndexType> __host__ __device__ void operator()(const IndexType & i) { unsigned int bits = vals[0]; unsigned int fit_count = vals[1]; unsigned int int_sz = 64; //find the source index unsigned int src_idx = i/fit_count; // find the exact location unsigned int src_loc = i%fit_count; //right shift the values unsigned int shifted = ((fit_count-src_loc)-1)*bits; unsigned long long int tmp = source[src_idx] >> shifted; // set the rest of bits to 0 tmp = tmp << (int_sz - bits); tmp = tmp >> (int_sz - bits); dest[i] = tmp; } }; unsigned int pfor_decompress(void* destination, void* host, void* d_v, void* s_v) { unsigned int bits, cnt, fit_count, orig_recCount; long long int orig_lower_val; unsigned int bit_count = 64; unsigned int comp_type; long long int start_val; cnt = ((unsigned int*)host)[0]; orig_recCount = ((unsigned int*)host + cnt*2)[7]; bits = ((unsigned int*)host + cnt*2)[8]; orig_lower_val = ((long long int*)((unsigned int*)host + cnt*2 + 9))[0]; fit_count = ((unsigned int*)host + cnt*2)[11]; start_val = ((long long int*)((unsigned int*)host + cnt*2 + 12))[0]; comp_type = ((unsigned int*)host + cnt*2)[14]; //*mRecCount = orig_recCount; //cout << "Decomp Header " << orig_recCount << " " << bits << " " << orig_lower_val << " " << cnt << " " << fit_count << " " << comp_type << endl; if(raw_decomp_length < cnt*8) { if(raw_decomp != NULL) { cudaFree(raw_decomp); }; cudaMalloc((void **) &raw_decomp, cnt*8); raw_decomp_length = cnt*8; }; cudaMemcpy( (void*)raw_decomp, (void*)((unsigned int*)host + 5), cnt*8, cudaMemcpyHostToDevice); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v); thrust::device_ptr<long long int> dd_sv((long long int*)s_v); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; thrust::counting_iterator<unsigned int> begin(0); decompress_functor_int ff1(raw_decomp,(int_type*)destination, (long long int*)s_v, (unsigned int*)d_v); thrust::for_each(begin, begin + orig_recCount, ff1); if(comp_type == 1) { thrust::device_ptr<int_type> d_int((int_type*)destination); d_int[0] = start_val; thrust::inclusive_scan(d_int, d_int + orig_recCount, d_int); }; return orig_recCount; } template< typename T> unsigned long long int pfor_delta_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp, unsigned long long int sz) { long long int orig_lower_val, orig_upper_val, start_val, real_lower, real_upper; 
unsigned int bits, recCount; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 1; // FOR-DELTA if(tp == 0) recCount = source_len/int_size; else recCount = source_len/float_size; void* ss; CUDA_SAFE_CALL(cudaMalloc((void **) &ss, recCount*float_size)); if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); thrust::device_ptr<int_type> d_ss((int_type*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values " << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; } else { thrust::device_ptr<long long int> s((long long int*)source); thrust::device_ptr<long long int> d_ss((long long int*)ss); thrust::adjacent_difference(s, s+recCount, d_ss); start_val = d_ss[0]; if(recCount > 1) d_ss[0] = d_ss[1]; orig_lower_val = *(thrust::min_element(d_ss, d_ss + recCount)); orig_upper_val = *(thrust::max_element(d_ss, d_ss + recCount)); real_lower = s[0]; real_upper = s[recCount-1]; //cout << "orig " << orig_upper_val << " " << orig_lower_val << endl; //cout << "We need for delta " << (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))) << " bits to encode " << orig_upper_val-orig_lower_val << " values" << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val-orig_lower_val)+1))); if (bits == 0) bits = 1; }; thrust::counting_iterator<unsigned int> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; //void* d; //CUDA_SAFE_CALL(cudaMalloc((void **) &d, recCount*float_size)); thrust::device_ptr<char> dd((char*)source); thrust::fill(dd, dd+source_len,0); //cout << "FF " << orig_lower_val << " " << bits << " " << fit_count << " " << bit_count << endl; if (tp == 0) { compress_functor_int ff((int_type*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)ss,(unsigned long long int*)source, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)source); // make an addition sequence thrust::device_ptr<unsigned long long int> add_seq((unsigned long long int*)ss); thrust::constant_iterator<unsigned long long int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned long long int>()); unsigned int cnt = (recCount)/fit_count; if (recCount%fit_count > 0) cnt++; thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); //for(int i = 0; i < 10;i++) // 
cout << "FIN " << fin_seq[i] << endl; // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); if(file_name) { cudaMemcpy( host.data(), (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&real_lower, 8); binary_file.write((char *)&real_upper, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; //resize_compressed(host, sz, cnt*8 + 15*4, 0); host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = real_lower; ((long long int*)(hh+12))[0] = real_upper; cudaMemcpy( hh + 20, (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)((char*)hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)((char*)hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(fin_seq); cudaFree(ss); cudaFree(d_v1); cudaFree(s_v1); return sz + cnt + 8; } template< typename T> unsigned long long int pfor_compress(void* source, unsigned int source_len, char* file_name, thrust::host_vector<T, pinned_allocator<T> >& host, bool tp, unsigned long long int sz) { unsigned int recCount; long long int orig_lower_val; long long int orig_upper_val; unsigned int bits; unsigned int bit_count = 8*8; unsigned int fit_count; unsigned int comp_type = 0; // FOR long long int start_val = 0; bool sorted = 0; // check if sorted if (tp == 0) { recCount = source_len/int_size; thrust::device_ptr<int_type> s((int_type*)source); sorted = thrust::is_sorted(s, s+recCount-1); } else { recCount = source_len/float_size; thrust::device_ptr<long long int> s((long long int*)source); sorted = thrust::is_sorted(s, s+recCount); }; //cout << "file " << file_name << " is sorted " << sorted << endl; if(sorted) return pfor_delta_compress(source, source_len, file_name, host, tp, sz); // sort the sequence if (tp == 0) { thrust::device_ptr<int_type> s((int_type*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); } else { thrust::device_ptr<long long int> s((long long int*)source); orig_lower_val = *(thrust::min_element(s, s + recCount)); orig_upper_val = *(thrust::max_element(s, s + recCount)); //cout << "We need " << (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))) << " bits to encode original range of " << orig_lower_val << " to " << orig_upper_val << endl; bits = (unsigned int)ceil(log2((double)((orig_upper_val - orig_lower_val) + 1))); }; 
thrust::counting_iterator<unsigned int> begin(0); fit_count = bit_count/bits; void* d_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &d_v1, 12)); thrust::device_ptr<unsigned int> dd_v((unsigned int*)d_v1); void* s_v1; CUDA_SAFE_CALL(cudaMalloc((void **) &s_v1, 8)); thrust::device_ptr<long long int> dd_sv((long long int*)s_v1); dd_sv[0] = orig_lower_val; dd_v[0] = bits; dd_v[1] = fit_count; dd_v[2] = bit_count; void* d; CUDA_SAFE_CALL(cudaMalloc((void **) &d, recCount*float_size)); thrust::device_ptr<char> dd((char*)d); thrust::fill(dd, dd+source_len,0); if (tp == 0) { compress_functor_int ff((int_type*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); } else { compress_functor_float ff((long long int*)source,(unsigned long long int*)d, (long long int*)s_v1, (unsigned int*)d_v1); thrust::for_each(begin, begin + recCount, ff); }; thrust::device_ptr<unsigned long long int> s_copy1((unsigned long long int*)d); // make an addition sequence thrust::device_ptr<unsigned int> add_seq = thrust::device_malloc<unsigned int>(recCount); thrust::constant_iterator<unsigned int> iter(fit_count); thrust::sequence(add_seq, add_seq + recCount, 0, 1); thrust::transform(add_seq, add_seq + recCount, iter, add_seq, thrust::divides<unsigned int>()); unsigned int cnt = (recCount)/fit_count; if(cnt == 0) cnt = 1; // need at least 1 if (recCount%fit_count > 0) cnt++; //thrust::device_ptr<unsigned long long int> fin_seq = thrust::device_malloc<unsigned long long int>(cnt); thrust::device_ptr<unsigned long long int> fin_seq((unsigned long long int*)source); thrust::reduce_by_key(add_seq, add_seq+recCount,s_copy1,thrust::make_discard_iterator(), fin_seq); // copy fin_seq to host unsigned long long int * raw_src = thrust::raw_pointer_cast(fin_seq); //cout << file_name << " CNT " << cnt << endl; if(file_name) { cudaMemcpy( host.data(), (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); fstream binary_file(file_name,ios::out|ios::binary|ios::app); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&orig_upper_val, 8); binary_file.write((char *)host.data(),cnt*8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&cnt, 4); binary_file.write((char *)&recCount, 4); binary_file.write((char *)&bits, 4); binary_file.write((char *)&orig_lower_val, 8); binary_file.write((char *)&fit_count, 4); binary_file.write((char *)&start_val, 8); binary_file.write((char *)&comp_type, 4); binary_file.write((char *)&comp_type, 4); //filler binary_file.close(); if(cnt_counts[curr_file] < cnt) cnt_counts[curr_file] = cnt; } else { char* hh; // resize host to sz + cnt*8 + 15 host.resize(sz+cnt+8); hh = (char*)(host.data() + sz); ((unsigned int*)hh)[0] = cnt; ((long long int*)(hh+4))[0] = orig_lower_val; ((long long int*)(hh+12))[0] = orig_upper_val; cudaMemcpy( hh + 20, (void *)raw_src, cnt*8, cudaMemcpyDeviceToHost); ((unsigned int*)hh)[5+cnt*2] = comp_type; ((unsigned int*)hh)[6+cnt*2] = cnt; ((unsigned int*)hh)[7+cnt*2] = recCount; ((unsigned int*)hh)[8+cnt*2] = bits; ((long long int*)(hh+36+cnt*8))[0] = orig_lower_val; ((unsigned int*)hh)[11+cnt*2] = fit_count; ((long long int*)(hh+48+cnt*8))[0] = start_val; ((unsigned int*)hh)[14+cnt*2] = comp_type; }; thrust::device_free(add_seq); cudaFree(d); cudaFree(d_v1); cudaFree(s_v1); return sz + cnt + 8; }
4988f2a1c0cd8833bf24763d9282b7280956db6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void gaussLawKernel(float* efield, float* chargedensity, unsigned int* fieldsize, unsigned int* modelsize, unsigned int inputsperpoint, unsigned int blocksperpoint, float* gaussmodel) { // Allocate and initalize shared memory for parallel reduction extern __shared__ float blockdata[512]; blockdata[threadIdx.x] = 0.0; __syncthreads(); // Compute indices unsigned int fieldidx = blockIdx.x / blocksperpoint; int inpidx = (blockIdx.x % blocksperpoint) * blockDim.x + threadIdx.x; if (inpidx < inputsperpoint) { // Compute coordinates unsigned int d = fieldidx % 3; unsigned int fieldz = fieldidx / 3 % fieldsize[3]; unsigned int fieldy = fieldidx / 3 / fieldsize[3] % fieldsize[2]; unsigned int fieldx = fieldidx / 3 / fieldsize[3] / fieldsize[2] % fieldsize[1]; unsigned int t = fieldidx / 3 / fieldsize[3] / fieldsize[2] / fieldsize[1]; unsigned int inpz = inpidx % fieldsize[3]; unsigned int inpy = inpidx / fieldsize[3] % fieldsize[2]; unsigned int inpx = inpidx / fieldsize[3] / fieldsize[2]; unsigned int modeloffz = fieldsize[3] - fieldz - 1; unsigned int modeloffy = fieldsize[2] - fieldy - 1; unsigned int modeloffx = fieldsize[1] - fieldx - 1; unsigned int modelz = modeloffz + inpz; unsigned int modely = modeloffy + inpy; unsigned int modelx = modeloffx + inpx; // Compute model index unsigned int modelidx = t * 3 * modelsize[2] * modelsize[1] * modelsize[0] + modelx * 3 * modelsize[2] * modelsize[1] + modely * 3 * modelsize[2] + modelz * 3 + d; // Apply model to field blockdata[threadIdx.x] = gaussmodel[modelidx] * chargedensity[inpidx]; } __syncthreads(); // Parallel reduction - sum effects of all points in block for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { blockdata[threadIdx.x] += blockdata[threadIdx.x + stride]; } __syncthreads(); } // Sum effects of entire block group if (threadIdx.x == 0) { atomicAdd(&efield[fieldidx], blockdata[0]); } }
4988f2a1c0cd8833bf24763d9282b7280956db6c.cu
extern "C" __global__ void gaussLawKernel(float* efield, float* chargedensity, unsigned int* fieldsize, unsigned int* modelsize, unsigned int inputsperpoint, unsigned int blocksperpoint, float* gaussmodel) { // Allocate and initalize shared memory for parallel reduction extern __shared__ float blockdata[512]; blockdata[threadIdx.x] = 0.0; __syncthreads(); // Compute indices unsigned int fieldidx = blockIdx.x / blocksperpoint; int inpidx = (blockIdx.x % blocksperpoint) * blockDim.x + threadIdx.x; if (inpidx < inputsperpoint) { // Compute coordinates unsigned int d = fieldidx % 3; unsigned int fieldz = fieldidx / 3 % fieldsize[3]; unsigned int fieldy = fieldidx / 3 / fieldsize[3] % fieldsize[2]; unsigned int fieldx = fieldidx / 3 / fieldsize[3] / fieldsize[2] % fieldsize[1]; unsigned int t = fieldidx / 3 / fieldsize[3] / fieldsize[2] / fieldsize[1]; unsigned int inpz = inpidx % fieldsize[3]; unsigned int inpy = inpidx / fieldsize[3] % fieldsize[2]; unsigned int inpx = inpidx / fieldsize[3] / fieldsize[2]; unsigned int modeloffz = fieldsize[3] - fieldz - 1; unsigned int modeloffy = fieldsize[2] - fieldy - 1; unsigned int modeloffx = fieldsize[1] - fieldx - 1; unsigned int modelz = modeloffz + inpz; unsigned int modely = modeloffy + inpy; unsigned int modelx = modeloffx + inpx; // Compute model index unsigned int modelidx = t * 3 * modelsize[2] * modelsize[1] * modelsize[0] + modelx * 3 * modelsize[2] * modelsize[1] + modely * 3 * modelsize[2] + modelz * 3 + d; // Apply model to field blockdata[threadIdx.x] = gaussmodel[modelidx] * chargedensity[inpidx]; } __syncthreads(); // Parallel reduction - sum effects of all points in block for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { blockdata[threadIdx.x] += blockdata[threadIdx.x + stride]; } __syncthreads(); } // Sum effects of entire block group if (threadIdx.x == 0) { atomicAdd(&efield[fieldidx], blockdata[0]); } }
ac6195d42f9558ecf6f71baa5bf4bbf7f360b492.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduce( float *a, int size, int c) { int tid = blockIdx.x; //Handle the data at the index int index=c,j=0;//size=b for(j=index+1;j<size;j++) { a[((tid+index+1)*size + j)] = (float)(a[((tid+index+1)*size + j)] - (float)a[((tid+index+1)*size+index)] * a[((index*size) + j)]); } }
ac6195d42f9558ecf6f71baa5bf4bbf7f360b492.cu
#include "includes.h" __global__ void reduce( float *a, int size, int c) { int tid = blockIdx.x; //Handle the data at the index int index=c,j=0;//size=b for(j=index+1;j<size;j++) { a[((tid+index+1)*size + j)] = (float)(a[((tid+index+1)*size + j)] - (float)a[((tid+index+1)*size+index)] * a[((index*size) + j)]); } }
1cc976e6b67877c61387d588ae37620cfa7d18de.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // Includes CUDA #include <hip/hip_runtime.h> // Utilities and timing functions #include "helper_functions.h" // includes cuda.h and hip/hip_runtime_api.h // CUDA helper functions #include "helper_cuda.h" // helper functions for CUDA error check #define MAX_EPSILON_ERROR 5e-3f #define TILE_WIDTH 14 // #define w (TILE_WIDTH + filtsize - 1) //size of tile after padding was added i.e. 20 for 3x3 filter const char *imageFilename = "lena_bw.pgm"; //Function headers __global__ void sharedConvolute(float* dData, float* hData, int height, int width, float* filter, int masksize); // void callConst(float* hData, float* mask, int width, int height, unsigned int size, int masksize, char* imagePath); int main(int argc, char **argv) { //Load image float *hData = NULL; unsigned int width, height; char *imagePath = sdkFindFilePath(imageFilename, argv[0]); if (imagePath == NULL) { printf("Unable to source image file: %s\n", imageFilename); exit(EXIT_FAILURE); } sdkLoadPGM(imagePath, &hData, &width, &height); // printf("%d, %d\n", height,width); unsigned int size = width * height * sizeof(float); printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height); // float* hSharpen = (float*) malloc(filtsize*filtsize*sizeof(float)); // float* hEdge = (float*) malloc(filtsize*filtsize*sizeof(float)); // float* hAverage = (float*) malloc(filtsize*filtsize*sizeof(float)); float* dData = NULL; checkCudaErrors(hipMalloc((void** )&dData, size)); checkCudaErrors(hipMemcpy(dData, hData, size, hipMemcpyHostToDevice)); //dData now contains the image int masksize = 3; // int masksize = 5; // int masksize = 7; float hEdge3[] = {-1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0}; // float hSharpen3[] = {-1.0, -1.0, -1.0, -1.0, 9, -1.0, -1.0, -1.0, -1.0}; // float hAverage3[] = {0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111}; // // float hSharpen5[] = {-1.0, -1.0, -1.0, -1.0, -1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, // -1.0, -1.0, 25, -1.0, -1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, // -1.0, -1.0, -1.0, -1.0, -1.0}; // float hAverage5[] = {0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04,}; // // float hSharpen7[] = {-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, 49, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0}; // // float hAverage7[] = {1.0/49, 1.0/49, 1.0/7, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49}; // float* dFilter = NULL; float* dFilter = NULL; int fsize = masksize*masksize*sizeof(float); checkCudaErrors(hipMalloc((void** )&dFilter,fsize)); checkCudaErrors(hipMemcpy(dFilter, hEdge3, fsize, hipMemcpyHostToDevice)); // checkCudaErrors(hipMalloc((void** )&dFilter,fsize)); // checkCudaErrors(hipMemcpy(dFilter, hSharpen, fsize, 
hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpyToSymbol(filter, hSharpen7, fsize)); checkCudaErrors(hipDeviceSynchronize()); float* dOut = NULL; checkCudaErrors(hipMalloc((void** )&dOut, size)); dim3 dimBlock(16, 16); // dim3 dimGrid(width-1/TILE_WIDTH+1, height-1/TILE_WIDTH+1, 1); dim3 dimGrid(width/dimBlock.y, height/dimBlock.x); // checkCudaErrors(hipDeviceSynchronize()); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); hipLaunchKernelGGL(( sharedConvolute), dim3(dimGrid), dim3(dimBlock), 0, 0, dOut, dData, height, width, hEdge3, masksize); checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&timer); // printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); // printf("%.2f Mpixels/sec\n", // (width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6); float time = sdkGetTimerValue(&timer); printf("Constant memory took %f \n", time/1000.0f); sdkDeleteTimer(&timer); // float time = sdkGetTimerValue(&timer) / 1000.0f; // printf("Shared memory took %f (ms) \n", time); // sdkDeleteTimer(&timer); float* hOut = (float* )malloc(size); hipMemcpy(hOut, dOut, size, hipMemcpyDeviceToHost); char outputFilename[1024]; strcpy(outputFilename, imagePath); strcpy(outputFilename + strlen(imagePath) - 6, "_sharedEdge.pgm"); sdkSavePGM(outputFilename, hOut, width, height); printf("Wrote '%s' to file\n", outputFilename); } __global__ void sharedConvolute(float* dData, float* hData, int height, int width, float* filter, int filtsize){ // int filtsize = 3; // int S = (filtsize-1)/2; // const int w = TILE_WIDTH + (2*S); // int w = 16; __shared__ float shared_tile[16][16]; int rowOut = threadIdx.y + blockIdx.y * 16; int colOut = threadIdx.x + blockIdx.x * 16; //Can only iterate through active part of tile as that is the output we set int rowT = rowOut; int colT = colOut; if ((rowT >= 0) && (rowT < height) && (colT >= 0) && (colT < width)){ shared_tile[threadIdx.y][threadIdx.x] = hData[rowT * width + colT]; } else{ shared_tile[threadIdx.y][threadIdx.x] = 0.0f; } __syncthreads(); // dData[pixPos] = 0.0; if(rowOut<height && colOut<width){ float sum = 0.0; if(threadIdx.y < TILE_WIDTH && threadIdx.x < TILE_WIDTH){ //maskP<filtsize*filtsize for(int maskrow = 0; maskrow < filtsize; maskrow++){ for(int maskcol = 0; maskcol < filtsize; maskcol++){ // int pixP = (rowT + maskrow)*width + (colT + maskcol); //maskrow - row + 1; int maskP = (maskrow)*filtsize + (maskcol); sum += filter[maskP] * shared_tile[maskrow+threadIdx.y][maskcol + threadIdx.x]; } } } else if(threadIdx.y < width && threadIdx.x < height){ for(int maskrow = 0; maskrow < filtsize; maskrow++){ for(int maskcol = 0; maskcol < filtsize; maskcol++){ int pixP = (rowT + maskrow)*width + (colT + maskcol); //maskrow - row + 1; int maskP = (maskrow)*filtsize + (maskcol); sum += filter[maskP] * hData[pixP]; } } } __syncthreads(); int pixPos = rowOut*width + colOut; //row*width+col dData[pixPos] = sum; if (dData[pixPos] < 0){ dData[pixPos] = 0; } else if(dData[pixPos] > 1){ dData[pixPos] = 1; } } }
1cc976e6b67877c61387d588ae37620cfa7d18de.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // Includes CUDA #include <cuda_runtime.h> // Utilities and timing functions #include "helper_functions.h" // includes cuda.h and cuda_runtime_api.h // CUDA helper functions #include "helper_cuda.h" // helper functions for CUDA error check #define MAX_EPSILON_ERROR 5e-3f #define TILE_WIDTH 14 // #define w (TILE_WIDTH + filtsize - 1) //size of tile after padding was added i.e. 20 for 3x3 filter const char *imageFilename = "lena_bw.pgm"; //Function headers __global__ void sharedConvolute(float* dData, float* hData, int height, int width, float* filter, int masksize); // void callConst(float* hData, float* mask, int width, int height, unsigned int size, int masksize, char* imagePath); int main(int argc, char **argv) { //Load image float *hData = NULL; unsigned int width, height; char *imagePath = sdkFindFilePath(imageFilename, argv[0]); if (imagePath == NULL) { printf("Unable to source image file: %s\n", imageFilename); exit(EXIT_FAILURE); } sdkLoadPGM(imagePath, &hData, &width, &height); // printf("%d, %d\n", height,width); unsigned int size = width * height * sizeof(float); printf("Loaded '%s', %d x %d pixels\n", imageFilename, width, height); // float* hSharpen = (float*) malloc(filtsize*filtsize*sizeof(float)); // float* hEdge = (float*) malloc(filtsize*filtsize*sizeof(float)); // float* hAverage = (float*) malloc(filtsize*filtsize*sizeof(float)); float* dData = NULL; checkCudaErrors(cudaMalloc((void** )&dData, size)); checkCudaErrors(cudaMemcpy(dData, hData, size, cudaMemcpyHostToDevice)); //dData now contains the image int masksize = 3; // int masksize = 5; // int masksize = 7; float hEdge3[] = {-1.0, 0.0, 1.0, -2.0, 0.0, 2.0, -1.0, 0.0, 1.0}; // float hSharpen3[] = {-1.0, -1.0, -1.0, -1.0, 9, -1.0, -1.0, -1.0, -1.0}; // float hAverage3[] = {0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111, 0.111}; // // float hSharpen5[] = {-1.0, -1.0, -1.0, -1.0, -1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, // -1.0, -1.0, 25, -1.0, -1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, // -1.0, -1.0, -1.0, -1.0, -1.0}; // float hAverage5[] = {0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04, // 0.04, 0.04, 0.04, 0.04, 0.04,}; // // float hSharpen7[] = {-1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, 49, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0, // -1.0, -1.0, -1.0, -1.0, -1.0, -1.0 ,-1.0}; // // float hAverage7[] = {1.0/49, 1.0/49, 1.0/7, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, // 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49, 1.0/49}; // float* dFilter = NULL; float* dFilter = NULL; int fsize = masksize*masksize*sizeof(float); checkCudaErrors(cudaMalloc((void** )&dFilter,fsize)); checkCudaErrors(cudaMemcpy(dFilter, hEdge3, fsize, cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMalloc((void** )&dFilter,fsize)); // checkCudaErrors(cudaMemcpy(dFilter, hSharpen, fsize, cudaMemcpyHostToDevice)); // 
checkCudaErrors(cudaMemcpyToSymbol(filter, hSharpen7, fsize)); checkCudaErrors(cudaDeviceSynchronize()); float* dOut = NULL; checkCudaErrors(cudaMalloc((void** )&dOut, size)); dim3 dimBlock(16, 16); // dim3 dimGrid(width-1/TILE_WIDTH+1, height-1/TILE_WIDTH+1, 1); dim3 dimGrid(width/dimBlock.y, height/dimBlock.x); // checkCudaErrors(cudaDeviceSynchronize()); StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); sdkStartTimer(&timer); sharedConvolute<<<dimGrid, dimBlock, 0>>>(dOut, dData, height, width, hEdge3, masksize); checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&timer); // printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); // printf("%.2f Mpixels/sec\n", // (width *height / (sdkGetTimerValue(&timer) / 1000.0f)) / 1e6); float time = sdkGetTimerValue(&timer); printf("Constant memory took %f \n", time/1000.0f); sdkDeleteTimer(&timer); // float time = sdkGetTimerValue(&timer) / 1000.0f; // printf("Shared memory took %f (ms) \n", time); // sdkDeleteTimer(&timer); float* hOut = (float* )malloc(size); cudaMemcpy(hOut, dOut, size, cudaMemcpyDeviceToHost); char outputFilename[1024]; strcpy(outputFilename, imagePath); strcpy(outputFilename + strlen(imagePath) - 6, "_sharedEdge.pgm"); sdkSavePGM(outputFilename, hOut, width, height); printf("Wrote '%s' to file\n", outputFilename); } __global__ void sharedConvolute(float* dData, float* hData, int height, int width, float* filter, int filtsize){ // int filtsize = 3; // int S = (filtsize-1)/2; // const int w = TILE_WIDTH + (2*S); // int w = 16; __shared__ float shared_tile[16][16]; int rowOut = threadIdx.y + blockIdx.y * 16; int colOut = threadIdx.x + blockIdx.x * 16; //Can only iterate through active part of tile as that is the output we set int rowT = rowOut; int colT = colOut; if ((rowT >= 0) && (rowT < height) && (colT >= 0) && (colT < width)){ shared_tile[threadIdx.y][threadIdx.x] = hData[rowT * width + colT]; } else{ shared_tile[threadIdx.y][threadIdx.x] = 0.0f; } __syncthreads(); // dData[pixPos] = 0.0; if(rowOut<height && colOut<width){ float sum = 0.0; if(threadIdx.y < TILE_WIDTH && threadIdx.x < TILE_WIDTH){ //maskP<filtsize*filtsize for(int maskrow = 0; maskrow < filtsize; maskrow++){ for(int maskcol = 0; maskcol < filtsize; maskcol++){ // int pixP = (rowT + maskrow)*width + (colT + maskcol); //maskrow - row + 1; int maskP = (maskrow)*filtsize + (maskcol); sum += filter[maskP] * shared_tile[maskrow+threadIdx.y][maskcol + threadIdx.x]; } } } else if(threadIdx.y < width && threadIdx.x < height){ for(int maskrow = 0; maskrow < filtsize; maskrow++){ for(int maskcol = 0; maskcol < filtsize; maskcol++){ int pixP = (rowT + maskrow)*width + (colT + maskcol); //maskrow - row + 1; int maskP = (maskrow)*filtsize + (maskcol); sum += filter[maskP] * hData[pixP]; } } } __syncthreads(); int pixPos = rowOut*width + colOut; //row*width+col dData[pixPos] = sum; if (dData[pixPos] < 0){ dData[pixPos] = 0; } else if(dData[pixPos] > 1){ dData[pixPos] = 1; } } }
2c08aefad2a56fb03c221eef22d276dff75a3ee4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <stdio.h>
#include <vector>

// device kernel
__global__ void vector_add(const float *A, const float *B, float *C,
                           size_t array_size) {
  // local thread id
  size_t id = threadIdx.x;
  // grid-stride loop: total_threads is the stride across the whole grid
  size_t total_threads = gridDim.x * blockDim.x;
  for (size_t i = id; i < array_size; i += total_threads) {
    C[i] = A[i] + B[i];
  }
}

int main() {
  const size_t array_size = 256;
  std::vector<float> A(array_size, 1.0f);
  std::vector<float> B(array_size, 1.0f);
  std::vector<float> C(array_size);

  // allocating device memory
  float *A_dev;
  float *B_dev;
  float *C_dev;
  hipMalloc((void **)&A_dev, array_size * sizeof(float));
  hipMalloc((void **)&B_dev, array_size * sizeof(float));
  hipMalloc((void **)&C_dev, array_size * sizeof(float));

  // explicitly copying data from host to device
  hipMemcpy(A_dev, A.data(), array_size * sizeof(float),
            hipMemcpyHostToDevice);
  hipMemcpy(B_dev, B.data(), array_size * sizeof(float),
            hipMemcpyHostToDevice);

  // getting device properties in order to query device parameters
  hipDeviceProp_t prop;
  hipGetDeviceProperties(&prop, 0);
  const size_t max_thread_per_block = prop.maxThreadsPerBlock;
  const size_t num_thread_per_block =
      std::min(max_thread_per_block, array_size);
  const size_t num_block_per_grid =
      (size_t)std::ceil(((float)array_size) / num_thread_per_block);

  // constructing block size
  dim3 block_size(num_thread_per_block, 1, 1);
  // constructing number of blocks (grid size)
  dim3 num_blocks(num_block_per_grid, 1, 1);

  // launching and executing the kernel
  hipLaunchKernelGGL(vector_add, dim3(num_blocks), dim3(block_size), 0, 0,
                     A_dev, B_dev, C_dev, array_size);

  // returning the result to the host vector
  hipMemcpy(C.data(), C_dev, array_size * sizeof(float),
            hipMemcpyDeviceToHost);

  // releasing the device memory objects
  hipFree(A_dev);
  hipFree(B_dev);
  hipFree(C_dev);
  return EXIT_SUCCESS;
}
2c08aefad2a56fb03c221eef22d276dff75a3ee4.cu
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <stdio.h>
#include <vector>

// CUDA device kernel
__global__ void vector_add(const float *A, const float *B, float *C,
                           size_t array_size) {
  // local thread id
  size_t id = threadIdx.x;
  // grid-stride loop: total_threads is the stride across the whole grid
  size_t total_threads = gridDim.x * blockDim.x;
  for (size_t i = id; i < array_size; i += total_threads) {
    C[i] = A[i] + B[i];
  }
}

int main() {
  const size_t array_size = 256;
  std::vector<float> A(array_size, 1.0f);
  std::vector<float> B(array_size, 1.0f);
  std::vector<float> C(array_size);

  // allocating device memory
  float *A_dev;
  float *B_dev;
  float *C_dev;
  cudaMalloc((void **)&A_dev, array_size * sizeof(float));
  cudaMalloc((void **)&B_dev, array_size * sizeof(float));
  cudaMalloc((void **)&C_dev, array_size * sizeof(float));

  // explicitly copying data from host to device
  cudaMemcpy(A_dev, A.data(), array_size * sizeof(float),
             cudaMemcpyHostToDevice);
  cudaMemcpy(B_dev, B.data(), array_size * sizeof(float),
             cudaMemcpyHostToDevice);

  // getting device properties in order to query device parameters
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  const size_t max_thread_per_block = prop.maxThreadsPerBlock;
  const size_t num_thread_per_block =
      std::min(max_thread_per_block, array_size);
  const size_t num_block_per_grid =
      (size_t)std::ceil(((float)array_size) / num_thread_per_block);

  // constructing block size
  dim3 block_size(num_thread_per_block, 1, 1);
  // constructing number of blocks (grid size)
  dim3 num_blocks(num_block_per_grid, 1, 1);

  // launching and executing cuda kernel
  vector_add<<<num_blocks, block_size>>>(A_dev, B_dev, C_dev, array_size);

  // returning the result to the host vector
  cudaMemcpy(C.data(), C_dev, array_size * sizeof(float),
             cudaMemcpyDeviceToHost);

  // releasing the cuda memory objects
  cudaFree(A_dev);
  cudaFree(B_dev);
  cudaFree(C_dev);
  return EXIT_SUCCESS;
}
f92541890170d5e1dd082057535e2bb48f46b703.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmgeelltmv.cu, normal z -> d, Thu Oct 8 23:05:47 2020 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 template<bool betazero> __global__ void dmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, double alpha, double * dval, magma_index_t * dcolind, double * dx, double beta, double * dy) { extern __shared__ double dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } for( int i=0; i<num_vecs; i++ ) { if (betazero) { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha; } else { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors if (beta == MAGMA_D_ZERO) { hipLaunchKernelGGL(( dmgeelltmv_kernel<true>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { hipLaunchKernelGGL(( dmgeelltmv_kernel<false>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; }
f92541890170d5e1dd082057535e2bb48f46b703.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmgeelltmv.cu, normal z -> d, Thu Oct 8 23:05:47 2020 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 template<bool betazero> __global__ void dmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, double alpha, double * dval, magma_index_t * dcolind, double * dx, double beta, double * dy) { extern __shared__ double dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ) { for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } for( int i=0; i<num_vecs; i++ ) { if (betazero) { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha; } else { dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors if (beta == MAGMA_D_ZERO) { dmgeelltmv_kernel<true><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } else { dmgeelltmv_kernel<false><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); } return MAGMA_SUCCESS; }
258fe7a79e77ebba0e8d9e3c060a65aba30ef6b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "util.h" const char inputName256one[] = "data/input_one_14_1024.bin"; const char weightName256one[] = "data/weight_one_1024.bin"; const char bnBias_myKernel_Name256one[] = "data/bnBias_myKernel_one_1024.bin"; const char bnScale_myKernel_Name256one[] = "data/bnScale_myKernel_one_1024.bin"; __global__ void kernel_1024_one_256( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *__restrict__ weights = shared_ + 1024*4, *__restrict__ output = weights + 256*16, *__restrict__ input = shared_; float *__restrict__ bias = output + 4*256, *__restrict__ scale = bias + 256; for (int i = 0; i < 4; i++) input[ind + i*1024] = A[tile*4096 + i*1024 + ind]; bias[in_channel] = bnBias[in_channel]; scale[in_channel] = bnScale[in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 1024; k += 16) { const float *B_start = B + k*256; for (int i = 0; i < 4; i++) weights[ind + i*1024] = B_start[i*1024 + ind]; __syncthreads(); const float *A_start = input + k; for (int p = 0; p < 16; p++) { output[ind] += A_start[line*1024 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*1024, res = scale[in_channel] * output[ind] + bias[in_channel]; C_start[ind] = res > 0 ? res : 0; } __global__ void kernel_256_one_1024( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, part = blockIdx.y, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *weights = shared_ + 256*4, *output = weights + 256*32, *input = shared_; float *bias = output + 4*256, *scale = bias + 256; input[ind] = A[tile * 1024 + ind]; bias[in_channel] = bnBias[part*256 + in_channel]; scale[in_channel] = bnScale[part*256+ in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 256; k += 32) { for (int i = 0; i < 8; i++) weights[ind + 1024*i] = B[(k + i*4 + line)*1024 + part*256 + in_channel]; __syncthreads(); float *A_start = input + k; for (int p = 0; p < 32; p++) { output[ind] += A_start[line*256 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*4096 + part*256; C_start[line * 1024 + in_channel] = scale[in_channel] * output[ind] + bias[in_channel]; } void kernel_256_1_in(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*1024); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 256); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 256); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*1024, nOutput = 14*14*256, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); hipMalloc((void **) &input_, nInput<<2); hipMalloc((void **) &output_, nOutput<<2); hipMalloc((void **) &weight_, nWeights<<2); hipMalloc((void **) &bnBias_, 256<<2); hipMalloc((void **) &bnScale_, 256<<2); hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice); hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice); hipMemcpy(bnBias_, bnBias_myKernel, 256<<2, hipMemcpyHostToDevice); 
hipMemcpy(bnScale_, bnScale_myKernel, 256<<2, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto kstart = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( kernel_1024_one_256) , dim3(dim3(49)), dim3(dim3(256, 4)), (4*1024 + 16*256 + 4*256 + 2*256)<<2 , 0, 0, input_, weight_, bnBias_, bnScale_, output_); hipDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); hipMemcpy(result, output_, nOutput<<2, hipMemcpyDeviceToHost); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); hipFree(input_); hipFree(output_); hipFree(weight_); hipFree(bnScale_); hipFree(bnBias_); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(input); free(weight); free(bnBias_myKernel); free(bnScale_myKernel); } void kernel_256_1_out(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*256); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 1024); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 1024); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*256, nOutput = 14*14*1024, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); hipMalloc((void **) &input_, nInput<<2); hipMalloc((void **) &output_, nOutput<<2); hipMalloc((void **) &weight_, nWeights<<2); hipMalloc((void **) &bnBias_, 1024<<2); hipMalloc((void **) &bnScale_, 1024<<2); hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice); hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice); hipMemcpy(bnBias_, bnBias_myKernel, 1024<<2, hipMemcpyHostToDevice); hipMemcpy(bnScale_, bnScale_myKernel, 1024<<2, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto kstart = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( kernel_256_one_1024) , dim3(dim3(49, 4)), dim3(dim3(256, 4)), (4*256 + 32*256 + 4*256 + 2*256)<<2 , 0, 0, input_, weight_, bnBias_, bnScale_, output_); hipDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); hipMemcpy(result, output_, nOutput<<2, hipMemcpyDeviceToHost); hipFree(input_); hipFree(output_); hipFree(weight_); hipFree(bnScale_); hipFree(bnBias_); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(bnBias_myKernel); free(bnScale_myKernel); free(input); free(weight); }
258fe7a79e77ebba0e8d9e3c060a65aba30ef6b8.cu
#include "util.h" const char inputName256one[] = "data/input_one_14_1024.bin"; const char weightName256one[] = "data/weight_one_1024.bin"; const char bnBias_myKernel_Name256one[] = "data/bnBias_myKernel_one_1024.bin"; const char bnScale_myKernel_Name256one[] = "data/bnScale_myKernel_one_1024.bin"; __global__ void kernel_1024_one_256( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *__restrict__ weights = shared_ + 1024*4, *__restrict__ output = weights + 256*16, *__restrict__ input = shared_; float *__restrict__ bias = output + 4*256, *__restrict__ scale = bias + 256; for (int i = 0; i < 4; i++) input[ind + i*1024] = A[tile*4096 + i*1024 + ind]; bias[in_channel] = bnBias[in_channel]; scale[in_channel] = bnScale[in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 1024; k += 16) { const float *B_start = B + k*256; for (int i = 0; i < 4; i++) weights[ind + i*1024] = B_start[i*1024 + ind]; __syncthreads(); const float *A_start = input + k; for (int p = 0; p < 16; p++) { output[ind] += A_start[line*1024 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*1024, res = scale[in_channel] * output[ind] + bias[in_channel]; C_start[ind] = res > 0 ? res : 0; } __global__ void kernel_256_one_1024( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, part = blockIdx.y, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *weights = shared_ + 256*4, *output = weights + 256*32, *input = shared_; float *bias = output + 4*256, *scale = bias + 256; input[ind] = A[tile * 1024 + ind]; bias[in_channel] = bnBias[part*256 + in_channel]; scale[in_channel] = bnScale[part*256+ in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 256; k += 32) { for (int i = 0; i < 8; i++) weights[ind + 1024*i] = B[(k + i*4 + line)*1024 + part*256 + in_channel]; __syncthreads(); float *A_start = input + k; for (int p = 0; p < 32; p++) { output[ind] += A_start[line*256 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*4096 + part*256; C_start[line * 1024 + in_channel] = scale[in_channel] * output[ind] + bias[in_channel]; } void kernel_256_1_in(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*1024); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 256); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 256); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*1024, nOutput = 14*14*256, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); cudaMalloc((void **) &input_, nInput<<2); cudaMalloc((void **) &output_, nOutput<<2); cudaMalloc((void **) &weight_, nWeights<<2); cudaMalloc((void **) &bnBias_, 256<<2); cudaMalloc((void **) &bnScale_, 256<<2); cudaMemcpy(input_, input, nInput<<2, cudaMemcpyHostToDevice); cudaMemcpy(weight_, weight, nWeights<<2, cudaMemcpyHostToDevice); cudaMemcpy(bnBias_, bnBias_myKernel, 256<<2, cudaMemcpyHostToDevice); cudaMemcpy(bnScale_, bnScale_myKernel, 256<<2, cudaMemcpyHostToDevice); 
cudaDeviceSynchronize(); auto kstart = std::chrono::steady_clock::now(); kernel_1024_one_256 <<<dim3(49), dim3(256, 4), (4*1024 + 16*256 + 4*256 + 2*256)<<2 >>> ( input_, weight_, bnBias_, bnScale_, output_); cudaDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); cudaMemcpy(result, output_, nOutput<<2, cudaMemcpyDeviceToHost); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); cudaFree(input_); cudaFree(output_); cudaFree(weight_); cudaFree(bnScale_); cudaFree(bnBias_); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(input); free(weight); free(bnBias_myKernel); free(bnScale_myKernel); } void kernel_256_1_out(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*256); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 1024); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 1024); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*256, nOutput = 14*14*1024, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); cudaMalloc((void **) &input_, nInput<<2); cudaMalloc((void **) &output_, nOutput<<2); cudaMalloc((void **) &weight_, nWeights<<2); cudaMalloc((void **) &bnBias_, 1024<<2); cudaMalloc((void **) &bnScale_, 1024<<2); cudaMemcpy(input_, input, nInput<<2, cudaMemcpyHostToDevice); cudaMemcpy(weight_, weight, nWeights<<2, cudaMemcpyHostToDevice); cudaMemcpy(bnBias_, bnBias_myKernel, 1024<<2, cudaMemcpyHostToDevice); cudaMemcpy(bnScale_, bnScale_myKernel, 1024<<2, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); auto kstart = std::chrono::steady_clock::now(); kernel_256_one_1024 <<<dim3(49, 4), dim3(256, 4), (4*256 + 32*256 + 4*256 + 2*256)<<2 >>>( input_, weight_, bnBias_, bnScale_, output_); cudaDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); cudaMemcpy(result, output_, nOutput<<2, cudaMemcpyDeviceToHost); cudaFree(input_); cudaFree(output_); cudaFree(weight_); cudaFree(bnScale_); cudaFree(bnBias_); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(bnBias_myKernel); free(bnScale_myKernel); free(input); free(weight); }
75034643bd0ea35a4958a79d0e2b524024c4fcb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __CUDNN__ #include "L2Normalize.hpp" // template class ConcatenateChannelWise<int>; template class L2Normalize<float>; // template class ConcatenateChannelWise<double>; __global__ void Normalize_kernel(int sizeOfInputImg, int batchSize, float *input, float* norm2List, float* result) { int batchNum = blockIdx.x * blockDim.x + threadIdx.x; if(batchNum < batchSize) { norm2List[batchNum] = 0; for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { // printf("eidx: %d\n", elementIndex); norm2List[batchNum] += (input[batchNum * sizeOfInputImg + elementIndex] * input[batchNum * sizeOfInputImg + elementIndex]); } norm2List[batchNum] = sqrt(norm2List[batchNum]); for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { // printf("norm2List[%d] = %f\n", batchNum, norm2List[batchNum]); result[batchNum * sizeOfInputImg + elementIndex] = (input[batchNum * sizeOfInputImg + elementIndex] / norm2List[batchNum]); } } } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } template<typename DTYPE> int L2Normalize<DTYPE>::ForwardPropagateOnGPU(int pTime) { int noBlock = 3, threadsPerBlock = 128; Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = NULL; int timesize = result->GetTimeSize(); int batchsize = result->GetBatchSize(); int channelsize = result->GetChannelSize(); int rowsize = result->GetRowSize(); int colsize = result->GetColSize(); Shape *resultTenShape = result->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfInputImg = 0; DTYPE *result_gpu = result->GetGPUData(); DTYPE *input_gpu = NULL; int inputChannelSize = 0; int idOfDevice = result->GetDeviceID(); float test= 0.f; DTYPE* norm2ListGPUData; // std::cout << "L2 forward" << std::endl; Tensor<DTYPE>* testnorm2List = NULL; if(this->norm2ListGPU_); delete this->norm2ListGPU_; this->norm2ListGPU_ = Tensor<DTYPE>::Zeros(1, 1, 1, 1, batchsize); // testnorm2List = Tensor<DTYPE>::Zeros(1, 1, 1, 1, batchsize); // std::cout << "norm2 tensor: " << (*testnorm2List)[0] << std::endl; // for (int batchIndex = 0; batchIndex < batchsize; batchIndex++) // this->norm2ListGPU_[batchIndex] = 0; // std::cout << "testnorm2LIst zero index: " << (*testnorm2List)[0] << std::endl; // std::cout << "norm2 before get value: " << (*this->norm2ListGPU_)[0] << std::endl; if (this->norm2ListGPU_->GetDevice() != GPU) this->norm2ListGPU_->SetDeviceGPU(idOfDevice); // std::cout << "id: " << idOfDevice << std::endl; // testnorm2List->SetDeviceGPU(idOfDevice); // (*testnorm2List)[0]++; norm2ListGPUData = this->norm2ListGPU_->GetGPUData(); // norm2ListGPUData = testnorm2List->GetGPUData(); input = this->GetInput()[0]->GetResult(); input_gpu = input->GetGPUData(); // resultGPU = result->GetGPUData(); inputChannelSize = input->GetChannelSize(); sizeOfInputImg = inputChannelSize * sizeOfPlane; GetKernelParameters(batchsize, &noBlock, &threadsPerBlock); // std::cout << input->GetShape() << std::endl; // std::cout << "noblock: " << noBlock << " nothread: " << threadsPerBlock << std::endl;; // std::cout << "norm2 before getsum: " << (*this->norm2ListGPU_)[0] << std::endl; // std::cout << "norm2 before get value: " << (*this->norm2ListGPU_)[0] << std::endl; Normalize_kernel << < noBlock, threadsPerBlock >> > 
(sizeOfInputImg, batchsize, input_gpu, norm2ListGPUData, result_gpu); hipDeviceSynchronize(); // std::cout << "end of normalization" << std::endl; this->norm2ListGPU_->SetDeviceCPU(); // testnorm2List->SetDeviceCPU(); // (*this->norm2ListGPU_)[0] = 1.f; // std::cout << "normListgpu size: " << this->norm2ListGPU_->GetShape() << std::endl; // for(int i = 0; i < batchsize; i++) // std::cout << "real norm 2 [" << i << "]: " << (*this->norm2ListGPU_)[i] << std::endl; // std::cout << "normalized value" << std::endl; // std::cout << "after normlization" << std::endl; /* for(int h = 0; h < batchsize; h++) { std::cout << "input vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfInputImg; i++) std::cout << (*input)[h * sizeOfInputImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "real norm 2 [" << h << "]: " << (*this->norm2ListGPU_)[h] << std::endl; std::cout << "result vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfInputImg; i++) std::cout << (*result)[h * sizeOfInputImg + i] << std::endl; std::cout << "]" << std::endl; } */ // std::cout << "norm2 before sqrt: " << (norm2ListGPUData)[0] << std::endl; // std::cout << "norm2: " << (*testnorm2List)[0] << std::endl; // if (this->norm2ListGPU_->GetDevice() != GPU) // this->norm2ListGPU_->SetDeviceGPU(idOfDevice); // norm2ListGPUData = this->norm2ListGPU_->GetGPUData(); return TRUE; } __global__ void L2Backprop_kernel(int sizeOfInputImg, int batchSize, float *thisDelta, float *inputDelta, float* norm2List, float* result) { int batchNum = blockIdx.x * blockDim.x + threadIdx.x; if(batchNum < batchSize) { // printf("bathnum in kernel: %d\n", batchNum); // printf("size of sizeofINput: %d\n", sizeOfInputImg); float sumOfDelta = 0; for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { sumOfDelta += (thisDelta[batchNum * sizeOfInputImg + elementIndex] * result[batchNum * sizeOfInputImg + elementIndex]); } for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { // printf("norm2List[%d] = %f\n", batchNum, norm2List[batchNum]); inputDelta[batchNum * sizeOfInputImg + elementIndex] += (thisDelta[batchNum * sizeOfInputImg + elementIndex] - (result[batchNum * sizeOfInputImg + elementIndex] * sumOfDelta)) / norm2List[batchNum]; } } } template<typename DTYPE> int L2Normalize<DTYPE>::BackPropagateOnGPU(int pTime) { // std::cout << "backprooppp!!" 
<< std::endl; int noBlock = 3, threadsPerBlock = 64; Tensor<DTYPE> *this_delta = this->GetGradient(); Tensor<DTYPE> *inputDelta = this->GetInput()[0]->GetDelta(); Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = this->GetInput()[0]->GetResult(); DTYPE *result_gpu = result->GetGPUData(); int timesize = this_delta->GetTimeSize(); int batchsize = this_delta->GetBatchSize(); int channelsize = this_delta->GetChannelSize(); int rowsize = this_delta->GetRowSize(); int colsize = this_delta->GetColSize(); Shape *resultTenShape = this_delta->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *thisDeltaGPU = this_delta->GetGPUData(); DTYPE *input_delta_gpu = inputDelta->GetGPUData(); int idOfDevice = result->GetDeviceID(); DTYPE* norm2ListGPUData; // std::cout << "back nromlistgpu: " << this->norm2ListGPU_->GetShape() << std::endl; // std::cout << "norm2gpu test" << (*this->norm2ListGPU_)[0] << std::endl; // std::cout << "gpu id trial: " << idOfDevice << std::endl; // std::cout << "thisDeltagpu: " << this_delta->GetIdOfDevice() << std::endl; // if (sumOfDelta->GetDevice() != GPU) this->norm2ListGPU_->SetDeviceGPU(idOfDevice); norm2ListGPUData = this->norm2ListGPU_->GetGPUData(); GetKernelParameters(batchsize, &noBlock, &threadsPerBlock); L2Backprop_kernel << < noBlock, threadsPerBlock >> > (sizeOfResultImg, batchsize, thisDeltaGPU, input_delta_gpu, norm2ListGPUData, result_gpu); hipDeviceSynchronize(); // std::cout << "l2 bp endss" << std::endl; this->norm2ListGPU_->SetDeviceCPU(); // std::cout << "normListGPU[0]" << (*this->norm2ListGPU_)[0] << std::endl; /* for(int h = 5; h < 6; h++) { std::cout << "input vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*input)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "real norm 2 [" << h << "]: " << (*this->norm2ListGPU_)[h] << std::endl; std::cout << "result vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*result)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "thisDelta [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*this_delta)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "inputDelta [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*inputDelta)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; } */ return TRUE; } #endif // ifdef __CUDNN__
75034643bd0ea35a4958a79d0e2b524024c4fcb6.cu
#ifdef __CUDNN__ #include "L2Normalize.hpp" // template class ConcatenateChannelWise<int>; template class L2Normalize<float>; // template class ConcatenateChannelWise<double>; __global__ void Normalize_kernel(int sizeOfInputImg, int batchSize, float *input, float* norm2List, float* result) { int batchNum = blockIdx.x * blockDim.x + threadIdx.x; if(batchNum < batchSize) { norm2List[batchNum] = 0; for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { // printf("eidx: %d\n", elementIndex); norm2List[batchNum] += (input[batchNum * sizeOfInputImg + elementIndex] * input[batchNum * sizeOfInputImg + elementIndex]); } norm2List[batchNum] = sqrt(norm2List[batchNum]); for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { // printf("norm2List[%d] = %f\n", batchNum, norm2List[batchNum]); result[batchNum * sizeOfInputImg + elementIndex] = (input[batchNum * sizeOfInputImg + elementIndex] / norm2List[batchNum]); } } } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } template<typename DTYPE> int L2Normalize<DTYPE>::ForwardPropagateOnGPU(int pTime) { int noBlock = 3, threadsPerBlock = 128; Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = NULL; int timesize = result->GetTimeSize(); int batchsize = result->GetBatchSize(); int channelsize = result->GetChannelSize(); int rowsize = result->GetRowSize(); int colsize = result->GetColSize(); Shape *resultTenShape = result->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfInputImg = 0; DTYPE *result_gpu = result->GetGPUData(); DTYPE *input_gpu = NULL; int inputChannelSize = 0; int idOfDevice = result->GetDeviceID(); float test= 0.f; DTYPE* norm2ListGPUData; // std::cout << "L2 forward" << std::endl; Tensor<DTYPE>* testnorm2List = NULL; if(this->norm2ListGPU_); delete this->norm2ListGPU_; this->norm2ListGPU_ = Tensor<DTYPE>::Zeros(1, 1, 1, 1, batchsize); // testnorm2List = Tensor<DTYPE>::Zeros(1, 1, 1, 1, batchsize); // std::cout << "norm2 tensor: " << (*testnorm2List)[0] << std::endl; // for (int batchIndex = 0; batchIndex < batchsize; batchIndex++) // this->norm2ListGPU_[batchIndex] = 0; // std::cout << "testnorm2LIst zero index: " << (*testnorm2List)[0] << std::endl; // std::cout << "norm2 before get value: " << (*this->norm2ListGPU_)[0] << std::endl; if (this->norm2ListGPU_->GetDevice() != GPU) this->norm2ListGPU_->SetDeviceGPU(idOfDevice); // std::cout << "id: " << idOfDevice << std::endl; // testnorm2List->SetDeviceGPU(idOfDevice); // (*testnorm2List)[0]++; norm2ListGPUData = this->norm2ListGPU_->GetGPUData(); // norm2ListGPUData = testnorm2List->GetGPUData(); input = this->GetInput()[0]->GetResult(); input_gpu = input->GetGPUData(); // resultGPU = result->GetGPUData(); inputChannelSize = input->GetChannelSize(); sizeOfInputImg = inputChannelSize * sizeOfPlane; GetKernelParameters(batchsize, &noBlock, &threadsPerBlock); // std::cout << input->GetShape() << std::endl; // std::cout << "noblock: " << noBlock << " nothread: " << threadsPerBlock << std::endl;; // std::cout << "norm2 before getsum: " << (*this->norm2ListGPU_)[0] << std::endl; // std::cout << "norm2 before get value: " << (*this->norm2ListGPU_)[0] << std::endl; Normalize_kernel << < noBlock, threadsPerBlock >> > (sizeOfInputImg, batchsize, input_gpu, norm2ListGPUData, result_gpu); cudaDeviceSynchronize(); 
// std::cout << "end of normalization" << std::endl; this->norm2ListGPU_->SetDeviceCPU(); // testnorm2List->SetDeviceCPU(); // (*this->norm2ListGPU_)[0] = 1.f; // std::cout << "normListgpu size: " << this->norm2ListGPU_->GetShape() << std::endl; // for(int i = 0; i < batchsize; i++) // std::cout << "real norm 2 [" << i << "]: " << (*this->norm2ListGPU_)[i] << std::endl; // std::cout << "normalized value" << std::endl; // std::cout << "after normlization" << std::endl; /* for(int h = 0; h < batchsize; h++) { std::cout << "input vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfInputImg; i++) std::cout << (*input)[h * sizeOfInputImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "real norm 2 [" << h << "]: " << (*this->norm2ListGPU_)[h] << std::endl; std::cout << "result vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfInputImg; i++) std::cout << (*result)[h * sizeOfInputImg + i] << std::endl; std::cout << "]" << std::endl; } */ // std::cout << "norm2 before sqrt: " << (norm2ListGPUData)[0] << std::endl; // std::cout << "norm2: " << (*testnorm2List)[0] << std::endl; // if (this->norm2ListGPU_->GetDevice() != GPU) // this->norm2ListGPU_->SetDeviceGPU(idOfDevice); // norm2ListGPUData = this->norm2ListGPU_->GetGPUData(); return TRUE; } __global__ void L2Backprop_kernel(int sizeOfInputImg, int batchSize, float *thisDelta, float *inputDelta, float* norm2List, float* result) { int batchNum = blockIdx.x * blockDim.x + threadIdx.x; if(batchNum < batchSize) { // printf("bathnum in kernel: %d\n", batchNum); // printf("size of sizeofINput: %d\n", sizeOfInputImg); float sumOfDelta = 0; for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { sumOfDelta += (thisDelta[batchNum * sizeOfInputImg + elementIndex] * result[batchNum * sizeOfInputImg + elementIndex]); } for (int elementIndex = 0; elementIndex < sizeOfInputImg; elementIndex++) { // printf("norm2List[%d] = %f\n", batchNum, norm2List[batchNum]); inputDelta[batchNum * sizeOfInputImg + elementIndex] += (thisDelta[batchNum * sizeOfInputImg + elementIndex] - (result[batchNum * sizeOfInputImg + elementIndex] * sumOfDelta)) / norm2List[batchNum]; } } } template<typename DTYPE> int L2Normalize<DTYPE>::BackPropagateOnGPU(int pTime) { // std::cout << "backprooppp!!" 
<< std::endl; int noBlock = 3, threadsPerBlock = 64; Tensor<DTYPE> *this_delta = this->GetGradient(); Tensor<DTYPE> *inputDelta = this->GetInput()[0]->GetDelta(); Tensor<DTYPE> *result = this->GetResult(); Tensor<DTYPE> *input = this->GetInput()[0]->GetResult(); DTYPE *result_gpu = result->GetGPUData(); int timesize = this_delta->GetTimeSize(); int batchsize = this_delta->GetBatchSize(); int channelsize = this_delta->GetChannelSize(); int rowsize = this_delta->GetRowSize(); int colsize = this_delta->GetColSize(); Shape *resultTenShape = this_delta->GetShape(); int sizeOfPlane = rowsize * colsize; int sizeOfResultImg = channelsize * sizeOfPlane; int sizeOfInputImg = 0; DTYPE *thisDeltaGPU = this_delta->GetGPUData(); DTYPE *input_delta_gpu = inputDelta->GetGPUData(); int idOfDevice = result->GetDeviceID(); DTYPE* norm2ListGPUData; // std::cout << "back nromlistgpu: " << this->norm2ListGPU_->GetShape() << std::endl; // std::cout << "norm2gpu test" << (*this->norm2ListGPU_)[0] << std::endl; // std::cout << "gpu id trial: " << idOfDevice << std::endl; // std::cout << "thisDeltagpu: " << this_delta->GetIdOfDevice() << std::endl; // if (sumOfDelta->GetDevice() != GPU) this->norm2ListGPU_->SetDeviceGPU(idOfDevice); norm2ListGPUData = this->norm2ListGPU_->GetGPUData(); GetKernelParameters(batchsize, &noBlock, &threadsPerBlock); L2Backprop_kernel << < noBlock, threadsPerBlock >> > (sizeOfResultImg, batchsize, thisDeltaGPU, input_delta_gpu, norm2ListGPUData, result_gpu); cudaDeviceSynchronize(); // std::cout << "l2 bp endss" << std::endl; this->norm2ListGPU_->SetDeviceCPU(); // std::cout << "normListGPU[0]" << (*this->norm2ListGPU_)[0] << std::endl; /* for(int h = 5; h < 6; h++) { std::cout << "input vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*input)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "real norm 2 [" << h << "]: " << (*this->norm2ListGPU_)[h] << std::endl; std::cout << "result vallue [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*result)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "thisDelta [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*this_delta)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; std::cout << "inputDelta [" << h << "]: [" << std::endl;; for(int i = 0; i < sizeOfResultImg; i++) std::cout << (*inputDelta)[h * sizeOfResultImg + i] << std::endl; std::cout << "]" << std::endl; } */ return TRUE; } #endif // ifdef __CUDNN__
c0f1ead973c4f718aabe9dfb9d5553d0138a9d1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2017 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file deformable_psroi_pooling.cu * \brief * \author Yi Li, Guodong Zhang, Jifeng Dai */ /***************** Adapted by Charles Shang *********************/ // modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/cuda/deform_psroi_pooling_cuda.cu #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #include <stdio.h> #include <math.h> #include <algorithm> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename scalar_t> __device__ scalar_t bilinear_interp( const scalar_t *data, const scalar_t x, const scalar_t y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); scalar_t dist_x = (scalar_t)(x - x1); scalar_t dist_y = (scalar_t)(y - y1); scalar_t value11 = data[y1 * width + x1]; scalar_t value12 = data[y2 * width + x1]; scalar_t value21 = data[y1 * width + x2]; scalar_t value22 = data[y2 * width + x2]; scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; return value; } template <typename scalar_t> __global__ void DeformablePSROIPoolForwardKernel( const int count, const scalar_t *bottom_data, const scalar_t spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const scalar_t *bottom_rois, const scalar_t *bottom_trans, const int no_trans, const scalar_t trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, scalar_t *top_data, scalar_t *top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); int part_h = floor((scalar_t)(ph) / pooled_height * part_size); int part_w = floor((scalar_t)(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; wstart += trans_x * roi_width; scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; hstart += trans_y * roi_height; scalar_t sum = 0; int count = 0; int gw = floor((scalar_t)(pw)*group_size / pooled_width); int gh = floor((scalar_t)(ph)*group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { scalar_t w = wstart + iw * sub_bin_size_w; scalar_t h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? (scalar_t)(0) : sum / count; top_count[index] = count; } } template <typename scalar_t> __global__ void DeformablePSROIPoolBackwardAccKernel( const int count, const scalar_t *top_diff, const scalar_t *top_count, const int num_rois, const scalar_t spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff, const scalar_t *bottom_data, const scalar_t *bottom_rois, const scalar_t *bottom_trans, const int no_trans, const scalar_t trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); int part_h = floor((scalar_t)(ph) / pooled_height * part_size); int part_w = floor((scalar_t)(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; wstart += trans_x * roi_width; scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } scalar_t diff_val = top_diff[index] / top_count[index]; const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor((scalar_t)(pw)*group_size / pooled_width); int gh = floor((scalar_t)(ph)*group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { scalar_t w = wstart + iw * sub_bin_size_w; scalar_t h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; // backward on feature int x0 = floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); scalar_t dist_x = w - x0, dist_y = h - y0; scalar_t q00 = (1 - dist_x) * (1 - dist_y); scalar_t q01 = (1 - dist_x) * dist_y; scalar_t q10 = dist_x * (1 - dist_y); scalar_t q11 = dist_x * dist_y; int bottom_index_base = c * height * width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); if (no_trans) { continue; } scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; diff_x *= roi_width; scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); } } } } void DeformablePSROIPoolForward(const at::Tensor data, const at::Tensor bbox, const at::Tensor trans, at::Tensor out, at::Tensor top_count, const int batch, const int channels, const int height, const int width, const int num_bbox, const int channels_trans, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int count = num_bbox * output_dim * pooled_height * pooled_width; const int num_classes = no_trans ? 
1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data.type(), "deformable_psroi_pool_forward", ([&] { const scalar_t *bottom_data = data.data<scalar_t>(); const scalar_t *bottom_rois = bbox.data<scalar_t>(); const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>(); scalar_t *top_data = out.data<scalar_t>(); scalar_t *top_count_data = top_count.data<scalar_t>(); hipLaunchKernelGGL(( DeformablePSROIPoolForwardKernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim, group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in DeformablePSROIPoolForward: %s\n", hipGetErrorString(err)); } } void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox, const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad, at::Tensor trans_grad, const int batch, const int channels, const int height, const int width, const int num_bbox, const int channels_trans, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { // LOG(INFO) << "DeformablePSROIPoolBackward"; const int num_rois = num_bbox; const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int count = num_bbox * output_dim * pooled_height * pooled_width; const int num_classes = no_trans ? 1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; AT_DISPATCH_FLOATING_TYPES_AND_HALF( out_grad.type(), "deformable_psroi_pool_backward_acc", ([&] { const scalar_t *top_diff = out_grad.data<scalar_t>(); const scalar_t *bottom_data = data.data<scalar_t>(); const scalar_t *bottom_rois = bbox.data<scalar_t>(); const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>(); scalar_t *bottom_data_diff = in_grad.data<scalar_t>(); scalar_t *bottom_trans_diff = no_trans ? NULL : trans_grad.data<scalar_t>(); const scalar_t *top_count_data = top_count.data<scalar_t>(); hipLaunchKernelGGL(( DeformablePSROIPoolBackwardAccKernel), dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, 0, count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff, bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, group_size, part_size, num_classes, channels_each_class); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in DeformablePSROIPoolForward: %s\n", hipGetErrorString(err)); } }
c0f1ead973c4f718aabe9dfb9d5553d0138a9d1a.cu
/*! * Copyright (c) 2017 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file deformable_psroi_pooling.cu * \brief * \author Yi Li, Guodong Zhang, Jifeng Dai */ /***************** Adapted by Charles Shang *********************/ // modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/cuda/deform_psroi_pooling_cuda.cu #include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #include <stdio.h> #include <math.h> #include <algorithm> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename scalar_t> __device__ scalar_t bilinear_interp( const scalar_t *data, const scalar_t x, const scalar_t y, const int width, const int height) { int x1 = floor(x); int x2 = ceil(x); int y1 = floor(y); int y2 = ceil(y); scalar_t dist_x = (scalar_t)(x - x1); scalar_t dist_y = (scalar_t)(y - y1); scalar_t value11 = data[y1 * width + x1]; scalar_t value12 = data[y2 * width + x1]; scalar_t value21 = data[y1 * width + x2]; scalar_t value22 = data[y2 * width + x2]; scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22; return value; } template <typename scalar_t> __global__ void DeformablePSROIPoolForwardKernel( const int count, const scalar_t *bottom_data, const scalar_t spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const scalar_t *bottom_rois, const scalar_t *bottom_trans, const int no_trans, const scalar_t trans_std, const int sample_per_part, const int output_dim, const int group_size, const int part_size, const int num_classes, const int channels_each_class, scalar_t *top_data, scalar_t *top_count) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); int part_h = floor((scalar_t)(ph) / pooled_height * part_size); int part_w = floor((scalar_t)(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; wstart += trans_x * roi_width; scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; hstart += trans_y * roi_height; scalar_t sum = 0; int count = 0; int gw = floor((scalar_t)(pw)*group_size / pooled_width); int gh = floor((scalar_t)(ph)*group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width; for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { scalar_t w = wstart + iw * sub_bin_size_w; scalar_t h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height); sum += val; count++; } } top_data[index] = count == 0 ? (scalar_t)(0) : sum / count; top_count[index] = count; } } template <typename scalar_t> __global__ void DeformablePSROIPoolBackwardAccKernel( const int count, const scalar_t *top_diff, const scalar_t *top_count, const int num_rois, const scalar_t spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff, const scalar_t *bottom_data, const scalar_t *bottom_rois, const scalar_t *bottom_trans, const int no_trans, const scalar_t trans_std, const int sample_per_part, const int group_size, const int part_size, const int num_classes, const int channels_each_class) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5; scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5; scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5; scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5; // Force too small ROIs to be 1x1 scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height); scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width); scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part); scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part); int part_h = floor((scalar_t)(ph) / pooled_height * part_size); int part_w = floor((scalar_t)(pw) / pooled_width * part_size); int class_id = ctop / channels_each_class; scalar_t trans_x = no_trans ? 
(scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std; scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w; wstart += trans_x * roi_width; scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h; hstart += trans_y * roi_height; if (top_count[index] <= 0) { continue; } scalar_t diff_val = top_diff[index] / top_count[index]; const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width; scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width; int gw = floor((scalar_t)(pw)*group_size / pooled_width); int gh = floor((scalar_t)(ph)*group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); for (int ih = 0; ih < sample_per_part; ih++) { for (int iw = 0; iw < sample_per_part; iw++) { scalar_t w = wstart + iw * sub_bin_size_w; scalar_t h = hstart + ih * sub_bin_size_h; // bilinear interpolation if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5) { continue; } w = min(max(w, 0.), width - 1.); h = min(max(h, 0.), height - 1.); int c = (ctop * group_size + gh) * group_size + gw; // backward on feature int x0 = floor(w); int x1 = ceil(w); int y0 = floor(h); int y1 = ceil(h); scalar_t dist_x = w - x0, dist_y = h - y0; scalar_t q00 = (1 - dist_x) * (1 - dist_y); scalar_t q01 = (1 - dist_x) * dist_y; scalar_t q10 = dist_x * (1 - dist_y); scalar_t q11 = dist_x * dist_y; int bottom_index_base = c * height * width; atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val); atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val); if (no_trans) { continue; } scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0]; scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0]; scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1]; scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1]; scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val; diff_x *= roi_width; scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val; diff_y *= roi_height; atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x); atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y); } } } } void DeformablePSROIPoolForward(const at::Tensor data, const at::Tensor bbox, const at::Tensor trans, at::Tensor out, at::Tensor top_count, const int batch, const int channels, const int height, const int width, const int num_bbox, const int channels_trans, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int count = num_bbox * output_dim * pooled_height * pooled_width; const int num_classes = no_trans ? 
1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data.type(), "deformable_psroi_pool_forward", ([&] { const scalar_t *bottom_data = data.data<scalar_t>(); const scalar_t *bottom_rois = bbox.data<scalar_t>(); const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>(); scalar_t *top_data = out.data<scalar_t>(); scalar_t *top_count_data = top_count.data<scalar_t>(); DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>( count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim, group_size, part_size, num_classes, channels_each_class, top_data, top_count_data); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); } } void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox, const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad, at::Tensor trans_grad, const int batch, const int channels, const int height, const int width, const int num_bbox, const int channels_trans, const int no_trans, const float spatial_scale, const int output_dim, const int group_size, const int pooled_size, const int part_size, const int sample_per_part, const float trans_std) { // LOG(INFO) << "DeformablePSROIPoolBackward"; const int num_rois = num_bbox; const int pooled_height = pooled_size; const int pooled_width = pooled_size; const int count = num_bbox * output_dim * pooled_height * pooled_width; const int num_classes = no_trans ? 1 : channels_trans / 2; const int channels_each_class = no_trans ? output_dim : output_dim / num_classes; AT_DISPATCH_FLOATING_TYPES_AND_HALF( out_grad.type(), "deformable_psroi_pool_backward_acc", ([&] { const scalar_t *top_diff = out_grad.data<scalar_t>(); const scalar_t *bottom_data = data.data<scalar_t>(); const scalar_t *bottom_rois = bbox.data<scalar_t>(); const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>(); scalar_t *bottom_data_diff = in_grad.data<scalar_t>(); scalar_t *bottom_trans_diff = no_trans ? NULL : trans_grad.data<scalar_t>(); const scalar_t *top_count_data = top_count.data<scalar_t>(); DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>( count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff, bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, group_size, part_size, num_classes, channels_each_class); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err)); } }
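For reference, a minimal host-side sketch of how the DeformablePSROIPoolForward wrapper above might be driven from ATen code. The tensor shapes (data: N x C x H x W, bbox: num_rois x 5 with the batch index in column 0, trans: num_rois x 2*num_classes x part_size x part_size) follow the kernel indexing above; the helper name run_deform_psroi_forward and all hyper-parameter values are illustrative assumptions, not taken from the original source.

// Sketch only (not part of the original file). Relies on the
// DeformablePSROIPoolForward declaration above; shapes and hyper-parameters
// here are assumptions chosen so that channels >= output_dim*group_size*group_size.
#include <ATen/ATen.h>

at::Tensor run_deform_psroi_forward(const at::Tensor &data,   // N x C x H x W (CUDA)
                                    const at::Tensor &bbox,   // num_rois x 5: [batch_idx, x1, y1, x2, y2]
                                    const at::Tensor &trans)  // num_rois x (2*num_classes) x part_size x part_size
{
    const int output_dim = 10, pooled_size = 7, group_size = 7;   // assumed values
    const int part_size = 7, sample_per_part = 4;
    const float spatial_scale = 1.0f / 16.0f, trans_std = 0.1f;

    const int batch          = data.size(0);
    const int channels       = data.size(1);
    const int height         = data.size(2);
    const int width          = data.size(3);
    const int num_bbox       = bbox.size(0);
    const int channels_trans = trans.size(1);   // 2 * num_classes
    const int no_trans       = 0;

    at::Tensor out       = at::zeros({num_bbox, output_dim, pooled_size, pooled_size},
                                     data.options());
    at::Tensor top_count = at::zeros_like(out);

    DeformablePSROIPoolForward(data, bbox, trans, out, top_count,
                               batch, channels, height, width,
                               num_bbox, channels_trans, no_trans,
                               spatial_scale, output_dim, group_size,
                               pooled_size, part_size, sample_per_part, trans_std);
    return out;
}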
8b8f059f0ad74eda822ac5d77d21705b2af714d6.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zgesellcmmv.cu normal z -> d, Fri Jan 30 19:00:29 2015 */ #include "hip/hip_runtime.h" #include <stdio.h> #include "common_magma.h" #include "sm_32_intrinsics.h" #define PRECISION_d //#define TEXTURE /* // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_ldg( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, const double * __restrict__ dx, double beta, double * dy) { #if defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = __ldg( dx+ i1 ); x2 = __ldg( dx+ i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = __ldg( dx + dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } #endif } */ // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = dcolind [offset+ blocksize * n + threadIdx.x ]; double val = dval[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*dx[col]; } } dy[ Idx ] = dot * alpha + beta * dy [ Idx ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ double read_from_tex( hipTextureObject_t texdx, const int& i){ int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2double(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( blocksize, alignment, 1); int dimgrid1 = (int) sqrt( (double)slices ); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( double ); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. hipChannelFormatDesc channel_desc; channel_desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned); // Create resource descriptor. struct hipResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = hipResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(double); // Specify texture object parameters. struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; // Create texture object. 
hipTextureObject_t texdx = 0; hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); if ( alignment == 4) hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else if ( alignment == 8) hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else if ( alignment == 16) hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else if ( alignment == 32) hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } hipDestroyTextureObject(texdx); #else if ( alignment == 1) hipLaunchKernelGGL(( zgesellptmv2d_kernel_1), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 4) hipLaunchKernelGGL(( zgesellptmv2d_kernel_4), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 8) hipLaunchKernelGGL(( zgesellptmv2d_kernel_8), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 16) hipLaunchKernelGGL(( zgesellptmv2d_kernel_16), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 32) hipLaunchKernelGGL(( zgesellptmv2d_kernel_32), dim3(grid), dim3(block), Ms, queue , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } #endif return MAGMA_SUCCESS; }
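A small worked example of the SELLP addressing used by the 2D kernels above, as a sketch with illustrative numbers: with blocksize rows per slice and alignment T threads per row, one slice consumes blocksize*T values per iteration, and thread (idy = local row, idx = thread-in-row) reads its k-th element at dval[offset + idx*blocksize + idy + k*blocksize*T].

/* Illustrative index helper, not part of MAGMA: it reproduces the flat offset
 * computed inside the kernels above (ldx = idx*blocksize + idy, block = blocksize*T). */
static inline int sellp_index(int slice_offset, int idy, int idx, int k,
                              int blocksize, int T)
{
    int ldx   = idx * blocksize + idy;   /* this thread's slot inside the slice block */
    int block = blocksize * T;           /* values consumed per iteration by the whole slice */
    return slice_offset + ldx + k * block;
}
/* Example with blocksize = 8, T = 4: slice_offset = 128, idy = 3, idx = 2, k = 1
 * gives 128 + 19 + 32 = 179. */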
8b8f059f0ad74eda822ac5d77d21705b2af714d6.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zgesellcmmv.cu normal z -> d, Fri Jan 30 19:00:29 2015 */ #include "cuda_runtime.h" #include <stdio.h> #include "common_magma.h" #include "sm_32_intrinsics.h" #define PRECISION_d //#define TEXTURE /* // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_ldg( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, const double * __restrict__ dx, double beta, double * dy) { #if defined(TEXTURE) && (__CUDA_ARCH__ >= 300) // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = __ldg( dx+ i1 ); x2 = __ldg( dx+ i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = __ldg( dx + dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } #endif } */ // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // threads assigned to rows int Idx = blockDim.x * blockIdx.x + threadIdx.x ; int offset = drowptr[ blockIdx.x ]; int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; if(Idx < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++){ int col = dcolind [offset+ blocksize * n + threadIdx.x ]; double val = dval[offset+ blocksize * n + threadIdx.x]; if( val != 0){ dot=dot+val*dx[col]; } } dy[ Idx ] = dot * alpha + beta * dy [ Idx ]; } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, double * dx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ double read_from_tex( cudaTextureObject_t texdx, const int& i){ int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2double(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ){ shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; double x1, x2, v1, v2; dcolind += offset + ldx ; dval += offset + ldx; for ( kk = 0; kk < max_-1 ; kk+=2 ){ i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_){ x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ){ shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ){ shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, double alpha, double * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, double beta, double * dy) { // T threads assigned to each row int idx = threadIdx.y ; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ double shared[]; if(row < num_rows ){ double dot = MAGMA_D_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ double val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ){ shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); dim3 block( blocksize, alignment, 1); int dimgrid1 = (int) sqrt( (double)slices ); int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = num_threads * sizeof( double ); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. cudaChannelFormatDesc channel_desc; channel_desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned); // Create resource descriptor. struct cudaResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = cudaResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(double); // Specify texture object parameters. struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; // Create texture object. 
cudaTextureObject_t texdx = 0; cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); if ( alignment == 4) zgesellptmv2d_kernel_4_tex<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else if ( alignment == 8) zgesellptmv2d_kernel_8_tex<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else if ( alignment == 16) zgesellptmv2d_kernel_16_tex<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else if ( alignment == 32) zgesellptmv2d_kernel_32_tex<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } cudaDestroyTextureObject(texdx); #else if ( alignment == 1) zgesellptmv2d_kernel_1<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 4) zgesellptmv2d_kernel_4<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 8) zgesellptmv2d_kernel_8<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 16) zgesellptmv2d_kernel_16<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else if ( alignment == 32) zgesellptmv2d_kernel_32<<< grid, block, Ms, queue >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } #endif return MAGMA_SUCCESS; }
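A hedged sketch of how magma_dgesellpmv above might be called once a matrix is already stored in SELLP form on the device. The wrapper name run_sellp_spmv, the alignment of 8, and the alpha/beta values are illustrative assumptions; per the dispatch above, alignment must be one of 1, 4, 8, 16 or 32.

// Sketch only (not part of the original file): drives the SELLP SpMV above,
// assuming dval/dcolind/drowptr/dx/dy already hold valid device data.
#include "common_magma.h"

magma_int_t run_sellp_spmv( magma_int_t m, magma_int_t n, magma_int_t blocksize,
                            magmaDouble_ptr dval, magmaIndex_ptr dcolind,
                            magmaIndex_ptr drowptr, magmaDouble_ptr dx,
                            magmaDouble_ptr dy, magma_queue_t queue )
{
    magma_int_t slices    = (m + blocksize - 1) / blocksize;   // one slice per blocksize rows
    magma_int_t alignment = 8;        // threads per row: must be 1, 4, 8, 16 or 32
    double alpha = 1.0, beta = 0.0;   // y = alpha*op(A)*x + beta*y

    return magma_dgesellpmv( MagmaNoTrans, m, n, blocksize, slices, alignment,
                             alpha, dval, dcolind, drowptr, dx, beta, dy, queue );
}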
1ee94a360e12fcb3c47e17d7fd9e21b993eba070.hip
// !!! This is a file automatically generated by hipify!!! // How to use? // =========== // Single precision Serial mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -s // // Single precision PThreads mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -p <num of threads> // // Single precision Cuda simple calculation mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -c // // Single precision Cuda tiled mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -c -t // // To use with Double precision, use the -D DP option when compiling. Also for // CUDA based Double Precision calculation, a -arch sm_20 flag is recommended // to turn off the "Double is not supported. Demoting to float" warning. // // Add a -v flag at the end when running the code if verification is needed. // ex - clear;rm a.out; nvcc -O3 q3.cu ;./a.out -c -t -v // #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <math.h> #include <time.h> #include <hiprand/hiprand_kernel.h> #include <pthread.h> #include <time.h> #include <errno.h> #define GET_TIME(x); if (clock_gettime(CLOCK_MONOTONIC, &(x)) < 0) \ { perror("clock_gettime( ):"); exit(EXIT_FAILURE); } #define MATRIX_DIM 1800 #define MIN_ERROR 0.1 // CUDA related #define BLOCK_SIZE 32 // PThread related #define MAX_PTHREADS 8 //Code to check for GPU errors #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code),\ file, line); if (abort) exit(code); } } //Help code for switching between Single Precision and Double Precision #ifdef DP typedef double Real; #else typedef float Real; #endif /** * Measures the time differences * @param begin begin time * @param end end time * @param sec resulting time in seconds * @param nsec resulting time in nano-seconds * @return the time taken */ float elapsed_time_msec(struct timespec *begin, struct timespec *end, long *sec, long *nsec) { if (end->tv_nsec < begin->tv_nsec) { *nsec = 1000000000 - (begin->tv_nsec - end->tv_nsec); *sec = end->tv_sec - begin->tv_sec -1; } else { *nsec = end->tv_nsec - begin->tv_nsec; *sec = end->tv_sec - begin->tv_sec; } return (float) (*sec) * 1000 + ((float) (*nsec)) / 1000000; } static unsigned long inKB(unsigned long bytes) { return bytes/1024; } static unsigned long inMB(unsigned long bytes) { return bytes/(1024*1024); } /** * Used to print memory states in the GPU */ static void printStats() { size_t free, total; hipError_t res = cuMemGetInfo(&free, &total); if(res != hipSuccess){ printf("!!!! cuMemGetInfo failed! (status = %x)", res); return; } printf("---------------------------------------------------------------\n"); printf("^^^^ Free : %lu bytes (%lu KB) (%lu MB)\n", free, inKB(free), \ inMB(free)); printf("^^^^ Total: %lu bytes (%lu KB) (%lu MB)\n", total, inKB(total), \ inMB(total)); printf("^^^^ %f%% free, %f%% used\n", 100.0*free/(double)total, \ 100.0*(total - free)/(double)total); printf("---------------------------------------------------------------\n"); } /** * Carries out a simple square matrix multiplication where each thread * calculates a single item in the resulting matrix. 
* @param A First matrix * @param B Second matrix * @param C Results matrix */ __global__ void cuda_simple_mat_mul(Real* A, Real* B, Real* C) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; //check for bounds if(row < MATRIX_DIM && col < MATRIX_DIM) { Real sum = 0.f; for (int i = 0; i < MATRIX_DIM; i++) sum += A[row * MATRIX_DIM + i] * B[i * MATRIX_DIM + col]; C[row * MATRIX_DIM + col] = sum; } } /** * Initializes the given matrix to a set of float/Double values between 1-2 */ void init_matrix(Real matrix[MATRIX_DIM][MATRIX_DIM]) { for(int i=0; i < MATRIX_DIM; i++) { for(int j=0; j < MATRIX_DIM; j++) { matrix[i][j] = 1 + (Real)rand()/(Real)RAND_MAX; } } } /** * Prints the given matrix to the stdout */ void print_matrix(Real matrix[MATRIX_DIM][MATRIX_DIM]) { for(int i = 0; i < MATRIX_DIM; i++) { printf("["); for(int j = 0; j < MATRIX_DIM; j++) { #ifdef DP printf("%20.18f ", matrix[i][j]); #else printf("%f ", matrix[i][j]); #endif } printf("] \n"); } printf("\n"); } /** * Compares the given two matrices. */ void compare_matrices(Real matrix1[MATRIX_DIM][MATRIX_DIM],\ Real matrix2[MATRIX_DIM][MATRIX_DIM]) { for(int i = 0; i < MATRIX_DIM; i++) { for(int j = 0; j < MATRIX_DIM; j++) { if((matrix1[i][j] - matrix2[i][j] > MIN_ERROR) && (matrix1[i][j] - matrix2[i][j] > 0)) { printf("Error i=%d : j=%d mat1=%f mat2=%f\n",i,j,\ matrix1[i][j], matrix2[i][j]); return; } } } printf("Matrices Match! \n"); } /** * carries out a serial matrix multiplication */ void serial_mat_mul(Real A[MATRIX_DIM][MATRIX_DIM], \ Real B[MATRIX_DIM][MATRIX_DIM], Real C[MATRIX_DIM][MATRIX_DIM]) { float sum; for (int row=0; row<MATRIX_DIM; row++){ for (int col=0; col<MATRIX_DIM; col++){ sum = 0.f; for (int n=0; n<MATRIX_DIM; n++){ sum += A[row][n]*B[n][col]; } C[row][col] = sum; } } } /** * Shows the usage of the program. 
*/ void print_usage(){ printf("Wrong usage!\n"); } /** * Does a matrix multiplication using the "tiled" approach in the GPU * @param A First matrix * @param B Second matrix * @param C Results matrix */ __global__ void cuda_tiled_mat_mul(Real * A, Real * B, Real * C) { float CValue = 0; int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y; int Col = blockIdx.x*BLOCK_SIZE + threadIdx.x; __shared__ Real As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ Real Bs[BLOCK_SIZE][BLOCK_SIZE]; for (int k = 0; k < (BLOCK_SIZE + MATRIX_DIM - 1)/BLOCK_SIZE; k++) { // check ranges for the matrices and check for left out parts where // MATRIX_DIM is not an exact multiplication of tile size(BLOCK_SIZE) if (k*BLOCK_SIZE + threadIdx.x < MATRIX_DIM && Row < MATRIX_DIM){ As[threadIdx.y][threadIdx.x] = A[Row*MATRIX_DIM + \ k*BLOCK_SIZE + threadIdx.x]; } else{ As[threadIdx.y][threadIdx.x] = 0.0; } if (k*BLOCK_SIZE + threadIdx.y < MATRIX_DIM && Col < MATRIX_DIM){ Bs[threadIdx.y][threadIdx.x] = B[(k*BLOCK_SIZE + \ threadIdx.y)*MATRIX_DIM + Col]; } else{ Bs[threadIdx.y][threadIdx.x] = 0.0; } // Wait till all the threads finish before calculating the results __syncthreads(); for (int n = 0; n < BLOCK_SIZE; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; __syncthreads(); } // Calculate the result if (Row < MATRIX_DIM && Col < MATRIX_DIM) C[((blockIdx.y * blockDim.y + threadIdx.y)*MATRIX_DIM)+\ (blockIdx.x*blockDim.x)+threadIdx.x]=CValue; } //struct for parameter passing between pthread calls struct pthread_arg_struct { int tid; int total_threads; Real (*A)[MATRIX_DIM]; Real (*B)[MATRIX_DIM]; Real (*C)[MATRIX_DIM]; }; /** * PThread code for assigning tasks to pthreads * @param arguments an instance of pthread_arg_struct */ void* pthread_mat_mul(void* arguments) { struct pthread_arg_struct *args = (struct pthread_arg_struct *)arguments; int total_threads = args -> total_threads; int tid = args -> tid; //obtain the value of thread id Real (*A)[MATRIX_DIM]=args -> A; Real (*B)[MATRIX_DIM]=args -> B; // get the workload for one thread int chunk_size=MATRIX_DIM/total_threads; // check for the row ranges the thread needs to calculate int min_row = chunk_size * tid; int max_row = (min_row+chunk_size-1) < MATRIX_DIM ? 
(min_row+chunk_size-1) : MATRIX_DIM; float sum=0.f; // loop the matrix entries that belongs to this thread for(;min_row<=max_row;min_row++){ for(int col=0;col<MATRIX_DIM;col++){ for (int n=0; n<MATRIX_DIM; n++){ sum += A[min_row][n]*B[n][col]; } args->C[min_row][col] = sum; sum=0; } } pthread_exit((void*)0); } int main(int argc, char const *argv[]) { if(argc<2){ print_usage(); } struct timespec t1, t2; long sec, nsec; float comp_time; // in milli seconds // Initialize the random seed srand(time(NULL)); // Create the matrices static Real A[MATRIX_DIM][MATRIX_DIM]; static Real B[MATRIX_DIM][MATRIX_DIM]; static Real C[MATRIX_DIM][MATRIX_DIM]; static Real serial_C[MATRIX_DIM][MATRIX_DIM]; // Initialize the matrices init_matrix(A); init_matrix(B); // print_matrix(A); // print_matrix(B); if (0 == strcmp(argv[1], "-s")) { GET_TIME(t1); printf("serial mode\n\n"); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); } else if (0 == strcmp(argv[1], "-p")) { printf("pthread mode\n\n"); int num_of_threads; // check whether the given # of threads is valid if(argc <3){ print_usage(); return -1; } num_of_threads=atoi(argv[2]); if(num_of_threads>MAX_PTHREADS){ printf("[ERROR-PTHREADS] - Only up to 8 threads can be created\n"); return -1; } pthread_t threads[num_of_threads]; int rc; long t; void *status; GET_TIME(t1); //initialize the threads for(t=0;t<num_of_threads;t++){ struct pthread_arg_struct* args=(\ struct pthread_arg_struct*)malloc(sizeof *args); args->total_threads=num_of_threads; args->tid=t; args-> A=A; args-> B=B; args-> C=C; rc = pthread_create(&threads[t], NULL, pthread_mat_mul,(void *)args); if (rc){ printf("ERROR; return code from pthread_create() is %d\n", rc); exit(-1); } } //join the threads for(t=0;t<num_of_threads;t++){ pthread_join(threads[t], &status); } GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: PThreads(%d threads) Time(ms)=%.2f \n", MATRIX_DIM,num_of_threads, comp_time); // if verification is needed if((argc ==4) && (0 == strcmp(argv[3], "-v"))){ GET_TIME(t1); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t1); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // print_matrix(serial_C); // print_matrix(C); // Compare the reuslts compare_matrices(serial_C,C); } } else if (0 == strcmp(argv[1], "-c")) { long matrix_size=MATRIX_DIM*MATRIX_DIM*sizeof(Real); // printf("%ld\n",matrix_size ); GET_TIME(t1); Real* _A; gpuErrchk(hipMalloc((void**) &_A, matrix_size)); // printStats(); Real* _B; gpuErrchk(hipMalloc((void**) &_B, matrix_size)); // printStats(); Real* _C; gpuErrchk(hipMalloc((void**) &_C, matrix_size)); // printStats(); // copy the matrices to device hipMemcpy(_A, A, matrix_size, hipMemcpyHostToDevice); hipMemcpy(_B, B, matrix_size, hipMemcpyHostToDevice); // If the tiled mode needs to be enabled if (argc>2 && 0 == strcmp(argv[2], "-t")){ printf("cuda tiled mode\n"); // set the grid and block sizes dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 dimGrid; dimGrid.x = (MATRIX_DIM + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (MATRIX_DIM + dimBlock.y - 1)/dimBlock.y; // GET_TIME(t1); // execute the workload in the GPU hipLaunchKernelGGL(( cuda_tiled_mat_mul), dim3(dimGrid) , dim3(dimBlock), 0, 0, _A,_B,_C); // Copy back the result hipMemcpy(C,_C,matrix_size,hipMemcpyDeviceToHost); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, 
&sec, &nsec); printf("N=%d: CUDA Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // if verification is needed if((argc ==4) && (0 == strcmp(argv[3], "-v"))){ GET_TIME(t1); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // print_matrix(serial_C); // print_matrix(C); // Compare the reuslts compare_matrices(serial_C,C); } // free device memory hipFree(_A); hipFree(_B); hipFree(_C); } else{ printf("cuda mode\n"); int K=100; dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 grid(K,K); // GET_TIME(t1); // call the GPU hipLaunchKernelGGL(( cuda_simple_mat_mul), dim3(grid),dim3(threadBlock), 0, 0, _A,_B,_C); // Copy back the result hipMemcpy(C,_C,matrix_size,hipMemcpyDeviceToHost); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CUDA Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // if verification is needed if((argc ==3) && (0 == strcmp(argv[2], "-v"))){ GET_TIME(t1); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // print_matrix(serial_C); // print_matrix(C); compare_matrices(serial_C,C); } // free device memory hipFree(_A); hipFree(_B); hipFree(_C); } } else{ print_usage(); } return 0; }
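// Note: a minimal sketch, not part of either file in this pair. The pthread path
// above splits work with chunk_size = MATRIX_DIM / total_threads, which appears to
// leave the last MATRIX_DIM % total_threads rows uncomputed whenever MATRIX_DIM is
// not divisible by the thread count. row_range is a hypothetical helper name; it
// spreads the remainder over the first few threads so every row is covered.
static void row_range(int tid, int total_threads, int *first, int *last)
{
    // hypothetical helper, assuming MATRIX_DIM from the file above
    int base = MATRIX_DIM / total_threads;   // rows every thread gets
    int rem  = MATRIX_DIM % total_threads;   // leftover rows
    *first = tid * base + (tid < rem ? tid : rem);
    *last  = *first + base + (tid < rem ? 1 : 0) - 1;   // inclusive upper bound
}
// A worker thread would then loop for (int row = *first; row <= *last; row++) over
// its columns, which covers all MATRIX_DIM rows for any thread count.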
1ee94a360e12fcb3c47e17d7fd9e21b993eba070.cu
// How to use? // =========== // Single precision Serial mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -s // // Single precision PThreads mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -p <num of threads> // // Single precision Cuda simple calculation mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -c // // Single precision Cuda tiled mode: // // clear;rm a.out; nvcc -O3 q3.cu ;./a.out -c -t // // To use with Double precision, use the -D DP option when compiling. Also for // CUDA based Double Precision calculation, a -arch sm_20 flag is recommended // to turn off the "Double is not supported. Demoting to float" warning. // // Add a -v flag at the end when running the code if verification is needed. // ex - clear;rm a.out; nvcc -O3 q3.cu ;./a.out -c -t -v // #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <math.h> #include <time.h> #include <curand_kernel.h> #include <pthread.h> #include <time.h> #include <errno.h> #define GET_TIME(x); if (clock_gettime(CLOCK_MONOTONIC, &(x)) < 0) \ { perror("clock_gettime( ):"); exit(EXIT_FAILURE); } #define MATRIX_DIM 1800 #define MIN_ERROR 0.1 // CUDA related #define BLOCK_SIZE 32 // PThread related #define MAX_PTHREADS 8 //Code to check for GPU errors #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code),\ file, line); if (abort) exit(code); } } //Help code for switching between Single Precision and Double Precision #ifdef DP typedef double Real; #else typedef float Real; #endif /** * Measures the time differences * @param begin begin time * @param end end time * @param sec resulting time in seconds * @param nsec resulting time in nano-seconds * @return the time taken */ float elapsed_time_msec(struct timespec *begin, struct timespec *end, long *sec, long *nsec) { if (end->tv_nsec < begin->tv_nsec) { *nsec = 1000000000 - (begin->tv_nsec - end->tv_nsec); *sec = end->tv_sec - begin->tv_sec -1; } else { *nsec = end->tv_nsec - begin->tv_nsec; *sec = end->tv_sec - begin->tv_sec; } return (float) (*sec) * 1000 + ((float) (*nsec)) / 1000000; } static unsigned long inKB(unsigned long bytes) { return bytes/1024; } static unsigned long inMB(unsigned long bytes) { return bytes/(1024*1024); } /** * Used to print memory states in the GPU */ static void printStats() { size_t free, total; CUresult res = cuMemGetInfo(&free, &total); if(res != CUDA_SUCCESS){ printf("!!!! cuMemGetInfo failed! (status = %x)", res); return; } printf("---------------------------------------------------------------\n"); printf("^^^^ Free : %lu bytes (%lu KB) (%lu MB)\n", free, inKB(free), \ inMB(free)); printf("^^^^ Total: %lu bytes (%lu KB) (%lu MB)\n", total, inKB(total), \ inMB(total)); printf("^^^^ %f%% free, %f%% used\n", 100.0*free/(double)total, \ 100.0*(total - free)/(double)total); printf("---------------------------------------------------------------\n"); } /** * Carries out a simple square matrix multiplication where each thread * calculates a single item in the resulting matrix. 
* @param A First matrix * @param B Second matrix * @param C Results matrix */ __global__ void cuda_simple_mat_mul(Real* A, Real* B, Real* C) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; //check for bounds if(row < MATRIX_DIM && col < MATRIX_DIM) { Real sum = 0.f; for (int i = 0; i < MATRIX_DIM; i++) sum += A[row * MATRIX_DIM + i] * B[i * MATRIX_DIM + col]; C[row * MATRIX_DIM + col] = sum; } } /** * Initializes the given matrix to a set of float/Double values between 1-2 */ void init_matrix(Real matrix[MATRIX_DIM][MATRIX_DIM]) { for(int i=0; i < MATRIX_DIM; i++) { for(int j=0; j < MATRIX_DIM; j++) { matrix[i][j] = 1 + (Real)rand()/(Real)RAND_MAX; } } } /** * Prints the given matrix to the stdout */ void print_matrix(Real matrix[MATRIX_DIM][MATRIX_DIM]) { for(int i = 0; i < MATRIX_DIM; i++) { printf("["); for(int j = 0; j < MATRIX_DIM; j++) { #ifdef DP printf("%20.18f ", matrix[i][j]); #else printf("%f ", matrix[i][j]); #endif } printf("] \n"); } printf("\n"); } /** * Compares the given two matrices. */ void compare_matrices(Real matrix1[MATRIX_DIM][MATRIX_DIM],\ Real matrix2[MATRIX_DIM][MATRIX_DIM]) { for(int i = 0; i < MATRIX_DIM; i++) { for(int j = 0; j < MATRIX_DIM; j++) { if((matrix1[i][j] - matrix2[i][j] > MIN_ERROR) && (matrix1[i][j] - matrix2[i][j] > 0)) { printf("Error i=%d : j=%d mat1=%f mat2=%f\n",i,j,\ matrix1[i][j], matrix2[i][j]); return; } } } printf("Matrices Match! \n"); } /** * carries out a serial matrix multiplication */ void serial_mat_mul(Real A[MATRIX_DIM][MATRIX_DIM], \ Real B[MATRIX_DIM][MATRIX_DIM], Real C[MATRIX_DIM][MATRIX_DIM]) { float sum; for (int row=0; row<MATRIX_DIM; row++){ for (int col=0; col<MATRIX_DIM; col++){ sum = 0.f; for (int n=0; n<MATRIX_DIM; n++){ sum += A[row][n]*B[n][col]; } C[row][col] = sum; } } } /** * Shows the usage of the program. 
*/ void print_usage(){ printf("Wrong usage!\n"); } /** * Does a matrix multiplication using the "tiled" approach in the GPU * @param A First matrix * @param B Second matrix * @param C Results matrix */ __global__ void cuda_tiled_mat_mul(Real * A, Real * B, Real * C) { float CValue = 0; int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y; int Col = blockIdx.x*BLOCK_SIZE + threadIdx.x; __shared__ Real As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ Real Bs[BLOCK_SIZE][BLOCK_SIZE]; for (int k = 0; k < (BLOCK_SIZE + MATRIX_DIM - 1)/BLOCK_SIZE; k++) { // check ranges for the matrices and check for left out parts where // MATRIX_DIM is not an exact multiplication of tile size(BLOCK_SIZE) if (k*BLOCK_SIZE + threadIdx.x < MATRIX_DIM && Row < MATRIX_DIM){ As[threadIdx.y][threadIdx.x] = A[Row*MATRIX_DIM + \ k*BLOCK_SIZE + threadIdx.x]; } else{ As[threadIdx.y][threadIdx.x] = 0.0; } if (k*BLOCK_SIZE + threadIdx.y < MATRIX_DIM && Col < MATRIX_DIM){ Bs[threadIdx.y][threadIdx.x] = B[(k*BLOCK_SIZE + \ threadIdx.y)*MATRIX_DIM + Col]; } else{ Bs[threadIdx.y][threadIdx.x] = 0.0; } // Wait till all the threads finish before calculating the results __syncthreads(); for (int n = 0; n < BLOCK_SIZE; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; __syncthreads(); } // Calculate the result if (Row < MATRIX_DIM && Col < MATRIX_DIM) C[((blockIdx.y * blockDim.y + threadIdx.y)*MATRIX_DIM)+\ (blockIdx.x*blockDim.x)+threadIdx.x]=CValue; } //struct for parameter passing between pthread calls struct pthread_arg_struct { int tid; int total_threads; Real (*A)[MATRIX_DIM]; Real (*B)[MATRIX_DIM]; Real (*C)[MATRIX_DIM]; }; /** * PThread code for assigning tasks to pthreads * @param arguments an instance of pthread_arg_struct */ void* pthread_mat_mul(void* arguments) { struct pthread_arg_struct *args = (struct pthread_arg_struct *)arguments; int total_threads = args -> total_threads; int tid = args -> tid; //obtain the value of thread id Real (*A)[MATRIX_DIM]=args -> A; Real (*B)[MATRIX_DIM]=args -> B; // get the workload for one thread int chunk_size=MATRIX_DIM/total_threads; // check for the row ranges the thread needs to calculate int min_row = chunk_size * tid; int max_row = (min_row+chunk_size-1) < MATRIX_DIM ? 
(min_row+chunk_size-1) : MATRIX_DIM; float sum=0.f; // loop the matrix entries that belongs to this thread for(;min_row<=max_row;min_row++){ for(int col=0;col<MATRIX_DIM;col++){ for (int n=0; n<MATRIX_DIM; n++){ sum += A[min_row][n]*B[n][col]; } args->C[min_row][col] = sum; sum=0; } } pthread_exit((void*)0); } int main(int argc, char const *argv[]) { if(argc<2){ print_usage(); } struct timespec t1, t2; long sec, nsec; float comp_time; // in milli seconds // Initialize the random seed srand(time(NULL)); // Create the matrices static Real A[MATRIX_DIM][MATRIX_DIM]; static Real B[MATRIX_DIM][MATRIX_DIM]; static Real C[MATRIX_DIM][MATRIX_DIM]; static Real serial_C[MATRIX_DIM][MATRIX_DIM]; // Initialize the matrices init_matrix(A); init_matrix(B); // print_matrix(A); // print_matrix(B); if (0 == strcmp(argv[1], "-s")) { GET_TIME(t1); printf("serial mode\n\n"); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); } else if (0 == strcmp(argv[1], "-p")) { printf("pthread mode\n\n"); int num_of_threads; // check whether the given # of threads is valid if(argc <3){ print_usage(); return -1; } num_of_threads=atoi(argv[2]); if(num_of_threads>MAX_PTHREADS){ printf("[ERROR-PTHREADS] - Only up to 8 threads can be created\n"); return -1; } pthread_t threads[num_of_threads]; int rc; long t; void *status; GET_TIME(t1); //initialize the threads for(t=0;t<num_of_threads;t++){ struct pthread_arg_struct* args=(\ struct pthread_arg_struct*)malloc(sizeof *args); args->total_threads=num_of_threads; args->tid=t; args-> A=A; args-> B=B; args-> C=C; rc = pthread_create(&threads[t], NULL, pthread_mat_mul,(void *)args); if (rc){ printf("ERROR; return code from pthread_create() is %d\n", rc); exit(-1); } } //join the threads for(t=0;t<num_of_threads;t++){ pthread_join(threads[t], &status); } GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: PThreads(%d threads) Time(ms)=%.2f \n", MATRIX_DIM,num_of_threads, comp_time); // if verification is needed if((argc ==4) && (0 == strcmp(argv[3], "-v"))){ GET_TIME(t1); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t1); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // print_matrix(serial_C); // print_matrix(C); // Compare the reuslts compare_matrices(serial_C,C); } } else if (0 == strcmp(argv[1], "-c")) { long matrix_size=MATRIX_DIM*MATRIX_DIM*sizeof(Real); // printf("%ld\n",matrix_size ); GET_TIME(t1); Real* _A; gpuErrchk(cudaMalloc((void**) &_A, matrix_size)); // printStats(); Real* _B; gpuErrchk(cudaMalloc((void**) &_B, matrix_size)); // printStats(); Real* _C; gpuErrchk(cudaMalloc((void**) &_C, matrix_size)); // printStats(); // copy the matrices to device cudaMemcpy(_A, A, matrix_size, cudaMemcpyHostToDevice); cudaMemcpy(_B, B, matrix_size, cudaMemcpyHostToDevice); // If the tiled mode needs to be enabled if (argc>2 && 0 == strcmp(argv[2], "-t")){ printf("cuda tiled mode\n"); // set the grid and block sizes dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 dimGrid; dimGrid.x = (MATRIX_DIM + dimBlock.x - 1)/dimBlock.x; dimGrid.y = (MATRIX_DIM + dimBlock.y - 1)/dimBlock.y; // GET_TIME(t1); // execute the workload in the GPU cuda_tiled_mat_mul<<<dimGrid , dimBlock>>>(_A,_B,_C); // Copy back the result cudaMemcpy(C,_C,matrix_size,cudaMemcpyDeviceToHost); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CUDA 
Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // if verification is needed if((argc ==4) && (0 == strcmp(argv[3], "-v"))){ GET_TIME(t1); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // print_matrix(serial_C); // print_matrix(C); // Compare the reuslts compare_matrices(serial_C,C); } // free device memory cudaFree(_A); cudaFree(_B); cudaFree(_C); } else{ printf("cuda mode\n"); int K=100; dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE); dim3 grid(K,K); // GET_TIME(t1); // call the GPU cuda_simple_mat_mul<<<grid,threadBlock>>>(_A,_B,_C); // Copy back the result cudaMemcpy(C,_C,matrix_size,cudaMemcpyDeviceToHost); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CUDA Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // if verification is needed if((argc ==3) && (0 == strcmp(argv[2], "-v"))){ GET_TIME(t1); // get the serial output serial_mat_mul(A,B,serial_C); GET_TIME(t2); comp_time = elapsed_time_msec(&t1, &t2, &sec, &nsec); printf("N=%d: CPU Time(ms)=%.2f \n", MATRIX_DIM, comp_time); // print_matrix(serial_C); // print_matrix(C); compare_matrices(serial_C,C); } // free device memory cudaFree(_A); cudaFree(_B); cudaFree(_C); } } else{ print_usage(); } return 0; }
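// Note: a minimal sketch, not code from the file above. The plain "cuda mode"
// branch launches cuda_simple_mat_mul with a hard-coded dim3 grid(K, K), K = 100,
// which only covers matrices up to K*BLOCK_SIZE = 3200 on a side and launches far
// more blocks than the 57x57 needed for MATRIX_DIM = 1800. This sketch derives the
// grid from MATRIX_DIM with ceiling division and checks the launch result;
// launch_simple is a hypothetical helper name, and Real, BLOCK_SIZE, MATRIX_DIM
// and cuda_simple_mat_mul are assumed from the file.
void launch_simple(Real *d_A, Real *d_B, Real *d_C)
{
    // hypothetical helper; device pointers allocated as in the -c branch
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((MATRIX_DIM + block.x - 1) / block.x,
              (MATRIX_DIM + block.y - 1) / block.y);
    cuda_simple_mat_mul<<<grid, block>>>(d_A, d_B, d_C);

    cudaError_t err = cudaGetLastError();   // surfaces bad launch configurations
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    }
}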
a6949e60b7e1f93356e816a8a6ad60608b3497d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <assert.h> #include <iostream> #include <random> #include "nodeaggregator.hh" //#include "csr_graph.cu" //#include "csr_graph.h" #include "nn_exception.hh" #include <cusparse_v2.h> __global__ void print_kernel_agg(float *A, int size, std::string str) { for(int i=1433; i<1433+size; i++) { if(A[i] != 0.0) { printf("The value of %s[%d, %d] = %f\n", str, 1, i-1433*1, A[i]); } } for(int i=1433*344; i<344*1433+size; i++) { if(A[i] != 0.0) { printf("The value of %s[%d, %d] = %f\n", str, 344, i-1433*344, A[i]); } } } /* __global__ void print_kernel_agg(float* A, int size, std::string str) { printf("The arr is %s\n", str); for(int i=1433; i<(1433+size); i=i+1) { if(A[i] != 0.0) { printf("The val of [1, %d] = %f\n", i-1433, A[i]); } } for(int i=344*1433; i<344*1433+size; i=i+1) { if(A[i] != 0.0) { printf("The val of [344, %d] = %f\n", i-1433*344, A[i]); } } } */ __global__ void agg(float* nnz_data, int* row, int* col, float* d_B, float* d_C, int FV_size, int m, int nnz) { float val; int start_dst_ind; int num_dst; int var = 0; //for (int index = blockIdx.x * FV_size + threadIdx.x; index < (blockIdx.x + 1) * FV_size; index = index + blockDim.x) { // start_dst_ind = row[blockIdx.x]; // num_dst = row[(blockIdx.x) + 1]; // val = 0; // for (int i = start_dst_ind; i < num_dst; i=i+1) { // val += d_B[col[i]*FV_size +blockDim.x*var + threadIdx.x]*nnz_data[i]; // //if(blockIdx.x == 1) { // // printf("val = % // //} // } // d_C[index] = val; // var++; //} int index = blockIdx.x * blockDim.x + threadIdx.x; int row_start_node = index/FV_size; start_dst_ind = row[row_start_node]; num_dst = row[row_start_node + 1]; for (int i = start_dst_ind; i < num_dst; i= i +1) { val += d_B[col[i]*FV_size + (index%FV_size)]*nnz_data[i]; } d_C[index] = val; } void NodeAggregator::node_SpMM(float* nnz_data, int* row, int* col, float* d_B, float* d_C, int FV_size, int m, int nnz) { int n = FV_size; hipsparseHandle_t cusparse1 = NULL; hipsparseMatDescr_t descrA ; hipsparseCreateMatDescr(&descrA); hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ZERO); const float alp = 1; const float bet = 0; const float* alpha = &alp; const float* beta = &bet; hipsparseStatus_t result; hipsparseCreate(&cusparse1); //A : mxn, B: mxm, C: mxn result = cusparseSgemmi(cusparse1, n, m , m, nnz, alpha, d_B, n, nnz_data,row, col, beta, d_C, n); if(result != HIPSPARSE_STATUS_SUCCESS) { printf("Cusparse failed\n"); } return; } Matrix& NodeAggregator::forward(Matrix& A,bool training,bool freeMatrix){ //std::cout<<"Nodeagg forward\n"; //std::cout << "A:" << A.data_device << "\n"; //this->A = A; //std::cout << "A:" << A.data_device << "\n"; //std::cout << "this.A" << this->A.data_device << "\n"; Z.allocateCuda(A.shape); //Z = A; /* dim3 block_size(256); dim3 num_of_blocks((A.shape.x*A.shape.y + block_size.x - 1) / block_size.x); //print_kernel_agg<<<1,1>>>(A.data_device, 20, "A - agg in"); agg<<<num_of_blocks,block_size>>>(nnz_data, row, col, A.data_device, Z.data_device, A.shape.y, nodes, nnz); hipDeviceSynchronize(); //print_kernel_agg<<<1,1>>>(Z.data_device, 20, "Z - agg out"); */ //print_kernel_agg<<<1,1>>>(A.data_device,50, "agg - in - agg layer"); node_SpMM(nnz_data, row, col, A.data_device, Z.data_device, A.shape.y, nodes, nnz); //print_kernel_agg<<<1,1>>>(Z.data_device, 50, "agg - out - agg layer"); //std::cout << " NodeAgg forward shape.x:" << Z.shape.x << "\n"; // 
std::cout << " NodeAgg forward shape.y:" << Z.shape.y << "\n"; NNException::throwIfDeviceErrorOccurred("Error found in NN AGG forward"); if(freeMatrix) A.freeMem(); //std::cout<<"Nodeagg ptr:" << Z.data_device << "\n"; return Z; } Matrix& NodeAggregator::backprop(Matrix& dZ, float learning_rate, bool freeMatrix) { this->dZ = dZ; //std::cout<<"Nodeagg backward\n"; // dA.allocateCuda(dZ.shape); //dA = dZ; //std::cout<<"Nodeagg backward\n"; // std::cout<<"dA.Shape.x:" << dA.shape.x << "\n"; // std::cout<<"dA.Shape.x:" << dA.shape.y << "\n"; /*dim3 block_size(256); dim3 num_of_blocks((dZ.shape.x*dZ.shape.y + block_size.x - 1) / block_size.x); agg<<<num_of_blocks,block_size>>>(nnz_data, row, col, dZ.data_device, dA.data_device, dZ.shape.y, nodes, nnz); */ node_SpMM(nnz_data, row, col, dZ.data_device, dA.data_device, dZ.shape.y, nodes, nnz); // std::cout << " NodeAgg backward shape.x:" << dA.shape.x << "\n"; // std::cout << " NodeAgg backward shape.y:" << dA.shape.y << "\n"; // NNException::throwIfDeviceErrorOccurred("Error found in NN Agg backward 1"); dZ.freeMem(); // NNException::throwIfDeviceErrorOccurred("Error found in NN Agg backward 2"); return dA; } //nn.addLayer(new NodeAggregator("nodeagg1", d_edge_data, d_row_start, d_edge_dst, 2708, nnz)); NodeAggregator::NodeAggregator(std::string name, float* nnz_data, int* row, int*col, int nodes, int nnz, Shape dA_shape): dA(dA_shape) { this->name = name; this->nnz_data = nnz_data; this->row = row; this->col = col; this->nodes = nodes; this->nnz = nnz; dA.allocateMemory(); } NodeAggregator::~NodeAggregator() { } void NodeAggregator::free_matrix() { dA.freeMem(); }
a6949e60b7e1f93356e816a8a6ad60608b3497d6.cu
#include <stdlib.h> #include <assert.h> #include <iostream> #include <random> #include "nodeaggregator.hh" //#include "csr_graph.cu" //#include "csr_graph.h" #include "nn_exception.hh" #include <cusparse_v2.h> __global__ void print_kernel_agg(float *A, int size, std::string str) { for(int i=1433; i<1433+size; i++) { if(A[i] != 0.0) { printf("The value of %s[%d, %d] = %f\n", str, 1, i-1433*1, A[i]); } } for(int i=1433*344; i<344*1433+size; i++) { if(A[i] != 0.0) { printf("The value of %s[%d, %d] = %f\n", str, 344, i-1433*344, A[i]); } } } /* __global__ void print_kernel_agg(float* A, int size, std::string str) { printf("The arr is %s\n", str); for(int i=1433; i<(1433+size); i=i+1) { if(A[i] != 0.0) { printf("The val of [1, %d] = %f\n", i-1433, A[i]); } } for(int i=344*1433; i<344*1433+size; i=i+1) { if(A[i] != 0.0) { printf("The val of [344, %d] = %f\n", i-1433*344, A[i]); } } } */ __global__ void agg(float* nnz_data, int* row, int* col, float* d_B, float* d_C, int FV_size, int m, int nnz) { float val; int start_dst_ind; int num_dst; int var = 0; //for (int index = blockIdx.x * FV_size + threadIdx.x; index < (blockIdx.x + 1) * FV_size; index = index + blockDim.x) { // start_dst_ind = row[blockIdx.x]; // num_dst = row[(blockIdx.x) + 1]; // val = 0; // for (int i = start_dst_ind; i < num_dst; i=i+1) { // val += d_B[col[i]*FV_size +blockDim.x*var + threadIdx.x]*nnz_data[i]; // //if(blockIdx.x == 1) { // // printf("val = % // //} // } // d_C[index] = val; // var++; //} int index = blockIdx.x * blockDim.x + threadIdx.x; int row_start_node = index/FV_size; start_dst_ind = row[row_start_node]; num_dst = row[row_start_node + 1]; for (int i = start_dst_ind; i < num_dst; i= i +1) { val += d_B[col[i]*FV_size + (index%FV_size)]*nnz_data[i]; } d_C[index] = val; } void NodeAggregator::node_SpMM(float* nnz_data, int* row, int* col, float* d_B, float* d_C, int FV_size, int m, int nnz) { int n = FV_size; cusparseHandle_t cusparse1 = NULL; cusparseMatDescr_t descrA ; cusparseCreateMatDescr(&descrA); cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO); const float alp = 1; const float bet = 0; const float* alpha = &alp; const float* beta = &bet; cusparseStatus_t result; cusparseCreate(&cusparse1); //A : mxn, B: mxm, C: mxn result = cusparseSgemmi(cusparse1, n, m , m, nnz, alpha, d_B, n, nnz_data,row, col, beta, d_C, n); if(result != CUSPARSE_STATUS_SUCCESS) { printf("Cusparse failed\n"); } return; } Matrix& NodeAggregator::forward(Matrix& A,bool training,bool freeMatrix){ //std::cout<<"Nodeagg forward\n"; //std::cout << "A:" << A.data_device << "\n"; //this->A = A; //std::cout << "A:" << A.data_device << "\n"; //std::cout << "this.A" << this->A.data_device << "\n"; Z.allocateCuda(A.shape); //Z = A; /* dim3 block_size(256); dim3 num_of_blocks((A.shape.x*A.shape.y + block_size.x - 1) / block_size.x); //print_kernel_agg<<<1,1>>>(A.data_device, 20, "A - agg in"); agg<<<num_of_blocks,block_size>>>(nnz_data, row, col, A.data_device, Z.data_device, A.shape.y, nodes, nnz); cudaDeviceSynchronize(); //print_kernel_agg<<<1,1>>>(Z.data_device, 20, "Z - agg out"); */ //print_kernel_agg<<<1,1>>>(A.data_device,50, "agg - in - agg layer"); node_SpMM(nnz_data, row, col, A.data_device, Z.data_device, A.shape.y, nodes, nnz); //print_kernel_agg<<<1,1>>>(Z.data_device, 50, "agg - out - agg layer"); //std::cout << " NodeAgg forward shape.x:" << Z.shape.x << "\n"; // std::cout << " NodeAgg forward shape.y:" << Z.shape.y << "\n"; 
NNException::throwIfDeviceErrorOccurred("Error found in NN AGG forward"); if(freeMatrix) A.freeMem(); //std::cout<<"Nodeagg ptr:" << Z.data_device << "\n"; return Z; } Matrix& NodeAggregator::backprop(Matrix& dZ, float learning_rate, bool freeMatrix) { this->dZ = dZ; //std::cout<<"Nodeagg backward\n"; // dA.allocateCuda(dZ.shape); //dA = dZ; //std::cout<<"Nodeagg backward\n"; // std::cout<<"dA.Shape.x:" << dA.shape.x << "\n"; // std::cout<<"dA.Shape.x:" << dA.shape.y << "\n"; /*dim3 block_size(256); dim3 num_of_blocks((dZ.shape.x*dZ.shape.y + block_size.x - 1) / block_size.x); agg<<<num_of_blocks,block_size>>>(nnz_data, row, col, dZ.data_device, dA.data_device, dZ.shape.y, nodes, nnz); */ node_SpMM(nnz_data, row, col, dZ.data_device, dA.data_device, dZ.shape.y, nodes, nnz); // std::cout << " NodeAgg backward shape.x:" << dA.shape.x << "\n"; // std::cout << " NodeAgg backward shape.y:" << dA.shape.y << "\n"; // NNException::throwIfDeviceErrorOccurred("Error found in NN Agg backward 1"); dZ.freeMem(); // NNException::throwIfDeviceErrorOccurred("Error found in NN Agg backward 2"); return dA; } //nn.addLayer(new NodeAggregator("nodeagg1", d_edge_data, d_row_start, d_edge_dst, 2708, nnz)); NodeAggregator::NodeAggregator(std::string name, float* nnz_data, int* row, int*col, int nodes, int nnz, Shape dA_shape): dA(dA_shape) { this->name = name; this->nnz_data = nnz_data; this->row = row; this->col = col; this->nodes = nodes; this->nnz = nnz; dA.allocateMemory(); } NodeAggregator::~NodeAggregator() { } void NodeAggregator::free_matrix() { dA.freeMem(); }
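// Note: an editorial variant, not the file's code. The custom `agg` kernel above
// appears to read `val` before initializing it and has no guard for threads beyond
// m * FV_size; agg_checked is a hypothetical name for the same one-thread-per-
// output-element CSR aggregation with a zeroed accumulator and a bounds check.
__global__ void agg_checked(const float *nnz_data, const int *row, const int *col,
                            const float *d_B, float *d_C, int FV_size, int m)
{
    // hypothetical sketch; same data layout as agg/node_SpMM in the file
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= m * FV_size) return;          // guard the partial last block

    int node    = index / FV_size;             // output row (destination node)
    int feature = index % FV_size;             // output column (feature index)
    float val = 0.0f;                          // accumulator starts at zero
    for (int e = row[node]; e < row[node + 1]; ++e) {
        val += nnz_data[e] * d_B[col[e] * FV_size + feature];
    }
    d_C[index] = val;
}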
27a532840248b8ad1eab3215f47c492014bf7d21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 with the LLVM exception * (the "License"); you may not use this file except in compliance with * the License. * * You may obtain a copy of the License at * * http://llvm.org/foundation/relicensing/LICENSE.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <nvbench/nvbench.cuh> // Grab some testing kernels from NVBench: #include <nvbench/test_kernels.cuh> // Thrust vectors simplify memory management: #include <thrust/device_vector.h> // std::enable_if_t #include <type_traits> //============================================================================== // `runtime_skip` demonstrates how to skip benchmarks at runtime. // // Two parameter axes are swept (see axes.cu), but some configurations are // skipped by calling `state.skip` with a skip reason string. This reason // is printed to the log and captured in JSON output. void runtime_skip(nvbench::state &state) { const auto duration = state.get_float64("Duration"); const auto kramble = state.get_string("Kramble"); // Skip Baz benchmarks with < 0.8 ms duration. if (kramble == "Baz" && duration < 0.8e-3) { state.skip("Short 'Baz' benchmarks are skipped."); return; } // Skip Foo benchmarks with > 0.3 ms duration. if (kramble == "Foo" && duration > 0.3e-3) { state.skip("Long 'Foo' benchmarks are skipped."); return; } // Run all others: state.exec([duration](nvbench::launch &launch) { hipLaunchKernelGGL(( nvbench::sleep_kernel), dim3(1), dim3(1), 0, launch.get_stream(), duration); }); } NVBENCH_BENCH(runtime_skip) // 0, 0.25, 0.5, 0.75, and 1.0 milliseconds .add_float64_axis("Duration", nvbench::range(0., 1.1e-3, // .1e-3 slop for fp precision 0.25e-3)) .add_string_axis("Kramble", {"Foo", "Bar", "Baz"}); //============================================================================== // `skip_overload` demonstrates how to skip benchmarks at compile-time via // overload resolution. // // Two type axes are swept, but configurations where InputType == OutputType are // skipped. template <typename InputType, typename OutputType> void skip_overload(nvbench::state &state, nvbench::type_list<InputType, OutputType>) { // This is a contrived example that focuses on the skip overloads, so this is // just a sleep kernel: state.exec([](nvbench::launch &launch) { hipLaunchKernelGGL(( nvbench::sleep_kernel), dim3(1), dim3(1), 0, launch.get_stream(), 1e-3); }); } // Overload of skip_overload that is called when InputType == OutputType. template <typename T> void skip_overload(nvbench::state &state, nvbench::type_list<T, T>) { state.skip("InputType == OutputType."); } // The same type_list is used for both inputs/outputs. using sst_types = nvbench::type_list<nvbench::int32_t, nvbench::int64_t>; // Setup benchmark: NVBENCH_BENCH_TYPES(skip_overload, NVBENCH_TYPE_AXES(sst_types, sst_types)) .set_type_axes_names({"In", "Out"}); //============================================================================== // `skip_sfinae` demonstrates how to skip benchmarks at compile-time using // SFINAE to handle more complex skip conditions. 
// // Two type axes are swept, but configurations where sizeof(InputType) > // sizeof(OutputType) are skipped. // Enable this overload if InputType is not larger than OutputType template <typename InputType, typename OutputType> std::enable_if_t<(sizeof(InputType) <= sizeof(OutputType)), void> skip_sfinae(nvbench::state &state, nvbench::type_list<InputType, OutputType>) { // This is a contrived example that focuses on the skip overloads, so this is // just a sleep kernel: state.exec([](nvbench::launch &launch) { hipLaunchKernelGGL(( nvbench::sleep_kernel), dim3(1), dim3(1), 0, launch.get_stream(), 1e-3); }); } // Enable this overload if InputType is larger than OutputType template <typename InputType, typename OutputType> std::enable_if_t<(sizeof(InputType) > sizeof(OutputType)), void> skip_sfinae(nvbench::state &state, nvbench::type_list<InputType, OutputType>) { state.skip("sizeof(InputType) > sizeof(OutputType)."); } // The same type_list is used for both inputs/outputs. using sn_types = nvbench::type_list<nvbench::int8_t, nvbench::int16_t, nvbench::int32_t, nvbench::int64_t>; // Setup benchmark: NVBENCH_BENCH_TYPES(skip_sfinae, NVBENCH_TYPE_AXES(sn_types, sn_types)) .set_type_axes_names({"In", "Out"});
27a532840248b8ad1eab3215f47c492014bf7d21.cu
/* * Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 with the LLVM exception * (the "License"); you may not use this file except in compliance with * the License. * * You may obtain a copy of the License at * * http://llvm.org/foundation/relicensing/LICENSE.txt * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <nvbench/nvbench.cuh> // Grab some testing kernels from NVBench: #include <nvbench/test_kernels.cuh> // Thrust vectors simplify memory management: #include <thrust/device_vector.h> // std::enable_if_t #include <type_traits> //============================================================================== // `runtime_skip` demonstrates how to skip benchmarks at runtime. // // Two parameter axes are swept (see axes.cu), but some configurations are // skipped by calling `state.skip` with a skip reason string. This reason // is printed to the log and captured in JSON output. void runtime_skip(nvbench::state &state) { const auto duration = state.get_float64("Duration"); const auto kramble = state.get_string("Kramble"); // Skip Baz benchmarks with < 0.8 ms duration. if (kramble == "Baz" && duration < 0.8e-3) { state.skip("Short 'Baz' benchmarks are skipped."); return; } // Skip Foo benchmarks with > 0.3 ms duration. if (kramble == "Foo" && duration > 0.3e-3) { state.skip("Long 'Foo' benchmarks are skipped."); return; } // Run all others: state.exec([duration](nvbench::launch &launch) { nvbench::sleep_kernel<<<1, 1, 0, launch.get_stream()>>>(duration); }); } NVBENCH_BENCH(runtime_skip) // 0, 0.25, 0.5, 0.75, and 1.0 milliseconds .add_float64_axis("Duration", nvbench::range(0., 1.1e-3, // .1e-3 slop for fp precision 0.25e-3)) .add_string_axis("Kramble", {"Foo", "Bar", "Baz"}); //============================================================================== // `skip_overload` demonstrates how to skip benchmarks at compile-time via // overload resolution. // // Two type axes are swept, but configurations where InputType == OutputType are // skipped. template <typename InputType, typename OutputType> void skip_overload(nvbench::state &state, nvbench::type_list<InputType, OutputType>) { // This is a contrived example that focuses on the skip overloads, so this is // just a sleep kernel: state.exec([](nvbench::launch &launch) { nvbench::sleep_kernel<<<1, 1, 0, launch.get_stream()>>>(1e-3); }); } // Overload of skip_overload that is called when InputType == OutputType. template <typename T> void skip_overload(nvbench::state &state, nvbench::type_list<T, T>) { state.skip("InputType == OutputType."); } // The same type_list is used for both inputs/outputs. using sst_types = nvbench::type_list<nvbench::int32_t, nvbench::int64_t>; // Setup benchmark: NVBENCH_BENCH_TYPES(skip_overload, NVBENCH_TYPE_AXES(sst_types, sst_types)) .set_type_axes_names({"In", "Out"}); //============================================================================== // `skip_sfinae` demonstrates how to skip benchmarks at compile-time using // SFINAE to handle more complex skip conditions. // // Two type axes are swept, but configurations where sizeof(InputType) > // sizeof(OutputType) are skipped. 
// Enable this overload if InputType is not larger than OutputType template <typename InputType, typename OutputType> std::enable_if_t<(sizeof(InputType) <= sizeof(OutputType)), void> skip_sfinae(nvbench::state &state, nvbench::type_list<InputType, OutputType>) { // This is a contrived example that focuses on the skip overloads, so this is // just a sleep kernel: state.exec([](nvbench::launch &launch) { nvbench::sleep_kernel<<<1, 1, 0, launch.get_stream()>>>(1e-3); }); } // Enable this overload if InputType is larger than OutputType template <typename InputType, typename OutputType> std::enable_if_t<(sizeof(InputType) > sizeof(OutputType)), void> skip_sfinae(nvbench::state &state, nvbench::type_list<InputType, OutputType>) { state.skip("sizeof(InputType) > sizeof(OutputType)."); } // The same type_list is used for both inputs/outputs. using sn_types = nvbench::type_list<nvbench::int8_t, nvbench::int16_t, nvbench::int32_t, nvbench::int64_t>; // Setup benchmark: NVBENCH_BENCH_TYPES(skip_sfinae, NVBENCH_TYPE_AXES(sn_types, sn_types)) .set_type_axes_names({"In", "Out"});
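// Note: a sketch added here for comparison, not part of the NVBench example above.
// A third compile-time skip style keeps a single function template and branches
// with C++17 `if constexpr` instead of two overloads or SFINAE; skip_constexpr is
// a hypothetical name, and state.skip, state.exec, nvbench::sleep_kernel and
// sn_types are reused from the file as-is.
template <typename InputType, typename OutputType>
void skip_constexpr(nvbench::state &state,
                    nvbench::type_list<InputType, OutputType>)
{
  // hypothetical sketch, assuming a C++17 toolchain
  if constexpr (sizeof(InputType) > sizeof(OutputType))
  {
    state.skip("sizeof(InputType) > sizeof(OutputType).");
  }
  else
  {
    state.exec([](nvbench::launch &launch) {
      nvbench::sleep_kernel<<<1, 1, 0, launch.get_stream()>>>(1e-3);
    });
  }
}
// Registration would mirror the file, e.g.:
// NVBENCH_BENCH_TYPES(skip_constexpr, NVBENCH_TYPE_AXES(sn_types, sn_types))
//   .set_type_axes_names({"In", "Out"});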
c6a792c71703bd773aa2dc3828b39a80943ed124.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common/book.h"

#define N 10

__global__ void vectorAddGPU(float *a, float *b, float *c){
    int tid = threadIdx.x;
    if(tid < N){
        c[tid] = a[tid] + b[tid];
    }
}

void vectorAddSerial(float *a, float *b, float *c){
    int tid = 0;
    while(tid < N){
        c[tid] = a[tid] + b[tid];
        tid++;
    }
}

int main(void){
    float *h_a, *h_b, *h_c;
    float *d_a, *d_b, *d_c;

    h_a = (float*) malloc(N*sizeof(float));
    h_b = (float*) malloc(N*sizeof(float));
    h_c = (float*) malloc(N*sizeof(float));

    hipMalloc( (void**) &d_a, N*sizeof(float));
    hipMalloc( (void**) &d_b, N*sizeof(float));
    hipMalloc( (void**) &d_c, N*sizeof(float));

    for(int i = 0; i < N; i++){
        h_a[i] = (float)-i;
        h_b[i] = (float)(i*i);
    }

    hipMemcpy(d_a, h_a, N * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, N * sizeof(float), hipMemcpyHostToDevice);

    //vectorAddSerial(a,b,c);
    hipLaunchKernelGGL(( vectorAddGPU), dim3(1), dim3(N), 0, 0, d_a, d_b, d_c);

    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        printf("%s \n", hipGetErrorString(err));
    }

    hipDeviceSynchronize();

    hipMemcpy(h_c, d_c, N*sizeof(float), hipMemcpyDeviceToHost);

    for (int i = 0; i < N; i++) {
        printf("%f + %f = %f \n", h_a[i], h_b[i], h_c[i]);
    }

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
c6a792c71703bd773aa2dc3828b39a80943ed124.cu
#include <stdio.h>
#include "common/book.h"

#define N 10

__global__ void vectorAddGPU(float *a, float *b, float *c){
    int tid = threadIdx.x;
    if(tid < N){
        c[tid] = a[tid] + b[tid];
    }
}

void vectorAddSerial(float *a, float *b, float *c){
    int tid = 0;
    while(tid < N){
        c[tid] = a[tid] + b[tid];
        tid++;
    }
}

int main(void){
    float *h_a, *h_b, *h_c;
    float *d_a, *d_b, *d_c;

    h_a = (float*) malloc(N*sizeof(float));
    h_b = (float*) malloc(N*sizeof(float));
    h_c = (float*) malloc(N*sizeof(float));

    cudaMalloc( (void**) &d_a, N*sizeof(float));
    cudaMalloc( (void**) &d_b, N*sizeof(float));
    cudaMalloc( (void**) &d_c, N*sizeof(float));

    for(int i = 0; i < N; i++){
        h_a[i] = (float)-i;
        h_b[i] = (float)(i*i);
    }

    cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N * sizeof(float), cudaMemcpyHostToDevice);

    //vectorAddSerial(a,b,c);
    vectorAddGPU<<<1, N>>>(d_a, d_b, d_c);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("%s \n", cudaGetErrorString(err));
    }

    cudaThreadSynchronize();

    cudaMemcpy(h_c, d_c, N*sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++) {
        printf("%f + %f = %f \n", h_a[i], h_b[i], h_c[i]);
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
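// Note: a minimal sketch, not part of the file above. The <<<1, N>>> launch only
// works while N fits in one block (at most 1024 threads on current GPUs); a
// grid-stride loop plus a ceiling-division grid handles any n with the same
// bounds-checked logic. vectorAddStride is a hypothetical kernel name.
__global__ void vectorAddStride(const float *a, const float *b, float *c, int n){
    // hypothetical grid-stride variant of the add kernel above
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n;
         i += blockDim.x * gridDim.x) {
        c[i] = a[i] + b[i];
    }
}
// Launch example: 256 threads per block, enough blocks to cover n once.
//   int threads = 256;
//   int blocks  = (n + threads - 1) / threads;
//   vectorAddStride<<<blocks, threads>>>(d_a, d_b, d_c, n);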
e2da4ddb315408cd7ced631614170abd129a367f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <float.h> #include <math.h> #include <time.h> #include "../../constants.h" #define N_RADIUS 4 #define N_THREADS_PLANE_DIM 8 #define N_THREADS_THIRD_DIM 8 #define N_THREADS_PML_I_DIM 9 #define N_THREADS_PML_J_DIM 9 #define N_THREADS_PML_K_DIM 9 __global__ void target_inner_3d_kernel( llint nx, llint ny, llint nz, llint x3, llint x4, llint y3, llint y4, llint z3, llint z4, llint lx, llint ly, llint lz, float hdx_2, float hdy_2, float hdz_2, float coef0, float coefx_1, float coefx_2, float coefx_3, float coefx_4, float coefy_1, float coefy_2, float coefy_3, float coefy_4, float coefz_1, float coefz_2, float coefz_3, float coefz_4, const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp, const float *__restrict__ phi, const float *__restrict__ eta ) { const llint k0 = z3 + blockIdx.x * blockDim.x; const llint j0 = y3 + blockIdx.y * blockDim.y; const llint i0 = x3 + blockIdx.z * blockDim.z; const llint i = i0 + threadIdx.z; const llint j = j0 + threadIdx.y; const llint k = k0 + threadIdx.x; if (i > x4-1 || j > y4-1 || k > z4-1) { return; } float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)] , __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)]) , __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)]) , __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)]) , __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)]) , __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)]) , __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)]) , __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)]) , __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)]) , __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)]) , __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)]) , __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)]) , __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)]) ))))))))))))); v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, u[IDX3_l(i,j,k)], __fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)]) ); } __global__ void target_pml_3d_kernel( llint nx, llint ny, llint nz, llint x3, llint x4, llint y3, llint y4, llint z3, llint z4, llint lx, llint ly, llint lz, float hdx_2, float hdy_2, float hdz_2, float coef0, float coefx_1, float coefx_2, float coefx_3, float coefx_4, float coefy_1, float coefy_2, float coefy_3, float coefy_4, float coefz_1, float coefz_2, float coefz_3, float coefz_4, const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp, float *__restrict__ phi, const float *__restrict__ eta ) { const llint k0 = z3 + blockIdx.x * blockDim.x; const llint j0 = y3 + blockIdx.y * blockDim.y; const llint i0 = x3 + blockIdx.z * blockDim.z; const llint i = i0 + threadIdx.z; const llint j = j0 + threadIdx.y; const llint k = k0 + threadIdx.x; if (i > x4-1 || j > y4-1 || k > z4-1) { return; } float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)] , __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)]) , __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)]) , __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)]) , __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)]) , __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)]) , __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)]) , __fmaf_rn(coefx_3, 
__fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)]) , __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)]) , __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)]) , __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)]) , __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)]) , __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)]) ))))))))))))); const float s_eta_c = eta[IDX3_eta1(i,j,k)]; v[IDX3_l(i,j,k)] = __fdiv_rn( __fmaf_rn( __fmaf_rn(2.f, s_eta_c, __fsub_rn(2.f, __fmul_rn(s_eta_c, s_eta_c) ) ), u[IDX3_l(i,j,k)], __fmaf_rn( vp[IDX3(i,j,k)], __fadd_rn(lap, phi[IDX3(i,j,k)]), -v[IDX3_l(i,j,k)] ) ), __fmaf_rn(2.f, s_eta_c, 1.f) ); phi[IDX3(i,j,k)] = __fdiv_rn( __fsub_rn( phi[IDX3(i,j,k)], __fmaf_rn( __fmul_rn( __fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]), __fsub_rn(u[IDX3_l(i+1,j,k)], u[IDX3_l(i-1,j,k)]) ), hdx_2, __fmaf_rn( __fmul_rn( __fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]), __fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)]) ), hdy_2, __fmul_rn( __fmul_rn( __fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]), __fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)]) ), hdz_2) )) ) , __fadd_rn(1.f, s_eta_c) ); } __global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) { g_u[idx] += source; } extern "C" void target( uint nsteps, double *time_kernel, llint nx, llint ny, llint nz, llint x1, llint x2, llint x3, llint x4, llint x5, llint x6, llint y1, llint y2, llint y3, llint y4, llint y5, llint y6, llint z1, llint z2, llint z3, llint z4, llint z5, llint z6, llint lx, llint ly, llint lz, llint sx, llint sy, llint sz, float hdx_2, float hdy_2, float hdz_2, const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz, float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp, const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source ) { struct timespec start, end; const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz); const llint size_v = size_u; const llint size_phi = nx*ny*nz; const llint size_vp = size_phi; const llint size_eta = (nx+2)*(ny+2)*(nz+2); float *d_u, *d_v, *d_vp, *d_phi, *d_eta; hipMalloc(&d_u, sizeof(float) * size_u); hipMalloc(&d_v, sizeof(float) * size_u); hipMalloc(&d_vp, sizeof(float) * size_vp); hipMalloc(&d_phi, sizeof(float) * size_phi); hipMalloc(&d_eta, sizeof(float) * size_eta); hipMemcpy(d_u, u, sizeof(float) * size_u, hipMemcpyHostToDevice); hipMemcpy(d_v, v, sizeof(float) * size_v, hipMemcpyHostToDevice); hipMemcpy(d_vp, vp, sizeof(float) * size_vp, hipMemcpyHostToDevice); hipMemcpy(d_phi, phi, sizeof(float) * size_phi, hipMemcpyHostToDevice); hipMemcpy(d_eta, eta, sizeof(float) * size_eta, hipMemcpyHostToDevice); const llint xmin = 0; const llint xmax = nx; const llint ymin = 0; const llint ymax = ny; dim3 threadsPerBlock_inner(N_THREADS_THIRD_DIM, N_THREADS_PLANE_DIM, N_THREADS_PLANE_DIM); dim3 threadsPerBlock(N_THREADS_PML_K_DIM, N_THREADS_PML_J_DIM, N_THREADS_PML_I_DIM); int num_streams = 7; hipStream_t streams[num_streams]; for (int i = 0; i < num_streams; i++) { hipStreamCreate(&(streams[i])); } const uint npo = 100; for (uint istep = 1; istep <= nsteps; ++istep) { clock_gettime(CLOCK_REALTIME, &start); dim3 n_block_front( (z2-z1+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (ny+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); hipLaunchKernelGGL(( target_pml_3d_kernel), 
dim3(n_block_front), dim3(threadsPerBlock), 0, streams[1], nx,ny,nz, xmin,xmax,ymin,ymax,z1,z2, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_top( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y2-y1+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_top), dim3(threadsPerBlock), 0, streams[2], nx,ny,nz, xmin,xmax,y1,y2,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_left( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y4-y3+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (x2-x1+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_left), dim3(threadsPerBlock), 0, streams[3], nx,ny,nz, x1,x2,y3,y4,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_center( (z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM, (y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM, (x4-x3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM); hipLaunchKernelGGL(( target_inner_3d_kernel), dim3(n_block_center), dim3(threadsPerBlock_inner), 0, streams[0], nx,ny,nz, x3,x4,y3,y4,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_right( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y4-y3+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (x6-x5+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_right), dim3(threadsPerBlock), 0, streams[4], nx,ny,nz, x5,x6,y3,y4,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_bottom( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y6-y5+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_bottom), dim3(threadsPerBlock), 0, streams[5], nx,ny,nz, xmin,xmax,y5,y6,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_back( (z6-z5+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (ny+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_back), dim3(threadsPerBlock), 0, streams[6], nx,ny,nz, xmin,xmax,ymin,ymax,z5,z6, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); for (int i = 0; i < num_streams; i++) { hipStreamSynchronize(streams[i]); } hipLaunchKernelGGL(( 
kernel_add_source_kernel), dim3(1), dim3(1), 0, 0, d_v, IDX3_l(sx,sy,sz), source[istep]); clock_gettime(CLOCK_REALTIME, &end); *time_kernel += (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1.0e9; float *t = d_u; d_u = d_v; d_v = t; // Print out if (istep % npo == 0) { printf("time step %u / %u\n", istep, nsteps); } } for (int i = 0; i < num_streams; i++) { hipStreamDestroy(streams[i]); } hipMemcpy(u, d_u, sizeof(float) * size_u, hipMemcpyDeviceToHost); hipFree(d_u); hipFree(d_v); hipFree(d_vp); hipFree(d_phi); hipFree(d_eta); }
e2da4ddb315408cd7ced631614170abd129a367f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <float.h> #include <math.h> #include <time.h> #include "../../constants.h" #define N_RADIUS 4 #define N_THREADS_PLANE_DIM 8 #define N_THREADS_THIRD_DIM 8 #define N_THREADS_PML_I_DIM 9 #define N_THREADS_PML_J_DIM 9 #define N_THREADS_PML_K_DIM 9 __global__ void target_inner_3d_kernel( llint nx, llint ny, llint nz, llint x3, llint x4, llint y3, llint y4, llint z3, llint z4, llint lx, llint ly, llint lz, float hdx_2, float hdy_2, float hdz_2, float coef0, float coefx_1, float coefx_2, float coefx_3, float coefx_4, float coefy_1, float coefy_2, float coefy_3, float coefy_4, float coefz_1, float coefz_2, float coefz_3, float coefz_4, const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp, const float *__restrict__ phi, const float *__restrict__ eta ) { const llint k0 = z3 + blockIdx.x * blockDim.x; const llint j0 = y3 + blockIdx.y * blockDim.y; const llint i0 = x3 + blockIdx.z * blockDim.z; const llint i = i0 + threadIdx.z; const llint j = j0 + threadIdx.y; const llint k = k0 + threadIdx.x; if (i > x4-1 || j > y4-1 || k > z4-1) { return; } float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)] , __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)]) , __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)]) , __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)]) , __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)]) , __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)]) , __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)]) , __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)]) , __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)]) , __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)]) , __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)]) , __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)]) , __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)]) ))))))))))))); v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, u[IDX3_l(i,j,k)], __fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)]) ); } __global__ void target_pml_3d_kernel( llint nx, llint ny, llint nz, llint x3, llint x4, llint y3, llint y4, llint z3, llint z4, llint lx, llint ly, llint lz, float hdx_2, float hdy_2, float hdz_2, float coef0, float coefx_1, float coefx_2, float coefx_3, float coefx_4, float coefy_1, float coefy_2, float coefy_3, float coefy_4, float coefz_1, float coefz_2, float coefz_3, float coefz_4, const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp, float *__restrict__ phi, const float *__restrict__ eta ) { const llint k0 = z3 + blockIdx.x * blockDim.x; const llint j0 = y3 + blockIdx.y * blockDim.y; const llint i0 = x3 + blockIdx.z * blockDim.z; const llint i = i0 + threadIdx.z; const llint j = j0 + threadIdx.y; const llint k = k0 + threadIdx.x; if (i > x4-1 || j > y4-1 || k > z4-1) { return; } float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)] , __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)]) , __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)]) , __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)]) , __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)]) , __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)]) , __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)]) , __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)]) , 
__fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)]) , __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)]) , __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)]) , __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)]) , __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)]) ))))))))))))); const float s_eta_c = eta[IDX3_eta1(i,j,k)]; v[IDX3_l(i,j,k)] = __fdiv_rn( __fmaf_rn( __fmaf_rn(2.f, s_eta_c, __fsub_rn(2.f, __fmul_rn(s_eta_c, s_eta_c) ) ), u[IDX3_l(i,j,k)], __fmaf_rn( vp[IDX3(i,j,k)], __fadd_rn(lap, phi[IDX3(i,j,k)]), -v[IDX3_l(i,j,k)] ) ), __fmaf_rn(2.f, s_eta_c, 1.f) ); phi[IDX3(i,j,k)] = __fdiv_rn( __fsub_rn( phi[IDX3(i,j,k)], __fmaf_rn( __fmul_rn( __fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]), __fsub_rn(u[IDX3_l(i+1,j,k)], u[IDX3_l(i-1,j,k)]) ), hdx_2, __fmaf_rn( __fmul_rn( __fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]), __fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)]) ), hdy_2, __fmul_rn( __fmul_rn( __fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]), __fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)]) ), hdz_2) )) ) , __fadd_rn(1.f, s_eta_c) ); } __global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) { g_u[idx] += source; } extern "C" void target( uint nsteps, double *time_kernel, llint nx, llint ny, llint nz, llint x1, llint x2, llint x3, llint x4, llint x5, llint x6, llint y1, llint y2, llint y3, llint y4, llint y5, llint y6, llint z1, llint z2, llint z3, llint z4, llint z5, llint z6, llint lx, llint ly, llint lz, llint sx, llint sy, llint sz, float hdx_2, float hdy_2, float hdz_2, const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz, float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp, const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source ) { struct timespec start, end; const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz); const llint size_v = size_u; const llint size_phi = nx*ny*nz; const llint size_vp = size_phi; const llint size_eta = (nx+2)*(ny+2)*(nz+2); float *d_u, *d_v, *d_vp, *d_phi, *d_eta; cudaMalloc(&d_u, sizeof(float) * size_u); cudaMalloc(&d_v, sizeof(float) * size_u); cudaMalloc(&d_vp, sizeof(float) * size_vp); cudaMalloc(&d_phi, sizeof(float) * size_phi); cudaMalloc(&d_eta, sizeof(float) * size_eta); cudaMemcpy(d_u, u, sizeof(float) * size_u, cudaMemcpyHostToDevice); cudaMemcpy(d_v, v, sizeof(float) * size_v, cudaMemcpyHostToDevice); cudaMemcpy(d_vp, vp, sizeof(float) * size_vp, cudaMemcpyHostToDevice); cudaMemcpy(d_phi, phi, sizeof(float) * size_phi, cudaMemcpyHostToDevice); cudaMemcpy(d_eta, eta, sizeof(float) * size_eta, cudaMemcpyHostToDevice); const llint xmin = 0; const llint xmax = nx; const llint ymin = 0; const llint ymax = ny; dim3 threadsPerBlock_inner(N_THREADS_THIRD_DIM, N_THREADS_PLANE_DIM, N_THREADS_PLANE_DIM); dim3 threadsPerBlock(N_THREADS_PML_K_DIM, N_THREADS_PML_J_DIM, N_THREADS_PML_I_DIM); int num_streams = 7; cudaStream_t streams[num_streams]; for (int i = 0; i < num_streams; i++) { cudaStreamCreate(&(streams[i])); } const uint npo = 100; for (uint istep = 1; istep <= nsteps; ++istep) { clock_gettime(CLOCK_REALTIME, &start); dim3 n_block_front( (z2-z1+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (ny+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); target_pml_3d_kernel<<<n_block_front, threadsPerBlock, 0, streams[1]>>>(nx,ny,nz, 
xmin,xmax,ymin,ymax,z1,z2, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_top( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y2-y1+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); target_pml_3d_kernel<<<n_block_top, threadsPerBlock, 0, streams[2]>>>(nx,ny,nz, xmin,xmax,y1,y2,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_left( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y4-y3+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (x2-x1+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); target_pml_3d_kernel<<<n_block_left, threadsPerBlock, 0, streams[3]>>>(nx,ny,nz, x1,x2,y3,y4,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_center( (z4-z3+N_THREADS_THIRD_DIM-1) / N_THREADS_THIRD_DIM, (y4-y3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM, (x4-x3+N_THREADS_PLANE_DIM-1) / N_THREADS_PLANE_DIM); target_inner_3d_kernel<<<n_block_center, threadsPerBlock_inner, 0, streams[0]>>>(nx,ny,nz, x3,x4,y3,y4,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_right( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y4-y3+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (x6-x5+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); target_pml_3d_kernel<<<n_block_right, threadsPerBlock, 0, streams[4]>>>(nx,ny,nz, x5,x6,y3,y4,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_bottom( (z4-z3+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (y6-y5+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); target_pml_3d_kernel<<<n_block_bottom, threadsPerBlock, 0, streams[5]>>>(nx,ny,nz, xmin,xmax,y5,y6,z3,z4, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); dim3 n_block_back( (z6-z5+N_THREADS_PML_K_DIM-1) / N_THREADS_PML_K_DIM, (ny+N_THREADS_PML_J_DIM-1) / N_THREADS_PML_J_DIM, (nx+N_THREADS_PML_I_DIM-1) / N_THREADS_PML_I_DIM); target_pml_3d_kernel<<<n_block_back, threadsPerBlock, 0, streams[6]>>>(nx,ny,nz, xmin,xmax,ymin,ymax,z5,z6, lx,ly,lz, hdx_2, hdy_2, hdz_2, coefx[0]+coefy[0]+coefz[0], coefx[1], coefx[2], coefx[3], coefx[4], coefy[1], coefy[2], coefy[3], coefy[4], coefz[1], coefz[2], coefz[3], coefz[4], d_u, d_v, d_vp, d_phi, d_eta); for (int i = 0; i < num_streams; i++) { cudaStreamSynchronize(streams[i]); } kernel_add_source_kernel<<<1, 1>>>(d_v, IDX3_l(sx,sy,sz), source[istep]); clock_gettime(CLOCK_REALTIME, &end); *time_kernel += (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1.0e9; float *t = d_u; d_u = d_v; d_v = t; // Print out if (istep % npo == 0) { 
printf("time step %u / %u\n", istep, nsteps);
        }
    }

    for (int i = 0; i < num_streams; i++) {
        cudaStreamDestroy(streams[i]);
    }

    cudaMemcpy(u, d_u, sizeof(float) * size_u, cudaMemcpyDeviceToHost);

    cudaFree(d_u);
    cudaFree(d_v);
    cudaFree(d_vp);
    cudaFree(d_phi);
    cudaFree(d_eta);
}
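The target() driver above times each step on the host with clock_gettime, synchronizing its seven streams before reading the clock. As a point of comparison only, a minimal sketch of device-side timing with CUDA events follows; the kernel name, launch configuration, and step granularity are placeholders, not part of the code above.

// Minimal sketch: timing one step with CUDA events (all names here are placeholders).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_step() {}

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);          // enqueue start marker on the default stream
    dummy_step<<<1, 1>>>();             // stand-in for the per-step kernel launches
    cudaEventRecord(stop, 0);           // enqueue stop marker after the work
    cudaEventSynchronize(stop);         // wait until the stop event has completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed device time in milliseconds
    printf("step took %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}

Events recorded on the default (legacy) stream implicitly order against work on other blocking streams, which is one reason this pattern is convenient for multi-stream loops like the one above.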
cb67831cb7ae83963348a32e45b1ddb1c3512c02.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>

// Array access macros
#define INPUT(i,j) A[(i) + (j)*(m)]
#define OUTPUT(i,j) B[(i) + (j)*(m)]

__global__ void sampleAdd(double const * const A, double *B, int m, int n)
{
    // Get pixel (x,y) in input
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;

    if( i<m && j<n) {
        OUTPUT(i,j) = INPUT(i,j) + 1;
    }
}
cb67831cb7ae83963348a32e45b1ddb1c3512c02.cu
#include <math.h>
#include <stdio.h>

// Array access macros
#define INPUT(i,j) A[(i) + (j)*(m)]
#define OUTPUT(i,j) B[(i) + (j)*(m)]

__global__ void sampleAdd(double const * const A, double *B, int m, int n)
{
    // Get pixel (x,y) in input
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;

    if( i<m && j<n) {
        OUTPUT(i,j) = INPUT(i,j) + 1;
    }
}
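The sampleAdd pair above ships only the kernel. A minimal host driver, assuming the kernel above is compiled in the same translation unit, might look like the sketch below; the matrix size and block shape are arbitrary illustrative choices, and the macros imply column-major storage with m rows.

// Hypothetical driver for sampleAdd (not part of the original pair).
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    const int m = 64, n = 48;                    // rows, columns (assumed sizes)
    const size_t bytes = (size_t)m * n * sizeof(double);

    double *A, *B;
    cudaMalloc(&A, bytes);
    cudaMalloc(&B, bytes);
    cudaMemset(A, 0, bytes);                     // A = 0, so every B entry should come back as 1

    dim3 block(16, 16);
    dim3 grid((m + block.x - 1) / block.x,       // i indexes rows (first grid dimension)
              (n + block.y - 1) / block.y);      // j indexes columns
    sampleAdd<<<grid, block>>>(A, B, m, n);
    cudaDeviceSynchronize();

    double first;
    cudaMemcpy(&first, B, sizeof(double), cudaMemcpyDeviceToHost);
    printf("B(0,0) = %f\n", first);              // expected: 1.0

    cudaFree(A);
    cudaFree(B);
    return 0;
}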
be792b1031f931577ff6c0e602dd2f033307b5c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_a; int xdim0_update_halo_kernel5_plus_4_a_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_a; int ydim0_update_halo_kernel5_plus_4_a_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_a; int xdim1_update_halo_kernel5_plus_4_a_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_a; int ydim1_update_halo_kernel5_plus_4_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_4_a * (y) + \ xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_4_a * (y) + \ xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a * \ (z)) // user function __device__ inline void update_halo_kernel5_plus_4_a_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 4, 0)]; if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = mass_flux_z[OPS_ACC1(0, 4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_a(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_a_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 129)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(129, "update_halo_kernel5_plus_4_a"); OPS_kernels[129].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = 
args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_4_a_h || ydim0 != ydim0_update_halo_kernel5_plus_4_a_h || xdim1 != xdim1_update_halo_kernel5_plus_4_a_h || ydim1 != ydim1_update_halo_kernel5_plus_4_a_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_a, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_4_a_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_a, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_4_a_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_a, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_4_a_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_a, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_4_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[129].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_a), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[129].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[129].mpi_time += t2 - t1; OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
be792b1031f931577ff6c0e602dd2f033307b5c7.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_a; int xdim0_update_halo_kernel5_plus_4_a_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_a; int ydim0_update_halo_kernel5_plus_4_a_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_a; int xdim1_update_halo_kernel5_plus_4_a_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_a; int ydim1_update_halo_kernel5_plus_4_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_4_a * (y) + \ xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_4_a * (y) + \ xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a * \ (z)) // user function __device__ inline void update_halo_kernel5_plus_4_a_gpu(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = vol_flux_z[OPS_ACC0(0, 4, 0)]; if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = mass_flux_z[OPS_ACC1(0, 4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_a(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_a * ydim0_update_halo_kernel5_plus_4_a; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_a * ydim1_update_halo_kernel5_plus_4_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_a_gpu(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 129)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(129, "update_halo_kernel5_plus_4_a"); OPS_kernels[129].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_4_a_h || ydim0 != 
ydim0_update_halo_kernel5_plus_4_a_h || xdim1 != xdim1_update_halo_kernel5_plus_4_a_h || ydim1 != ydim1_update_halo_kernel5_plus_4_a_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_a, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_4_a_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_a, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_4_a_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_a, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_4_a_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_a, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_4_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[129].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel5_plus_4_a<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[129].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[129].mpi_time += t2 - t1; OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
100e173635086cba2822f9b66cc00badcc3cc71b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Computes the squared magnitude of DFT coefficient k (scaled down by 10000)
__device__ unsigned int X(unsigned int *x, int k, int N)
{
    float sum1 = 0;
    for(int n = 0; n < N; n++)
    {
        sum1 += (float)x[n]*cos( (float)(2*k*n) * M_PI/ (float)N );
    }

    float sum2 = 0;
    for(int n = 0; n < N; n++)
    {
        sum2 += (float)x[n]*sin( (float)(2*k*n) * M_PI/ (float)N );
    }

    return sum1*sum1/10000 + sum2*sum2/10000;
}

__global__ void DFT(unsigned int *frame, unsigned int *dft)
{
    // Get the index of the current thread
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    // If the thread is in range to be used
    if( index < FRAME_SIZE/2)
    {
        // Get the coefficient corresponding to the thread's index
        dft[ index ] = X( frame, index, FRAME_SIZE/2 );
    }
}
100e173635086cba2822f9b66cc00badcc3cc71b.cu
// Computes the squared magnitude of DFT coefficient k (scaled down by 10000)
__device__ unsigned int X(unsigned int *x, int k, int N)
{
    float sum1 = 0;
    for(int n = 0; n < N; n++)
    {
        sum1 += (float)x[n]*cos( (float)(2*k*n) * M_PI/ (float)N );
    }

    float sum2 = 0;
    for(int n = 0; n < N; n++)
    {
        sum2 += (float)x[n]*sin( (float)(2*k*n) * M_PI/ (float)N );
    }

    return sum1*sum1/10000 + sum2*sum2/10000;
}

__global__ void DFT(unsigned int *frame, unsigned int *dft)
{
    // Get the index of the current thread
    int index = threadIdx.x + blockIdx.x * blockDim.x;

    // If the thread is in range to be used
    if( index < FRAME_SIZE/2)
    {
        // Get the coefficient corresponding to the thread's index
        dft[ index ] = X( frame, index, FRAME_SIZE/2 );
    }
}
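The DFT kernel above depends on a FRAME_SIZE macro defined elsewhere in its project (it must already be visible wherever the kernel is compiled). A rough host harness is sketched below under the assumption FRAME_SIZE = 1024 and a ramp-like test signal; none of it comes from the original files.

// Hypothetical harness for the DFT kernel above; FRAME_SIZE value is an assumption.
#include <cstdio>
#include <cuda_runtime.h>

#ifndef FRAME_SIZE
#define FRAME_SIZE 1024
#endif

int main() {
    unsigned int h_frame[FRAME_SIZE];
    for (int n = 0; n < FRAME_SIZE; ++n) h_frame[n] = n % 16;   // arbitrary test signal

    unsigned int *d_frame, *d_dft;
    cudaMalloc(&d_frame, FRAME_SIZE * sizeof(unsigned int));
    cudaMalloc(&d_dft, (FRAME_SIZE / 2) * sizeof(unsigned int));
    cudaMemcpy(d_frame, h_frame, FRAME_SIZE * sizeof(unsigned int), cudaMemcpyHostToDevice);

    // One thread per coefficient; the kernel itself bounds-checks against FRAME_SIZE/2.
    const int threads = 256;
    const int blocks = (FRAME_SIZE / 2 + threads - 1) / threads;
    DFT<<<blocks, threads>>>(d_frame, d_dft);
    cudaDeviceSynchronize();

    unsigned int h_dft[FRAME_SIZE / 2];
    cudaMemcpy(h_dft, d_dft, sizeof(h_dft), cudaMemcpyDeviceToHost);
    printf("X[0] (scaled) = %u\n", h_dft[0]);

    cudaFree(d_frame);
    cudaFree(d_dft);
    return 0;
}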
3c0b48565c3c168551b8ff0f83e8be2c9949556b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=[2,128] --blockDim=[16,4] --no-inline #include "common.h" __global__ void SobelShared(uchar4 *pSobelOriginal, unsigned short SobelPitch, #ifndef FIXED_BLOCKWIDTH short BlockWidth, short SharedPitch, #endif short w, short h, float fScale, int blockOperation, pointFunction_t pPointFunction ) { __requires(SobelPitch == 512); __requires(w == 512); #ifndef FIXED_BLOCKWIDTH __requires(BlockWidth == 80); __requires(SharedPitch == 384); #endif __requires(blockOperation == 0 | blockOperation == 1); __requires(blockFunction_table[0] == ComputeSobel); __requires(blockFunction_table[1] == ComputeBox); __requires(pPointFunction == Threshold | pPointFunction == NULL); short u = 4*blockIdx.x*BlockWidth; short v = blockIdx.y*blockDim.y + threadIdx.y; short ib; int SharedIdx = threadIdx.y * SharedPitch; for (ib = threadIdx.x; __global_invariant(ib%blockDim.x == threadIdx.x), __global_invariant(__write_implies(LocalBlock, (__write_offset_bytes(LocalBlock)-SharedIdx)/4 < (BlockWidth+2*RADIUS))), __global_invariant(__write_implies(LocalBlock, (__write_offset_bytes(LocalBlock)-SharedIdx)/4%blockDim.x == threadIdx.x)), ib < BlockWidth+2*RADIUS; ib += blockDim.x) { LocalBlock[SharedIdx+4*ib+0] = tex2D(tex, (float)(u+4*ib-RADIUS+0), (float)(v-RADIUS)); LocalBlock[SharedIdx+4*ib+1] = tex2D(tex, (float)(u+4*ib-RADIUS+1), (float)(v-RADIUS)); LocalBlock[SharedIdx+4*ib+2] = tex2D(tex, (float)(u+4*ib-RADIUS+2), (float)(v-RADIUS)); LocalBlock[SharedIdx+4*ib+3] = tex2D(tex, (float)(u+4*ib-RADIUS+3), (float)(v-RADIUS)); } if (threadIdx.y < RADIUS*2) { // // copy trailing RADIUS*2 rows of pixels into shared // SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch; for (ib = threadIdx.x; #define SharedIdxOld (threadIdx.y * SharedPitch) __global_invariant(__implies(threadIdx.y < RADIUS*2, ib%blockDim.x == threadIdx.x)), __global_invariant(__implies(threadIdx.y < RADIUS*2, __write_implies(LocalBlock, (((__write_offset_bytes(LocalBlock)-SharedIdx)/4 < (BlockWidth+2*RADIUS)) & ((__write_offset_bytes(LocalBlock)-SharedIdx)/4%blockDim.x == threadIdx.x)) | (((__write_offset_bytes(LocalBlock)-SharedIdxOld)/4 < (BlockWidth+2*RADIUS)) & ((__write_offset_bytes(LocalBlock)-SharedIdxOld)/4%blockDim.x == threadIdx.x))))), ib < BlockWidth+2*RADIUS; ib += blockDim.x) { LocalBlock[SharedIdx+4*ib+0] = tex2D(tex, (float)(u+4*ib-RADIUS+0), (float)(v+blockDim.y-RADIUS)); LocalBlock[SharedIdx+4*ib+1] = tex2D(tex, (float)(u+4*ib-RADIUS+1), (float)(v+blockDim.y-RADIUS)); LocalBlock[SharedIdx+4*ib+2] = tex2D(tex, (float)(u+4*ib-RADIUS+2), (float)(v+blockDim.y-RADIUS)); LocalBlock[SharedIdx+4*ib+3] = tex2D(tex, (float)(u+4*ib-RADIUS+3), (float)(v+blockDim.y-RADIUS)); } } __syncthreads(); u >>= 2; // index as uchar4 from here uchar4 *pSobel = (uchar4 *)(((char *) pSobelOriginal)+v*SobelPitch); SharedIdx = threadIdx.y * SharedPitch; blockFunction = blockFunction_table[blockOperation]; for (ib = threadIdx.x; __global_invariant(ib%blockDim.x == threadIdx.x), __global_invariant(__write_implies(pSobelOriginal, (((__write_offset_bytes(pSobelOriginal) - v*SobelPitch)/sizeof(uchar4)) - u)%blockDim.x == threadIdx.x)), __global_invariant(__write_implies(pSobelOriginal, (((__write_offset_bytes(pSobelOriginal) - v*SobelPitch)/sizeof(uchar4)) - u) < BlockWidth)), __global_invariant(__write_implies(pSobelOriginal, (__write_offset_bytes(pSobelOriginal) < (v + 1)*SobelPitch))), ib < BlockWidth; ib += blockDim.x) { uchar4 out; unsigned char pix00 = 
LocalBlock[SharedIdx+4*ib+0*SharedPitch+0]; unsigned char pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+1]; unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2]; unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0]; unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1]; unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2]; unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0]; unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1]; unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2]; out.x = (*blockFunction)(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale); pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3]; pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3]; pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3]; out.y = (*blockFunction)(pix01, pix02, pix00, pix11, pix12, pix10, pix21, pix22, pix20, fScale); pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4]; pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4]; pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4]; out.z = (*blockFunction)(pix02, pix00, pix01, pix12, pix10, pix11, pix22, pix20, pix21, fScale); pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5]; pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5]; pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5]; out.w = (*blockFunction)(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale); if (pPointFunction != NULL) { out.x = (*pPointFunction)(out.x, THRESHOLD); out.y = (*pPointFunction)(out.y, THRESHOLD); out.z = (*pPointFunction)(out.z, THRESHOLD); out.w = (*pPointFunction)(out.w, THRESHOLD); } if (u+ib < w/4 && v < h) { pSobel[u+ib] = out; } } __syncthreads(); }
3c0b48565c3c168551b8ff0f83e8be2c9949556b.cu
//pass //--gridDim=[2,128] --blockDim=[16,4] --no-inline #include "common.h" __global__ void SobelShared(uchar4 *pSobelOriginal, unsigned short SobelPitch, #ifndef FIXED_BLOCKWIDTH short BlockWidth, short SharedPitch, #endif short w, short h, float fScale, int blockOperation, pointFunction_t pPointFunction ) { __requires(SobelPitch == 512); __requires(w == 512); #ifndef FIXED_BLOCKWIDTH __requires(BlockWidth == 80); __requires(SharedPitch == 384); #endif __requires(blockOperation == 0 | blockOperation == 1); __requires(blockFunction_table[0] == ComputeSobel); __requires(blockFunction_table[1] == ComputeBox); __requires(pPointFunction == Threshold | pPointFunction == NULL); short u = 4*blockIdx.x*BlockWidth; short v = blockIdx.y*blockDim.y + threadIdx.y; short ib; int SharedIdx = threadIdx.y * SharedPitch; for (ib = threadIdx.x; __global_invariant(ib%blockDim.x == threadIdx.x), __global_invariant(__write_implies(LocalBlock, (__write_offset_bytes(LocalBlock)-SharedIdx)/4 < (BlockWidth+2*RADIUS))), __global_invariant(__write_implies(LocalBlock, (__write_offset_bytes(LocalBlock)-SharedIdx)/4%blockDim.x == threadIdx.x)), ib < BlockWidth+2*RADIUS; ib += blockDim.x) { LocalBlock[SharedIdx+4*ib+0] = tex2D(tex, (float)(u+4*ib-RADIUS+0), (float)(v-RADIUS)); LocalBlock[SharedIdx+4*ib+1] = tex2D(tex, (float)(u+4*ib-RADIUS+1), (float)(v-RADIUS)); LocalBlock[SharedIdx+4*ib+2] = tex2D(tex, (float)(u+4*ib-RADIUS+2), (float)(v-RADIUS)); LocalBlock[SharedIdx+4*ib+3] = tex2D(tex, (float)(u+4*ib-RADIUS+3), (float)(v-RADIUS)); } if (threadIdx.y < RADIUS*2) { // // copy trailing RADIUS*2 rows of pixels into shared // SharedIdx = (blockDim.y+threadIdx.y) * SharedPitch; for (ib = threadIdx.x; #define SharedIdxOld (threadIdx.y * SharedPitch) __global_invariant(__implies(threadIdx.y < RADIUS*2, ib%blockDim.x == threadIdx.x)), __global_invariant(__implies(threadIdx.y < RADIUS*2, __write_implies(LocalBlock, (((__write_offset_bytes(LocalBlock)-SharedIdx)/4 < (BlockWidth+2*RADIUS)) & ((__write_offset_bytes(LocalBlock)-SharedIdx)/4%blockDim.x == threadIdx.x)) | (((__write_offset_bytes(LocalBlock)-SharedIdxOld)/4 < (BlockWidth+2*RADIUS)) & ((__write_offset_bytes(LocalBlock)-SharedIdxOld)/4%blockDim.x == threadIdx.x))))), ib < BlockWidth+2*RADIUS; ib += blockDim.x) { LocalBlock[SharedIdx+4*ib+0] = tex2D(tex, (float)(u+4*ib-RADIUS+0), (float)(v+blockDim.y-RADIUS)); LocalBlock[SharedIdx+4*ib+1] = tex2D(tex, (float)(u+4*ib-RADIUS+1), (float)(v+blockDim.y-RADIUS)); LocalBlock[SharedIdx+4*ib+2] = tex2D(tex, (float)(u+4*ib-RADIUS+2), (float)(v+blockDim.y-RADIUS)); LocalBlock[SharedIdx+4*ib+3] = tex2D(tex, (float)(u+4*ib-RADIUS+3), (float)(v+blockDim.y-RADIUS)); } } __syncthreads(); u >>= 2; // index as uchar4 from here uchar4 *pSobel = (uchar4 *)(((char *) pSobelOriginal)+v*SobelPitch); SharedIdx = threadIdx.y * SharedPitch; blockFunction = blockFunction_table[blockOperation]; for (ib = threadIdx.x; __global_invariant(ib%blockDim.x == threadIdx.x), __global_invariant(__write_implies(pSobelOriginal, (((__write_offset_bytes(pSobelOriginal) - v*SobelPitch)/sizeof(uchar4)) - u)%blockDim.x == threadIdx.x)), __global_invariant(__write_implies(pSobelOriginal, (((__write_offset_bytes(pSobelOriginal) - v*SobelPitch)/sizeof(uchar4)) - u) < BlockWidth)), __global_invariant(__write_implies(pSobelOriginal, (__write_offset_bytes(pSobelOriginal) < (v + 1)*SobelPitch))), ib < BlockWidth; ib += blockDim.x) { uchar4 out; unsigned char pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+0]; unsigned char pix01 = 
LocalBlock[SharedIdx+4*ib+0*SharedPitch+1]; unsigned char pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+2]; unsigned char pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+0]; unsigned char pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+1]; unsigned char pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+2]; unsigned char pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+0]; unsigned char pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+1]; unsigned char pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+2]; out.x = (*blockFunction)(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale); pix00 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+3]; pix10 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+3]; pix20 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+3]; out.y = (*blockFunction)(pix01, pix02, pix00, pix11, pix12, pix10, pix21, pix22, pix20, fScale); pix01 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+4]; pix11 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+4]; pix21 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+4]; out.z = (*blockFunction)(pix02, pix00, pix01, pix12, pix10, pix11, pix22, pix20, pix21, fScale); pix02 = LocalBlock[SharedIdx+4*ib+0*SharedPitch+5]; pix12 = LocalBlock[SharedIdx+4*ib+1*SharedPitch+5]; pix22 = LocalBlock[SharedIdx+4*ib+2*SharedPitch+5]; out.w = (*blockFunction)(pix00, pix01, pix02, pix10, pix11, pix12, pix20, pix21, pix22, fScale); if (pPointFunction != NULL) { out.x = (*pPointFunction)(out.x, THRESHOLD); out.y = (*pPointFunction)(out.y, THRESHOLD); out.z = (*pPointFunction)(out.z, THRESHOLD); out.w = (*pPointFunction)(out.w, THRESHOLD); } if (u+ib < w/4 && v < h) { pSobel[u+ib] = out; } } __syncthreads(); }
90e463877ca56a0a8439aa53aad15def360caaeb.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/numeric.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/numeric.h" #include "chainerx/scalar.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct IfLessElseASSAImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType neg, CudaType& out) { out = x1 < x2 ? pos : neg; } CudaType x2; CudaType pos; }; } // namespace void CudaDevice::IfLessElseASSA(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) { CheckDevicesCompatible(x1, neg, out); CudaSetDeviceScope scope{index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, const T, T>(IfLessElseASSAImpl<T>{static_cast<CudaType>(x2), static_cast<CudaType>(pos)}, x1, neg, out); }); } namespace { template <typename T> struct IfGreaterElseASSAImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType neg, CudaType& out) { out = x1 > x2 ? pos : neg; } CudaType x2; CudaType pos; }; } // namespace void CudaDevice::IfGreaterElseASSA(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) { CheckDevicesCompatible(x1, neg, out); CudaSetDeviceScope scope{index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, const T, T>(IfGreaterElseASSAImpl<T>{static_cast<CudaType>(x2), static_cast<CudaType>(pos)}, x1, neg, out); }); } namespace { template <typename T> struct TanhImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tanh(x); } }; } // namespace void CudaDevice::Tanh(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(TanhImpl<T>{}, x_cast, out); }); } namespace { template <typename T> struct SinImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sin(x); } }; } // namespace void CudaDevice::Sin(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(SinImpl<T>{}, x_cast, out); }); } namespace { template <typename T> struct CosImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Cos(x); } }; } // namespace void CudaDevice::Cos(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; const Array& x_cast = x.dtype() == out.dtype() ? 
x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CosImpl<T>{}, x_cast, out); }); } } // namespace cuda } // namespace chainerx
90e463877ca56a0a8439aa53aad15def360caaeb.cu
#include "chainerx/cuda/cuda_device.h" #include <cmath> #include <cstdint> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/numeric.cuh" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/numeric.h" #include "chainerx/scalar.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct IfLessElseASSAImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType neg, CudaType& out) { out = x1 < x2 ? pos : neg; } CudaType x2; CudaType pos; }; } // namespace void CudaDevice::IfLessElseASSA(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) { CheckDevicesCompatible(x1, neg, out); CudaSetDeviceScope scope{index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, const T, T>(IfLessElseASSAImpl<T>{static_cast<CudaType>(x2), static_cast<CudaType>(pos)}, x1, neg, out); }); } namespace { template <typename T> struct IfGreaterElseASSAImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x1, CudaType neg, CudaType& out) { out = x1 > x2 ? pos : neg; } CudaType x2; CudaType pos; }; } // namespace void CudaDevice::IfGreaterElseASSA(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) { CheckDevicesCompatible(x1, neg, out); CudaSetDeviceScope scope{index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; using CudaType = cuda_internal::DataType<T>; Elementwise<const T, const T, T>(IfGreaterElseASSAImpl<T>{static_cast<CudaType>(x2), static_cast<CudaType>(pos)}, x1, neg, out); }); } namespace { template <typename T> struct TanhImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tanh(x); } }; } // namespace void CudaDevice::Tanh(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(TanhImpl<T>{}, x_cast, out); }); } namespace { template <typename T> struct SinImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Sin(x); } }; } // namespace void CudaDevice::Sin(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; const Array& x_cast = x.dtype() == out.dtype() ? x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(SinImpl<T>{}, x_cast, out); }); } namespace { template <typename T> struct CosImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Cos(x); } }; } // namespace void CudaDevice::Cos(const Array& x, const Array& out) { CheckDevicesCompatible(x, out); CudaSetDeviceScope scope{index()}; const Array& x_cast = x.dtype() == out.dtype() ? 
x : x.AsType(out.dtype()); VisitFloatingPointDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CosImpl<T>{}, x_cast, out); }); } } // namespace cuda } // namespace chainerx
9011328965bca41f4f8daf3820783cc3066ffa10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/opencv.hpp> #include <opencv2/gpu/gpu.hpp> #include <algorithm> // std::min using namespace std; using namespace cv; using namespace cv::gpu; inline uint getFirstIndex(uchar, uchar, uchar); uchar *LUMBGR2HSV; uchar *d_LUMBGR2HSV; __global__ void kernelconvert(uchar *LUT) { uint i = (blockIdx.x * blockDim.x) + threadIdx.x; uint j = (blockIdx.y * blockDim.y) + threadIdx.y; uint k = (blockIdx.z * blockDim.z) + threadIdx.z; if (i < 256 && j < 256 && k < 256) { uchar _b = i; uchar _g = j; uchar _r = k; float b = (float)_b / 255.0; float g = (float)_g / 255.0; float r = (float)_r / 255.0; float h, s, v; float _min = min(min(b, g), r); v = max(max(b, g), r); float chroma = v - _min; if (v != 0) s = chroma / v; // s else { s = 0; h = -1; return; } if (r == v) h = (g - b) / chroma; else if (g == v) h = 2 + (b - r) / chroma; else h = 4 + (r - g) / chroma; h *= 30; if (h < 0) h += 180; s *= 255; v *= 255; uint index = 3 * 256 * 256 * i + 256 * 3 * j + 3 * k; LUT[index] = (uchar)h; LUT[index + 1] = (uchar)s; //height, width Saturation LUT[index + 2] = (uchar)v; //height, width Value } } __global__ void kernelSwap(PtrStepSz<uchar> src, PtrStepSz<uchar> dst, uchar *LUT) { uint i = (blockIdx.x * blockDim.x) + threadIdx.x; uint j = 3 * ((blockIdx.y * blockDim.y) + threadIdx.y); uint index = 3 * 256 * 256 * src.ptr(i)[j] + 256 * 3 * src.ptr(i)[j + 1] + 3 * src.ptr(i)[j + 2]; dst.ptr(i)[j] = LUT[index]; dst.ptr(i)[j+1] = LUT[index+1]; dst.ptr(i)[j+2] = LUT[index+2]; } inline uint getFirstIndex(uchar b, uchar g, uchar r) { return 3 * 256 * 256 * b + 256 * 3 * g + 3 * r; } void initializeLUM() { hipSetDeviceFlags(hipDeviceMapHost); hipHostMalloc((void **)&LUMBGR2HSV, 256*256*256*3, hipHostMallocMapped); hipHostGetDevicePointer((void**)&d_LUMBGR2HSV, (void *) LUMBGR2HSV, 0); dim3 threads_per_block(8, 8,8); dim3 numBlocks(32,32,32); kernelconvert << <numBlocks, threads_per_block >> >(d_LUMBGR2HSV); } void BGR2HSV_LUM(GpuMat src, GpuMat dst) { dim3 threads_per_block(16, 16); dim3 numBlocks(45, 80); kernelSwap << <numBlocks, threads_per_block >> >(src, dst, d_LUMBGR2HSV); } /* Commented Sections used to test speed difference with GPU look up table and opencv cvtColor GPU code about 2760000 cv ticks faster */ int main(int argc, char** argv) { string filename = "mouse.mp4"; initializeLUM(); gpu::setDevice(0); gpu::GpuMat src, inHSV; Mat frame; Mat openCvcvt; VideoCapture capture(filename); for (; ; ) { capture.read(frame); if (frame.empty()) break; src.upload(frame); inHSV.upload(frame); //int64 before = getTickCount(); //cvtColor(frame, openCvcvt, CV_BGR2HSV); //int64 afterOpencvF = getTickCount(); BGR2HSV_LUM(src, inHSV); //int64 afterCuda = getTickCount(); //int cvtColorTime = afterOpencvF - before; //int kernelConvertTime = afterCuda - afterOpencvF; //printf("CvtColor: %d kernelConvert %d diff %d \n", cvtColorTime, kernelConvertTime, cvtColorTime - kernelConvertTime); Mat download(inHSV); imshow("HSV", download); //imshow("hsvopencv", openCvcvt); waitKey(10); // waits to display frame } waitKey(0); // key press to close window }
9011328965bca41f4f8daf3820783cc3066ffa10.cu
#include <stdio.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/opencv.hpp> #include <opencv2/gpu/gpu.hpp> #include <algorithm> // std::min using namespace std; using namespace cv; using namespace cv::gpu; inline uint getFirstIndex(uchar, uchar, uchar); uchar *LUMBGR2HSV; uchar *d_LUMBGR2HSV; __global__ void kernelconvert(uchar *LUT) { uint i = (blockIdx.x * blockDim.x) + threadIdx.x; uint j = (blockIdx.y * blockDim.y) + threadIdx.y; uint k = (blockIdx.z * blockDim.z) + threadIdx.z; if (i < 256 && j < 256 && k < 256) { uchar _b = i; uchar _g = j; uchar _r = k; float b = (float)_b / 255.0; float g = (float)_g / 255.0; float r = (float)_r / 255.0; float h, s, v; float _min = min(min(b, g), r); v = max(max(b, g), r); float chroma = v - _min; if (v != 0) s = chroma / v; // s else { s = 0; h = -1; return; } if (r == v) h = (g - b) / chroma; else if (g == v) h = 2 + (b - r) / chroma; else h = 4 + (r - g) / chroma; h *= 30; if (h < 0) h += 180; s *= 255; v *= 255; uint index = 3 * 256 * 256 * i + 256 * 3 * j + 3 * k; LUT[index] = (uchar)h; LUT[index + 1] = (uchar)s; //height, width Saturation LUT[index + 2] = (uchar)v; //height, width Value } } __global__ void kernelSwap(PtrStepSz<uchar> src, PtrStepSz<uchar> dst, uchar *LUT) { uint i = (blockIdx.x * blockDim.x) + threadIdx.x; uint j = 3 * ((blockIdx.y * blockDim.y) + threadIdx.y); uint index = 3 * 256 * 256 * src.ptr(i)[j] + 256 * 3 * src.ptr(i)[j + 1] + 3 * src.ptr(i)[j + 2]; dst.ptr(i)[j] = LUT[index]; dst.ptr(i)[j+1] = LUT[index+1]; dst.ptr(i)[j+2] = LUT[index+2]; } inline uint getFirstIndex(uchar b, uchar g, uchar r) { return 3 * 256 * 256 * b + 256 * 3 * g + 3 * r; } void initializeLUM() { cudaSetDeviceFlags(cudaDeviceMapHost); cudaHostAlloc((void **)&LUMBGR2HSV, 256*256*256*3, cudaHostAllocMapped); cudaHostGetDevicePointer((void**)&d_LUMBGR2HSV, (void *) LUMBGR2HSV, 0); dim3 threads_per_block(8, 8,8); dim3 numBlocks(32,32,32); kernelconvert << <numBlocks, threads_per_block >> >(d_LUMBGR2HSV); } void BGR2HSV_LUM(GpuMat src, GpuMat dst) { dim3 threads_per_block(16, 16); dim3 numBlocks(45, 80); kernelSwap << <numBlocks, threads_per_block >> >(src, dst, d_LUMBGR2HSV); } /* Commented Sections used to test speed difference with GPU look up table and opencv cvtColor GPU code about 2760000 cv ticks faster */ int main(int argc, char** argv) { string filename = "mouse.mp4"; initializeLUM(); gpu::setDevice(0); gpu::GpuMat src, inHSV; Mat frame; Mat openCvcvt; VideoCapture capture(filename); for (; ; ) { capture.read(frame); if (frame.empty()) break; src.upload(frame); inHSV.upload(frame); //int64 before = getTickCount(); //cvtColor(frame, openCvcvt, CV_BGR2HSV); //int64 afterOpencvF = getTickCount(); BGR2HSV_LUM(src, inHSV); //int64 afterCuda = getTickCount(); //int cvtColorTime = afterOpencvF - before; //int kernelConvertTime = afterCuda - afterOpencvF; //printf("CvtColor: %d kernelConvert %d diff %d \n", cvtColorTime, kernelConvertTime, cvtColorTime - kernelConvertTime); Mat download(inHSV); imshow("HSV", download); //imshow("hsvopencv", openCvcvt); waitKey(10); // waits to display frame } waitKey(0); // key press to close window }
d80e33f7c590141975f16628d23489663b2f54ce.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "deltaCalcHidden.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            // Allocation is in bytes, so account for sizeof(float).
            float *Activation = NULL;
            hipMalloc(&Activation, XSIZE*YSIZE*sizeof(float));
            float *delta = NULL;
            hipMalloc(&delta, XSIZE*YSIZE*sizeof(float));
            // Round the problem size up to a multiple of the block size.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // Warm-up: force context creation, then one launch and a synchronize.
            hipFree(0);
            hipLaunchKernelGGL(( deltaCalcHidden), dim3(gridBlock),dim3(threadBlock), 0, 0, Activation,delta);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( deltaCalcHidden), dim3(gridBlock),dim3(threadBlock), 0, 0, Activation,delta);
            }
            // Time 1000 back-to-back launches (no final device synchronize).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( deltaCalcHidden), dim3(gridBlock),dim3(threadBlock), 0, 0, Activation,delta);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
d80e33f7c590141975f16628d23489663b2f54ce.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "deltaCalcHidden.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            // Allocation is in bytes, so account for sizeof(float).
            float *Activation = NULL;
            cudaMalloc(&Activation, XSIZE*YSIZE*sizeof(float));
            float *delta = NULL;
            cudaMalloc(&delta, XSIZE*YSIZE*sizeof(float));
            // Round the problem size up to a multiple of the block size.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // Warm-up: force context creation, then one launch and a synchronize.
            cudaFree(0);
            deltaCalcHidden<<<gridBlock,threadBlock>>>(Activation,delta);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                deltaCalcHidden<<<gridBlock,threadBlock>>>(Activation,delta);
            }
            // Time 1000 back-to-back launches (no final device synchronize).
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                deltaCalcHidden<<<gridBlock,threadBlock>>>(Activation,delta);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
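Both harnesses above include a deltaCalcHidden.cu that is not part of this pair, so the kernel body is unknown here. As a guess at its shape only: a hidden-layer delta in sigmoid backpropagation is usually the elementwise product delta *= a * (1 - a), which the hypothetical kernel below implements with the 2D indexing implied by the launch configuration; a real version would also carry the array extent and bounds-check.

// Hypothetical stand-in for deltaCalcHidden.cu (the real file is not shown above).
__global__ void deltaCalcHidden(float *Activation, float *delta) {
    // Flatten the 2D launch used by the harness into a linear element index.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int width = gridDim.x * blockDim.x;          // row pitch implied by the launch shape
    int idx = y * width + x;

    float a = Activation[idx];
    delta[idx] = delta[idx] * a * (1.0f - a);    // sigmoid derivative applied to the delta
}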
ea40dfade882636f07bc908097c39171e9fdab3a.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include "linalg/add.h" #include "linalg/subtract.h" #include "cuda_utils.h" #include <assert.h> #include <math.h> #include <stddef.h> #include <vector> #include <algorithm> #include <limits> namespace { template<typename T> class TestBuffer { public: TestBuffer(size_t arrayLength) : devContainterRaw(nullptr) , hostContainter(arrayLength, T()) { MLCommon::allocate(devContainterRaw, arrayLength); EXPECT_TRUE(devContainterRaw != nullptr); } ~TestBuffer() { EXPECT_TRUE(hipFree(devContainterRaw) == hipSuccess); } T* getDevPtr() { return devContainterRaw; } T* getHostPtr() { if (hostContainter.empty()) return nullptr; else return &hostContainter[0]; } T hostValueAt(size_t index) const { if (index >= hostContainter.size()) { assert(!"INDEX IS OT OF ACCESSABLE RANGE"); return T(); } return hostContainter.at(index); } size_t size() const { return hostContainter.size(); } void fillArithmeticSeq(const T& start = T(1), const T& step = T(1)) { for (size_t i = 0; i < hostContainter.size(); ++i) hostContainter[i] = start + step*i; copy2Device(); } void copy2Device() { EXPECT_TRUE(hipMemcpy(getDevPtr(), getHostPtr(), size() * sizeof(T), hipMemcpyHostToDevice) == hipSuccess); } void copy2Host() { EXPECT_TRUE(hipMemcpy(getHostPtr(), getDevPtr(), size() * sizeof(T), hipMemcpyDeviceToHost) == hipSuccess); } private: T* devContainterRaw; std::vector<T> hostContainter; private: TestBuffer(const TestBuffer&) = delete; TestBuffer operator = (const TestBuffer&) = delete; }; } template<typename T> void test_add(size_t arraLength) { TestBuffer<T> in(arraLength); TestBuffer<T> extraScalar(1); TestBuffer<T> out(arraLength); in.fillArithmeticSeq(); extraScalar.fillArithmeticSeq(); out.fillArithmeticSeq(); MLCommon::LinAlg::addDevScalar(out.getDevPtr(), in.getDevPtr(), extraScalar.getDevPtr(), in.size()); out.copy2Host(); T maxError = T(); for (int i = 0; i < arraLength; i++) { maxError = ::max(maxError, abs( (in.hostValueAt(i) + extraScalar.hostValueAt(0)) - out.hostValueAt(i) ) ); } EXPECT_TRUE(maxError < std::numeric_limits<T>::epsilon()) << "Max deviation in test_add is greater then " << std::numeric_limits<T>::epsilon(); } template<typename T> void test_subtract(size_t arraLength) { TestBuffer<T> in(arraLength); TestBuffer<T> extraScalar(1); TestBuffer<T> out(arraLength); in.fillArithmeticSeq(); extraScalar.fillArithmeticSeq(); out.fillArithmeticSeq(); MLCommon::LinAlg::subtractDevScalar(out.getDevPtr(), in.getDevPtr(), extraScalar.getDevPtr(), in.size()); out.copy2Host(); T maxError = T(); for (int i = 0; i < arraLength; i++) maxError = ::max(maxError, abs( (in.hostValueAt(i) - extraScalar.hostValueAt(0)) - out.hostValueAt(i) ) ); EXPECT_TRUE(maxError < std::numeric_limits<T>::epsilon()) << "Max deviation test_subtract is greater then " << std::numeric_limits<T>::epsilon(); } TEST(AddAndSubDevScalarTest, add_test) { test_add<float>(1); test_add<float>(100); test_add<double>(1); test_add<double>(100); } TEST(AddAndSubDevScalarTest, subtract_test) { test_subtract<float>(1); test_subtract<float>(100); test_subtract<double>(1); test_subtract<double>(100); }
ea40dfade882636f07bc908097c39171e9fdab3a.cu
#include <gtest/gtest.h> #include "linalg/add.h" #include "linalg/subtract.h" #include "cuda_utils.h" #include <assert.h> #include <math.h> #include <stddef.h> #include <vector> #include <algorithm> #include <limits> namespace { template<typename T> class TestBuffer { public: TestBuffer(size_t arrayLength) : devContainterRaw(nullptr) , hostContainter(arrayLength, T()) { MLCommon::allocate(devContainterRaw, arrayLength); EXPECT_TRUE(devContainterRaw != nullptr); } ~TestBuffer() { EXPECT_TRUE(cudaFree(devContainterRaw) == cudaSuccess); } T* getDevPtr() { return devContainterRaw; } T* getHostPtr() { if (hostContainter.empty()) return nullptr; else return &hostContainter[0]; } T hostValueAt(size_t index) const { if (index >= hostContainter.size()) { assert(!"INDEX IS OT OF ACCESSABLE RANGE"); return T(); } return hostContainter.at(index); } size_t size() const { return hostContainter.size(); } void fillArithmeticSeq(const T& start = T(1), const T& step = T(1)) { for (size_t i = 0; i < hostContainter.size(); ++i) hostContainter[i] = start + step*i; copy2Device(); } void copy2Device() { EXPECT_TRUE(cudaMemcpy(getDevPtr(), getHostPtr(), size() * sizeof(T), cudaMemcpyHostToDevice) == cudaSuccess); } void copy2Host() { EXPECT_TRUE(cudaMemcpy(getHostPtr(), getDevPtr(), size() * sizeof(T), cudaMemcpyDeviceToHost) == cudaSuccess); } private: T* devContainterRaw; std::vector<T> hostContainter; private: TestBuffer(const TestBuffer&) = delete; TestBuffer operator = (const TestBuffer&) = delete; }; } template<typename T> void test_add(size_t arraLength) { TestBuffer<T> in(arraLength); TestBuffer<T> extraScalar(1); TestBuffer<T> out(arraLength); in.fillArithmeticSeq(); extraScalar.fillArithmeticSeq(); out.fillArithmeticSeq(); MLCommon::LinAlg::addDevScalar(out.getDevPtr(), in.getDevPtr(), extraScalar.getDevPtr(), in.size()); out.copy2Host(); T maxError = T(); for (int i = 0; i < arraLength; i++) { maxError = std::max(maxError, abs( (in.hostValueAt(i) + extraScalar.hostValueAt(0)) - out.hostValueAt(i) ) ); } EXPECT_TRUE(maxError < std::numeric_limits<T>::epsilon()) << "Max deviation in test_add is greater then " << std::numeric_limits<T>::epsilon(); } template<typename T> void test_subtract(size_t arraLength) { TestBuffer<T> in(arraLength); TestBuffer<T> extraScalar(1); TestBuffer<T> out(arraLength); in.fillArithmeticSeq(); extraScalar.fillArithmeticSeq(); out.fillArithmeticSeq(); MLCommon::LinAlg::subtractDevScalar(out.getDevPtr(), in.getDevPtr(), extraScalar.getDevPtr(), in.size()); out.copy2Host(); T maxError = T(); for (int i = 0; i < arraLength; i++) maxError = std::max(maxError, abs( (in.hostValueAt(i) - extraScalar.hostValueAt(0)) - out.hostValueAt(i) ) ); EXPECT_TRUE(maxError < std::numeric_limits<T>::epsilon()) << "Max deviation test_subtract is greater then " << std::numeric_limits<T>::epsilon(); } TEST(AddAndSubDevScalarTest, add_test) { test_add<float>(1); test_add<float>(100); test_add<double>(1); test_add<double>(100); } TEST(AddAndSubDevScalarTest, subtract_test) { test_subtract<float>(1); test_subtract<float>(100); test_subtract<double>(1); test_subtract<double>(100); }
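The tests above exercise MLCommon::LinAlg::addDevScalar and subtractDevScalar, whose implementations live in the included headers rather than in this file. The stand-alone sketch below only illustrates the operation being verified — out[i] = in[i] + *scalar, with the scalar read from device memory — and is not the library's actual code.

// Illustrative add-device-scalar kernel and launcher (not the MLCommon implementation).
#include <cstddef>

template <typename T>
__global__ void addDevScalarKernel(T *out, const T *in, const T *scalar, size_t len) {
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (i < len)
        out[i] = in[i] + *scalar;      // scalar is dereferenced on the device
}

template <typename T>
void addDevScalarSketch(T *out, const T *in, const T *scalar, size_t len) {
    const int threads = 256;
    const int blocks = (int)((len + threads - 1) / threads);
    addDevScalarKernel<T><<<blocks, threads>>>(out, in, scalar, len);
}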
c5fe3de059b7093fe556a2a5d51c2a135acdd6db.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011-2018, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "correlate/private_correlate_functions_inline.h" #include "correlate/oskar_cross_correlate_cuda.h" #include "math/oskar_add_inline.h" #include "utility/oskar_device_utils.h" #include <hip/hip_runtime.h> #include <cstdlib> #include <cstring> // Indices into the visibility/baseline matrix. #define SP blockIdx.x /* Column index. */ #define SQ blockIdx.y /* Row index. */ enum { VER_OLD = 1, VER_NON_SM = 2, VER_SM = 3 }; static int ver_ = 0; static int correlate_version(void); template < // Compile-time parameters. bool BANDWIDTH_SMEARING, bool TIME_SMEARING, bool GAUSSIAN, typename REAL, typename REAL2, typename REAL8 > __global__ void oskar_xcorr_cudak( const int num_sources, const int num_stations, const REAL8* const restrict jones, const REAL* const restrict source_I, const REAL* const restrict source_Q, const REAL* const restrict source_U, const REAL* const restrict source_V, const REAL* const restrict source_l, const REAL* const restrict source_m, const REAL* const restrict source_n, const REAL* const restrict source_a, const REAL* const restrict source_b, const REAL* const restrict source_c, const REAL* const restrict station_u, const REAL* const restrict station_v, const REAL* const restrict station_w, const REAL* const restrict station_x, const REAL* const restrict station_y, const REAL uv_min_lambda, const REAL uv_max_lambda, const REAL inv_wavelength, const REAL frac_bandwidth, const REAL time_int_sec, const REAL gha0_rad, const REAL dec0_rad, REAL8* restrict vis) { extern __shared__ __align__(sizeof(double4c)) unsigned char my_smem[]; __shared__ REAL uv_len, uu, vv, ww, uu2, vv2, uuvv, du, dv, dw; REAL8 m1, m2, sum; // Partial sum per thread. REAL8* smem = reinterpret_cast<REAL8*>(my_smem); // Allows template. // Return immediately if in the wrong half of the visibility matrix. if (SQ >= SP) return; // Get common baseline values per thread block. 
if (threadIdx.x == 0) { OSKAR_BASELINE_TERMS(REAL, station_u[SP], station_u[SQ], station_v[SP], station_v[SQ], station_w[SP], station_w[SQ], uu, vv, ww, uu2, vv2, uuvv, uv_len); if (TIME_SMEARING) OSKAR_BASELINE_DELTAS(REAL, station_x[SP], station_x[SQ], station_y[SP], station_y[SQ], du, dv, dw); } __syncthreads(); // Apply the baseline length filter. if (uv_len < uv_min_lambda || uv_len > uv_max_lambda) return; // Get pointers to source vectors for both stations. const REAL8* const restrict station_p = &jones[num_sources * SP]; const REAL8* const restrict station_q = &jones[num_sources * SQ]; // Each thread loops over a subset of the sources. OSKAR_CLEAR_COMPLEX_MATRIX(REAL, sum) for (int i = threadIdx.x; i < num_sources; i += blockDim.x) { REAL smearing; if (GAUSSIAN) { const REAL t = source_a[i] * uu2 + source_b[i] * uuvv + source_c[i] * vv2; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = source_l[i]; const REAL m = source_m[i]; const REAL n = source_n[i] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu * l + vv * m + ww * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du * l + dv * m + dw * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, source_I[i], source_Q[i], source_U[i], source_V[i]) // Multiply first Jones matrix with source brightness matrix. OSKAR_LOAD_MATRIX(m1, station_p[i]) OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, station_q[i]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) } // Store partial sum for the thread in shared memory. smem[threadIdx.x] = sum; __syncthreads(); // Accumulate contents of shared memory. if (threadIdx.x == 0) { // Sum over all sources for this baseline. for (int i = 1; i < blockDim.x; ++i) OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(sum, smem[i]); // Add result of this thread block to the baseline visibility. int i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ); OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[i], sum); } } #define OKN_NSOURCES 32 #define OKN_BPK 4 /* baselines per kernel */ #define WARP 32 template < // Compile-time parameters. 
bool BANDWIDTH_SMEARING, bool TIME_SMEARING, bool GAUSSIAN, typename REAL, typename REAL2, typename REAL8 > __global__ void oskar_xcorr_NON_SM_cudak( const int num_sources, const int num_stations, const REAL8* const restrict jones, const REAL* const restrict source_I, const REAL* const restrict source_Q, const REAL* const restrict source_U, const REAL* const restrict source_V, const REAL* const restrict source_l, const REAL* const restrict source_m, const REAL* const restrict source_n, const REAL* const restrict source_a, const REAL* const restrict source_b, const REAL* const restrict source_c, const REAL* const restrict station_u, const REAL* const restrict station_v, const REAL* const restrict station_w, const REAL* const restrict station_x, const REAL* const restrict station_y, const REAL uv_min_lambda, const REAL uv_max_lambda, const REAL inv_wavelength, const REAL frac_bandwidth, const REAL time_int_sec, const REAL gha0_rad, const REAL dec0_rad, REAL8* restrict vis) { __shared__ REAL uv_len[OKN_BPK], uu[OKN_BPK], vv[OKN_BPK], ww[OKN_BPK]; __shared__ REAL uu2[OKN_BPK], vv2[OKN_BPK], uuvv[OKN_BPK]; __shared__ REAL du[OKN_BPK], dv[OKN_BPK], dw[OKN_BPK]; __shared__ const REAL8 *station_q[OKN_BPK]; REAL8 m1, m2, sum; const int w = (threadIdx.x >> 5); // Warp ID. const int i = (threadIdx.x & 31); // ID within warp (local ID). // Return immediately if in the wrong half of the visibility matrix. if (OKN_BPK * SQ >= SP) return; // Get baseline values per warp. if (i == 0) { const int i_sq = OKN_BPK * SQ + w; // Set pointer to source vector for station q to safe position // so non-existence SQ >= SP does not cause problems. station_q[w] = &jones[0]; if (i_sq < num_stations) { OSKAR_BASELINE_TERMS(REAL, station_u[SP], station_u[i_sq], station_v[SP], station_v[i_sq], station_w[SP], station_w[i_sq], uu[w], vv[w], ww[w], uu2[w], vv2[w], uuvv[w], uv_len[w]); if (TIME_SMEARING) OSKAR_BASELINE_DELTAS(REAL, station_x[SP], station_x[i_sq], station_y[SP], station_y[i_sq], du[w], dv[w], dw[w]); // Get valid pointer to source vector for station q. station_q[w] = &jones[num_sources * i_sq]; } } __syncthreads(); // Get pointer to source vector for station p. const REAL8* const restrict station_p = &jones[num_sources * SP]; // Each thread from given warp loops over a subset of the sources, // and each warp works with a different station q. OSKAR_CLEAR_COMPLEX_MATRIX(REAL, sum) int itemp = (num_sources >> 5) * WARP; for (int outer = i; outer < itemp; outer += WARP) { REAL smearing; if (GAUSSIAN) { const REAL t = source_a[outer] * uu2[w] + source_b[outer] * uuvv[w] + source_c[outer] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = source_l[outer]; const REAL m = source_m[outer]; const REAL n = source_n[outer] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, source_I[outer], source_Q[outer], source_U[outer], source_V[outer]) // Multiply first Jones matrix with source brightness matrix. m1 = station_p[outer]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. 
OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) __syncthreads(); } if ((num_sources & 31) > 0) { int outer = (num_sources >> 5) * WARP + i; if (outer < num_sources) { REAL smearing; if (GAUSSIAN) { const REAL t = source_a[outer] * uu2[w] + source_b[outer] * uuvv[w] + source_c[outer] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = source_l[outer]; const REAL m = source_m[outer]; const REAL n = source_n[outer] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, source_I[outer], source_Q[outer], source_U[outer], source_V[outer]) // Multiply first Jones matrix with source brightness matrix. m1 = station_p[outer]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) } } // Reduce matrices within warp. OSKAR_WARP_REDUCE(sum.a.x); OSKAR_WARP_REDUCE(sum.a.y); OSKAR_WARP_REDUCE(sum.b.x); OSKAR_WARP_REDUCE(sum.b.y); OSKAR_WARP_REDUCE(sum.c.x); OSKAR_WARP_REDUCE(sum.c.y); OSKAR_WARP_REDUCE(sum.d.x); OSKAR_WARP_REDUCE(sum.d.y); // Add result of this warp to the baseline visibility. if (i == 0 && (OKN_BPK * SQ + w) < SP) { if (uv_len[w] < uv_min_lambda || uv_len[w] > uv_max_lambda) return; const int j = oskar_evaluate_baseline_index_inline(num_stations, SP, OKN_BPK * SQ + w); OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[j], sum); } } template < // Compile-time parameters. 
bool BANDWIDTH_SMEARING, bool TIME_SMEARING, bool GAUSSIAN, typename REAL, typename REAL2, typename REAL8 > __global__ void oskar_xcorr_SM_cudak( const int num_sources, const int num_stations, const REAL8* const restrict jones, const REAL* const restrict source_I, const REAL* const restrict source_Q, const REAL* const restrict source_U, const REAL* const restrict source_V, const REAL* const restrict source_l, const REAL* const restrict source_m, const REAL* const restrict source_n, const REAL* const restrict source_a, const REAL* const restrict source_b, const REAL* const restrict source_c, const REAL* const restrict station_u, const REAL* const restrict station_v, const REAL* const restrict station_w, const REAL* const restrict station_x, const REAL* const restrict station_y, const REAL uv_min_lambda, const REAL uv_max_lambda, const REAL inv_wavelength, const REAL frac_bandwidth, const REAL time_int_sec, const REAL gha0_rad, const REAL dec0_rad, REAL8* restrict vis) { __shared__ REAL uv_len[OKN_BPK], uu[OKN_BPK], vv[OKN_BPK], ww[OKN_BPK]; __shared__ REAL uu2[OKN_BPK], vv2[OKN_BPK], uuvv[OKN_BPK]; __shared__ REAL du[OKN_BPK], dv[OKN_BPK], dw[OKN_BPK]; __shared__ const REAL8 *station_q[OKN_BPK]; __shared__ REAL s_I[OKN_NSOURCES]; __shared__ REAL s_Q[OKN_NSOURCES]; __shared__ REAL s_U[OKN_NSOURCES]; __shared__ REAL s_V[OKN_NSOURCES]; __shared__ REAL s_l[OKN_NSOURCES]; __shared__ REAL s_m[OKN_NSOURCES]; __shared__ REAL s_n[OKN_NSOURCES]; __shared__ REAL s_a[OKN_NSOURCES]; __shared__ REAL s_b[OKN_NSOURCES]; __shared__ REAL s_c[OKN_NSOURCES]; __shared__ REAL8 s_sp[OKN_NSOURCES]; REAL8 m1, m2, sum; const int w = (threadIdx.x >> 5); // Warp ID. const int i = (threadIdx.x & 31); // ID within warp (local ID). // Return immediately if in the wrong half of the visibility matrix. if (OKN_BPK * SQ >= SP) return; // Get baseline values per warp. if (i == 0) { const int i_sq = OKN_BPK * SQ + w; // Set pointer to source vector for station q to safe position // so non-existence SQ >= SP does not cause problems. station_q[w] = &jones[0]; if (i_sq < num_stations) { OSKAR_BASELINE_TERMS(REAL, station_u[SP], station_u[i_sq], station_v[SP], station_v[i_sq], station_w[SP], station_w[i_sq], uu[w], vv[w], ww[w], uu2[w], vv2[w], uuvv[w], uv_len[w]); if (TIME_SMEARING) OSKAR_BASELINE_DELTAS(REAL, station_x[SP], station_x[i_sq], station_y[SP], station_y[i_sq], du[w], dv[w], dw[w]); // Get valid pointer to source vector for station q. station_q[w] = &jones[num_sources * i_sq]; } } __syncthreads(); // Get pointer to source vector for station p. const REAL8* const restrict station_p = &jones[num_sources * SP]; // Each thread from given warp loops over a subset of the sources, // and each warp works with a different station q. 
OSKAR_CLEAR_COMPLEX_MATRIX(REAL, sum) int itemp = (num_sources >> 5) * WARP; for (int outer = i; outer < itemp; outer += WARP) { if (w == 0) { s_I[i] = source_I[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_l[i] = source_l[outer]; if (GAUSSIAN) { s_a[i] = source_a[outer]; s_b[i] = source_b[outer]; } } if (w == 1) { s_Q[i] = source_Q[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_m[i] = source_m[outer]; if (GAUSSIAN) s_c[i] = source_c[outer]; } if (w == 2) { s_U[i] = source_U[outer]; s_V[i] = source_V[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_n[i] = source_n[outer]; } if (w == 3) { s_sp[i] = station_p[outer]; } __syncthreads(); REAL smearing; if (GAUSSIAN) { const REAL t = s_a[i] * uu2[w] + s_b[i] * uuvv[w] + s_c[i] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = s_l[i]; const REAL m = s_m[i]; const REAL n = s_n[i] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, s_I[i], s_Q[i], s_U[i], s_V[i]) // Multiply first Jones matrix with source brightness matrix. m1 = s_sp[i]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) __syncthreads(); } if ((num_sources & 31) > 0) { int outer = (num_sources >> 5) * WARP + i; if (outer < num_sources) { if (w == 0) { s_I[i] = source_I[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_l[i] = source_l[outer]; if (GAUSSIAN) { s_a[i] = source_a[outer]; s_b[i] = source_b[outer]; } } if (w == 1) { s_Q[i] = source_Q[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_m[i] = source_m[outer]; if (GAUSSIAN) s_c[i] = source_c[outer]; } if (w == 2) { s_U[i] = source_U[outer]; s_V[i] = source_V[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_n[i] = source_n[outer]; } if (w == 3) { s_sp[i] = station_p[outer]; } } __syncthreads(); if (outer < num_sources) { REAL smearing; if (GAUSSIAN) { const REAL t = s_a[i] * uu2[w] + s_b[i] * uuvv[w] + s_c[i] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = s_l[i]; const REAL m = s_m[i]; const REAL n = s_n[i] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, s_I[i], s_Q[i], s_U[i], s_V[i]) // Multiply first Jones matrix with source brightness matrix. m1 = s_sp[i]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) } } // Reduce matrices within warp. 
OSKAR_WARP_REDUCE(sum.a.x); OSKAR_WARP_REDUCE(sum.a.y); OSKAR_WARP_REDUCE(sum.b.x); OSKAR_WARP_REDUCE(sum.b.y); OSKAR_WARP_REDUCE(sum.c.x); OSKAR_WARP_REDUCE(sum.c.y); OSKAR_WARP_REDUCE(sum.d.x); OSKAR_WARP_REDUCE(sum.d.y); // Add result of this warp to the baseline visibility. if (i == 0 && (OKN_BPK * SQ + w) < SP) { if (uv_len[w] < uv_min_lambda || uv_len[w] > uv_max_lambda) return; const int j = oskar_evaluate_baseline_index_inline(num_stations, SP, OKN_BPK * SQ + w); OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[j], sum); } } #define XCORR_KERNEL(NAME, BS, TS, GAUSSIAN, REAL, REAL2, REAL8) \ NAME<BS, TS, GAUSSIAN, REAL, REAL2, REAL8> \ OSKAR_CUDAK_CONF(num_blocks, num_threads, shared_mem) \ (num_sources, num_stations, d_jones, d_I, d_Q, d_U, d_V, \ d_l, d_m, d_n, d_a, d_b, d_c, \ d_station_u, d_station_v, d_station_w, \ d_station_x, d_station_y, uv_min_lambda, uv_max_lambda, \ inv_wavelength, frac_bandwidth, time_int_sec, \ gha0_rad, dec0_rad, d_vis); #define XCORR_SELECT(NAME, GAUSSIAN, REAL, REAL2, REAL8) \ if (frac_bandwidth == (REAL)0 && time_int_sec == (REAL)0) \ XCORR_KERNEL(NAME, false, false, GAUSSIAN, REAL, REAL2, REAL8) \ else if (frac_bandwidth != (REAL)0 && time_int_sec == (REAL)0) \ XCORR_KERNEL(NAME, true, false, GAUSSIAN, REAL, REAL2, REAL8) \ else if (frac_bandwidth == (REAL)0 && time_int_sec != (REAL)0) \ XCORR_KERNEL(NAME, false, true, GAUSSIAN, REAL, REAL2, REAL8) \ else if (frac_bandwidth != (REAL)0 && time_int_sec != (REAL)0) \ XCORR_KERNEL(NAME, true, true, GAUSSIAN, REAL, REAL2, REAL8) void oskar_cross_correlate_point_cuda_f( int num_sources, int num_stations, const float4c* d_jones, const float* d_I, const float* d_Q, const float* d_U, const float* d_V, const float* d_l, const float* d_m, const float* d_n, const float* d_station_u, const float* d_station_v, const float* d_station_w, const float* d_station_x, const float* d_station_y, float uv_min_lambda, float uv_max_lambda, float inv_wavelength, float frac_bandwidth, float time_int_sec, float gha0_rad, float dec0_rad, float4c* d_vis) { const dim3 num_threads(128, 1); const float *d_a = 0, *d_b = 0, *d_c = 0; if (correlate_version() == VER_NON_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, false, float, float2, float4c) } else if (correlate_version() == VER_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, false, float, float2, float4c) } else { dim3 num_blocks(num_stations, num_stations); const size_t shared_mem = num_threads.x * sizeof(float4c); XCORR_SELECT(oskar_xcorr_cudak, false, float, float2, float4c) } } void oskar_cross_correlate_point_cuda_d( int num_sources, int num_stations, const double4c* d_jones, const double* d_I, const double* d_Q, const double* d_U, const double* d_V, const double* d_l, const double* d_m, const double* d_n, const double* d_station_u, const double* d_station_v, const double* d_station_w, const double* d_station_x, const double* d_station_y, double uv_min_lambda, double uv_max_lambda, double inv_wavelength, double frac_bandwidth, double time_int_sec, double gha0_rad, double dec0_rad, double4c* d_vis) { const double *d_a = 0, *d_b = 0, *d_c = 0; if (correlate_version() == VER_NON_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, false, double, double2, double4c) } else if 
(correlate_version() == VER_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, false, double, double2, double4c) } else { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, num_stations); if (time_int_sec != 0.0) num_threads.x = 64; const size_t shared_mem = num_threads.x * sizeof(double4c); XCORR_SELECT(oskar_xcorr_cudak, false, double, double2, double4c) } } void oskar_cross_correlate_gaussian_cuda_f( int num_sources, int num_stations, const float4c* d_jones, const float* d_I, const float* d_Q, const float* d_U, const float* d_V, const float* d_l, const float* d_m, const float* d_n, const float* d_a, const float* d_b, const float* d_c, const float* d_station_u, const float* d_station_v, const float* d_station_w, const float* d_station_x, const float* d_station_y, float uv_min_lambda, float uv_max_lambda, float inv_wavelength, float frac_bandwidth, float time_int_sec, float gha0_rad, float dec0_rad, float4c* d_vis) { const dim3 num_threads(128, 1); if (correlate_version() == VER_NON_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, true, float, float2, float4c) } else if (correlate_version() == VER_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, true, float, float2, float4c) } else { dim3 num_blocks(num_stations, num_stations); const size_t shared_mem = num_threads.x * sizeof(float4c); XCORR_SELECT(oskar_xcorr_cudak, true, float, float2, float4c) } } void oskar_cross_correlate_gaussian_cuda_d( int num_sources, int num_stations, const double4c* d_jones, const double* d_I, const double* d_Q, const double* d_U, const double* d_V, const double* d_l, const double* d_m, const double* d_n, const double* d_a, const double* d_b, const double* d_c, const double* d_station_u, const double* d_station_v, const double* d_station_w, const double* d_station_x, const double* d_station_y, double uv_min_lambda, double uv_max_lambda, double inv_wavelength, double frac_bandwidth, double time_int_sec, double gha0_rad, double dec0_rad, double4c* d_vis) { if (correlate_version() == VER_NON_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, true, double, double2, double4c) } else if (correlate_version() == VER_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, true, double, double2, double4c) } else { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, num_stations); if (time_int_sec != 0.0) num_threads.x = 64; const size_t shared_mem = num_threads.x * sizeof(double4c); XCORR_SELECT(oskar_xcorr_cudak, true, double, double2, double4c) } } int correlate_version() { if (ver_ == 0) { const char* v = getenv("OSKAR_CORRELATE"); if (v) { if (!strcmp(v, "OLD") || !strcmp(v, "old")) ver_ = VER_OLD; else if (!strcmp(v, "SM") || !strcmp(v, "sm")) ver_ = VER_SM; else if (strstr(v, "NO") || strstr(v, "no")) ver_ = VER_NON_SM; } if (ver_ == 0) { const int compute = oskar_device_compute_capability(); if (compute >= 70) ver_ = VER_NON_SM; else if (compute >= 30) ver_ = VER_SM; else ver_ = VER_OLD; } } return ver_; }
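// The per-source loop in all three kernels above (oskar_xcorr_cudak and its
// NON_SM / SM variants) applies the same smearing factor before accumulating
// the visibility. The host-side helper below is only a sketch of that factor,
// written out for clarity: the baseline terms uu, vv, ww (and du, dv, dw for
// time smearing, uu2, uuvv, vv2, a, b, c for Gaussian sources) are prepared by
// OSKAR_BASELINE_TERMS / OSKAR_BASELINE_DELTAS in
// private_correlate_functions_inline.h, which is not shown here, so they are
// taken as already-scaled inputs; oskar_sinc is assumed to be the usual
// sin(x)/x with a guard at x = 0. Names ending in _sketch are illustrative
// and not part of OSKAR.
#include <cmath>

static double sinc_sketch(double x)
{
    return x == 0.0 ? 1.0 : std::sin(x) / x;
}

static double smearing_factor_sketch(
        bool bandwidth, bool time, bool gaussian,
        double l, double m, double n_minus_1,       // n_minus_1 = source_n - 1
        double uu, double vv, double ww,            // bandwidth-smearing terms
        double du, double dv, double dw,            // time-smearing terms
        double uu2, double uuvv, double vv2,        // Gaussian source terms
        double a, double b, double c)
{
    double smearing = 1.0;
    if (gaussian)
        smearing = std::exp(-(a * uu2 + b * uuvv + c * vv2));
    if (bandwidth)
        smearing *= sinc_sketch(uu * l + vv * m + ww * n_minus_1);
    if (time)
        smearing *= sinc_sketch(du * l + dv * m + dw * n_minus_1);
    return smearing;
}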
c5fe3de059b7093fe556a2a5d51c2a135acdd6db.cu
/* * Copyright (c) 2011-2018, The University of Oxford * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of the University of Oxford nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "correlate/private_correlate_functions_inline.h" #include "correlate/oskar_cross_correlate_cuda.h" #include "math/oskar_add_inline.h" #include "utility/oskar_device_utils.h" #include <cuda_runtime.h> #include <cstdlib> #include <cstring> // Indices into the visibility/baseline matrix. #define SP blockIdx.x /* Column index. */ #define SQ blockIdx.y /* Row index. */ enum { VER_OLD = 1, VER_NON_SM = 2, VER_SM = 3 }; static int ver_ = 0; static int correlate_version(void); template < // Compile-time parameters. bool BANDWIDTH_SMEARING, bool TIME_SMEARING, bool GAUSSIAN, typename REAL, typename REAL2, typename REAL8 > __global__ void oskar_xcorr_cudak( const int num_sources, const int num_stations, const REAL8* const restrict jones, const REAL* const restrict source_I, const REAL* const restrict source_Q, const REAL* const restrict source_U, const REAL* const restrict source_V, const REAL* const restrict source_l, const REAL* const restrict source_m, const REAL* const restrict source_n, const REAL* const restrict source_a, const REAL* const restrict source_b, const REAL* const restrict source_c, const REAL* const restrict station_u, const REAL* const restrict station_v, const REAL* const restrict station_w, const REAL* const restrict station_x, const REAL* const restrict station_y, const REAL uv_min_lambda, const REAL uv_max_lambda, const REAL inv_wavelength, const REAL frac_bandwidth, const REAL time_int_sec, const REAL gha0_rad, const REAL dec0_rad, REAL8* restrict vis) { extern __shared__ __align__(sizeof(double4c)) unsigned char my_smem[]; __shared__ REAL uv_len, uu, vv, ww, uu2, vv2, uuvv, du, dv, dw; REAL8 m1, m2, sum; // Partial sum per thread. REAL8* smem = reinterpret_cast<REAL8*>(my_smem); // Allows template. // Return immediately if in the wrong half of the visibility matrix. if (SQ >= SP) return; // Get common baseline values per thread block. 
if (threadIdx.x == 0) { OSKAR_BASELINE_TERMS(REAL, station_u[SP], station_u[SQ], station_v[SP], station_v[SQ], station_w[SP], station_w[SQ], uu, vv, ww, uu2, vv2, uuvv, uv_len); if (TIME_SMEARING) OSKAR_BASELINE_DELTAS(REAL, station_x[SP], station_x[SQ], station_y[SP], station_y[SQ], du, dv, dw); } __syncthreads(); // Apply the baseline length filter. if (uv_len < uv_min_lambda || uv_len > uv_max_lambda) return; // Get pointers to source vectors for both stations. const REAL8* const restrict station_p = &jones[num_sources * SP]; const REAL8* const restrict station_q = &jones[num_sources * SQ]; // Each thread loops over a subset of the sources. OSKAR_CLEAR_COMPLEX_MATRIX(REAL, sum) for (int i = threadIdx.x; i < num_sources; i += blockDim.x) { REAL smearing; if (GAUSSIAN) { const REAL t = source_a[i] * uu2 + source_b[i] * uuvv + source_c[i] * vv2; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = source_l[i]; const REAL m = source_m[i]; const REAL n = source_n[i] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu * l + vv * m + ww * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du * l + dv * m + dw * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, source_I[i], source_Q[i], source_U[i], source_V[i]) // Multiply first Jones matrix with source brightness matrix. OSKAR_LOAD_MATRIX(m1, station_p[i]) OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, station_q[i]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) } // Store partial sum for the thread in shared memory. smem[threadIdx.x] = sum; __syncthreads(); // Accumulate contents of shared memory. if (threadIdx.x == 0) { // Sum over all sources for this baseline. for (int i = 1; i < blockDim.x; ++i) OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(sum, smem[i]); // Add result of this thread block to the baseline visibility. int i = oskar_evaluate_baseline_index_inline(num_stations, SP, SQ); OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[i], sum); } } #define OKN_NSOURCES 32 #define OKN_BPK 4 /* baselines per kernel */ #define WARP 32 template < // Compile-time parameters. 
bool BANDWIDTH_SMEARING, bool TIME_SMEARING, bool GAUSSIAN, typename REAL, typename REAL2, typename REAL8 > __global__ void oskar_xcorr_NON_SM_cudak( const int num_sources, const int num_stations, const REAL8* const restrict jones, const REAL* const restrict source_I, const REAL* const restrict source_Q, const REAL* const restrict source_U, const REAL* const restrict source_V, const REAL* const restrict source_l, const REAL* const restrict source_m, const REAL* const restrict source_n, const REAL* const restrict source_a, const REAL* const restrict source_b, const REAL* const restrict source_c, const REAL* const restrict station_u, const REAL* const restrict station_v, const REAL* const restrict station_w, const REAL* const restrict station_x, const REAL* const restrict station_y, const REAL uv_min_lambda, const REAL uv_max_lambda, const REAL inv_wavelength, const REAL frac_bandwidth, const REAL time_int_sec, const REAL gha0_rad, const REAL dec0_rad, REAL8* restrict vis) { __shared__ REAL uv_len[OKN_BPK], uu[OKN_BPK], vv[OKN_BPK], ww[OKN_BPK]; __shared__ REAL uu2[OKN_BPK], vv2[OKN_BPK], uuvv[OKN_BPK]; __shared__ REAL du[OKN_BPK], dv[OKN_BPK], dw[OKN_BPK]; __shared__ const REAL8 *station_q[OKN_BPK]; REAL8 m1, m2, sum; const int w = (threadIdx.x >> 5); // Warp ID. const int i = (threadIdx.x & 31); // ID within warp (local ID). // Return immediately if in the wrong half of the visibility matrix. if (OKN_BPK * SQ >= SP) return; // Get baseline values per warp. if (i == 0) { const int i_sq = OKN_BPK * SQ + w; // Set pointer to source vector for station q to safe position // so non-existence SQ >= SP does not cause problems. station_q[w] = &jones[0]; if (i_sq < num_stations) { OSKAR_BASELINE_TERMS(REAL, station_u[SP], station_u[i_sq], station_v[SP], station_v[i_sq], station_w[SP], station_w[i_sq], uu[w], vv[w], ww[w], uu2[w], vv2[w], uuvv[w], uv_len[w]); if (TIME_SMEARING) OSKAR_BASELINE_DELTAS(REAL, station_x[SP], station_x[i_sq], station_y[SP], station_y[i_sq], du[w], dv[w], dw[w]); // Get valid pointer to source vector for station q. station_q[w] = &jones[num_sources * i_sq]; } } __syncthreads(); // Get pointer to source vector for station p. const REAL8* const restrict station_p = &jones[num_sources * SP]; // Each thread from given warp loops over a subset of the sources, // and each warp works with a different station q. OSKAR_CLEAR_COMPLEX_MATRIX(REAL, sum) int itemp = (num_sources >> 5) * WARP; for (int outer = i; outer < itemp; outer += WARP) { REAL smearing; if (GAUSSIAN) { const REAL t = source_a[outer] * uu2[w] + source_b[outer] * uuvv[w] + source_c[outer] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = source_l[outer]; const REAL m = source_m[outer]; const REAL n = source_n[outer] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, source_I[outer], source_Q[outer], source_U[outer], source_V[outer]) // Multiply first Jones matrix with source brightness matrix. m1 = station_p[outer]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. 
OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) __syncthreads(); } if ((num_sources & 31) > 0) { int outer = (num_sources >> 5) * WARP + i; if (outer < num_sources) { REAL smearing; if (GAUSSIAN) { const REAL t = source_a[outer] * uu2[w] + source_b[outer] * uuvv[w] + source_c[outer] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = source_l[outer]; const REAL m = source_m[outer]; const REAL n = source_n[outer] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, source_I[outer], source_Q[outer], source_U[outer], source_V[outer]) // Multiply first Jones matrix with source brightness matrix. m1 = station_p[outer]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) } } // Reduce matrices within warp. OSKAR_WARP_REDUCE(sum.a.x); OSKAR_WARP_REDUCE(sum.a.y); OSKAR_WARP_REDUCE(sum.b.x); OSKAR_WARP_REDUCE(sum.b.y); OSKAR_WARP_REDUCE(sum.c.x); OSKAR_WARP_REDUCE(sum.c.y); OSKAR_WARP_REDUCE(sum.d.x); OSKAR_WARP_REDUCE(sum.d.y); // Add result of this warp to the baseline visibility. if (i == 0 && (OKN_BPK * SQ + w) < SP) { if (uv_len[w] < uv_min_lambda || uv_len[w] > uv_max_lambda) return; const int j = oskar_evaluate_baseline_index_inline(num_stations, SP, OKN_BPK * SQ + w); OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[j], sum); } } template < // Compile-time parameters. 
bool BANDWIDTH_SMEARING, bool TIME_SMEARING, bool GAUSSIAN, typename REAL, typename REAL2, typename REAL8 > __global__ void oskar_xcorr_SM_cudak( const int num_sources, const int num_stations, const REAL8* const restrict jones, const REAL* const restrict source_I, const REAL* const restrict source_Q, const REAL* const restrict source_U, const REAL* const restrict source_V, const REAL* const restrict source_l, const REAL* const restrict source_m, const REAL* const restrict source_n, const REAL* const restrict source_a, const REAL* const restrict source_b, const REAL* const restrict source_c, const REAL* const restrict station_u, const REAL* const restrict station_v, const REAL* const restrict station_w, const REAL* const restrict station_x, const REAL* const restrict station_y, const REAL uv_min_lambda, const REAL uv_max_lambda, const REAL inv_wavelength, const REAL frac_bandwidth, const REAL time_int_sec, const REAL gha0_rad, const REAL dec0_rad, REAL8* restrict vis) { __shared__ REAL uv_len[OKN_BPK], uu[OKN_BPK], vv[OKN_BPK], ww[OKN_BPK]; __shared__ REAL uu2[OKN_BPK], vv2[OKN_BPK], uuvv[OKN_BPK]; __shared__ REAL du[OKN_BPK], dv[OKN_BPK], dw[OKN_BPK]; __shared__ const REAL8 *station_q[OKN_BPK]; __shared__ REAL s_I[OKN_NSOURCES]; __shared__ REAL s_Q[OKN_NSOURCES]; __shared__ REAL s_U[OKN_NSOURCES]; __shared__ REAL s_V[OKN_NSOURCES]; __shared__ REAL s_l[OKN_NSOURCES]; __shared__ REAL s_m[OKN_NSOURCES]; __shared__ REAL s_n[OKN_NSOURCES]; __shared__ REAL s_a[OKN_NSOURCES]; __shared__ REAL s_b[OKN_NSOURCES]; __shared__ REAL s_c[OKN_NSOURCES]; __shared__ REAL8 s_sp[OKN_NSOURCES]; REAL8 m1, m2, sum; const int w = (threadIdx.x >> 5); // Warp ID. const int i = (threadIdx.x & 31); // ID within warp (local ID). // Return immediately if in the wrong half of the visibility matrix. if (OKN_BPK * SQ >= SP) return; // Get baseline values per warp. if (i == 0) { const int i_sq = OKN_BPK * SQ + w; // Set pointer to source vector for station q to safe position // so non-existence SQ >= SP does not cause problems. station_q[w] = &jones[0]; if (i_sq < num_stations) { OSKAR_BASELINE_TERMS(REAL, station_u[SP], station_u[i_sq], station_v[SP], station_v[i_sq], station_w[SP], station_w[i_sq], uu[w], vv[w], ww[w], uu2[w], vv2[w], uuvv[w], uv_len[w]); if (TIME_SMEARING) OSKAR_BASELINE_DELTAS(REAL, station_x[SP], station_x[i_sq], station_y[SP], station_y[i_sq], du[w], dv[w], dw[w]); // Get valid pointer to source vector for station q. station_q[w] = &jones[num_sources * i_sq]; } } __syncthreads(); // Get pointer to source vector for station p. const REAL8* const restrict station_p = &jones[num_sources * SP]; // Each thread from given warp loops over a subset of the sources, // and each warp works with a different station q. 
OSKAR_CLEAR_COMPLEX_MATRIX(REAL, sum) int itemp = (num_sources >> 5) * WARP; for (int outer = i; outer < itemp; outer += WARP) { if (w == 0) { s_I[i] = source_I[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_l[i] = source_l[outer]; if (GAUSSIAN) { s_a[i] = source_a[outer]; s_b[i] = source_b[outer]; } } if (w == 1) { s_Q[i] = source_Q[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_m[i] = source_m[outer]; if (GAUSSIAN) s_c[i] = source_c[outer]; } if (w == 2) { s_U[i] = source_U[outer]; s_V[i] = source_V[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_n[i] = source_n[outer]; } if (w == 3) { s_sp[i] = station_p[outer]; } __syncthreads(); REAL smearing; if (GAUSSIAN) { const REAL t = s_a[i] * uu2[w] + s_b[i] * uuvv[w] + s_c[i] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = s_l[i]; const REAL m = s_m[i]; const REAL n = s_n[i] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, s_I[i], s_Q[i], s_U[i], s_V[i]) // Multiply first Jones matrix with source brightness matrix. m1 = s_sp[i]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) __syncthreads(); } if ((num_sources & 31) > 0) { int outer = (num_sources >> 5) * WARP + i; if (outer < num_sources) { if (w == 0) { s_I[i] = source_I[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_l[i] = source_l[outer]; if (GAUSSIAN) { s_a[i] = source_a[outer]; s_b[i] = source_b[outer]; } } if (w == 1) { s_Q[i] = source_Q[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_m[i] = source_m[outer]; if (GAUSSIAN) s_c[i] = source_c[outer]; } if (w == 2) { s_U[i] = source_U[outer]; s_V[i] = source_V[outer]; if (BANDWIDTH_SMEARING || TIME_SMEARING) s_n[i] = source_n[outer]; } if (w == 3) { s_sp[i] = station_p[outer]; } } __syncthreads(); if (outer < num_sources) { REAL smearing; if (GAUSSIAN) { const REAL t = s_a[i] * uu2[w] + s_b[i] * uuvv[w] + s_c[i] * vv2[w]; smearing = exp((REAL) -t); } else { smearing = (REAL) 1; } if (BANDWIDTH_SMEARING || TIME_SMEARING) { const REAL l = s_l[i]; const REAL m = s_m[i]; const REAL n = s_n[i] - (REAL) 1; if (BANDWIDTH_SMEARING) { const REAL t = uu[w] * l + vv[w] * m + ww[w] * n; smearing *= oskar_sinc<REAL>(t); } if (TIME_SMEARING) { const REAL t = du[w] * l + dv[w] * m + dw[w] * n; smearing *= oskar_sinc<REAL>(t); } } // Construct source brightness matrix. OSKAR_CONSTRUCT_B(REAL, m2, s_I[i], s_Q[i], s_U[i], s_V[i]) // Multiply first Jones matrix with source brightness matrix. m1 = s_sp[i]; OSKAR_MUL_COMPLEX_MATRIX_HERMITIAN_IN_PLACE(REAL2, m1, m2) // Multiply result with second (Hermitian transposed) Jones matrix. OSKAR_LOAD_MATRIX(m2, (station_q[w])[outer]) OSKAR_MUL_COMPLEX_MATRIX_CONJUGATE_TRANSPOSE_IN_PLACE(REAL2, m1, m2) // Multiply result by smearing term and accumulate. OSKAR_MUL_ADD_COMPLEX_MATRIX_SCALAR(sum, m1, smearing) } } // Reduce matrices within warp. 
OSKAR_WARP_REDUCE(sum.a.x); OSKAR_WARP_REDUCE(sum.a.y); OSKAR_WARP_REDUCE(sum.b.x); OSKAR_WARP_REDUCE(sum.b.y); OSKAR_WARP_REDUCE(sum.c.x); OSKAR_WARP_REDUCE(sum.c.y); OSKAR_WARP_REDUCE(sum.d.x); OSKAR_WARP_REDUCE(sum.d.y); // Add result of this warp to the baseline visibility. if (i == 0 && (OKN_BPK * SQ + w) < SP) { if (uv_len[w] < uv_min_lambda || uv_len[w] > uv_max_lambda) return; const int j = oskar_evaluate_baseline_index_inline(num_stations, SP, OKN_BPK * SQ + w); OSKAR_ADD_COMPLEX_MATRIX_IN_PLACE(vis[j], sum); } } #define XCORR_KERNEL(NAME, BS, TS, GAUSSIAN, REAL, REAL2, REAL8) \ NAME<BS, TS, GAUSSIAN, REAL, REAL2, REAL8> \ OSKAR_CUDAK_CONF(num_blocks, num_threads, shared_mem) \ (num_sources, num_stations, d_jones, d_I, d_Q, d_U, d_V, \ d_l, d_m, d_n, d_a, d_b, d_c, \ d_station_u, d_station_v, d_station_w, \ d_station_x, d_station_y, uv_min_lambda, uv_max_lambda, \ inv_wavelength, frac_bandwidth, time_int_sec, \ gha0_rad, dec0_rad, d_vis); #define XCORR_SELECT(NAME, GAUSSIAN, REAL, REAL2, REAL8) \ if (frac_bandwidth == (REAL)0 && time_int_sec == (REAL)0) \ XCORR_KERNEL(NAME, false, false, GAUSSIAN, REAL, REAL2, REAL8) \ else if (frac_bandwidth != (REAL)0 && time_int_sec == (REAL)0) \ XCORR_KERNEL(NAME, true, false, GAUSSIAN, REAL, REAL2, REAL8) \ else if (frac_bandwidth == (REAL)0 && time_int_sec != (REAL)0) \ XCORR_KERNEL(NAME, false, true, GAUSSIAN, REAL, REAL2, REAL8) \ else if (frac_bandwidth != (REAL)0 && time_int_sec != (REAL)0) \ XCORR_KERNEL(NAME, true, true, GAUSSIAN, REAL, REAL2, REAL8) void oskar_cross_correlate_point_cuda_f( int num_sources, int num_stations, const float4c* d_jones, const float* d_I, const float* d_Q, const float* d_U, const float* d_V, const float* d_l, const float* d_m, const float* d_n, const float* d_station_u, const float* d_station_v, const float* d_station_w, const float* d_station_x, const float* d_station_y, float uv_min_lambda, float uv_max_lambda, float inv_wavelength, float frac_bandwidth, float time_int_sec, float gha0_rad, float dec0_rad, float4c* d_vis) { const dim3 num_threads(128, 1); const float *d_a = 0, *d_b = 0, *d_c = 0; if (correlate_version() == VER_NON_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, false, float, float2, float4c) } else if (correlate_version() == VER_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, false, float, float2, float4c) } else { dim3 num_blocks(num_stations, num_stations); const size_t shared_mem = num_threads.x * sizeof(float4c); XCORR_SELECT(oskar_xcorr_cudak, false, float, float2, float4c) } } void oskar_cross_correlate_point_cuda_d( int num_sources, int num_stations, const double4c* d_jones, const double* d_I, const double* d_Q, const double* d_U, const double* d_V, const double* d_l, const double* d_m, const double* d_n, const double* d_station_u, const double* d_station_v, const double* d_station_w, const double* d_station_x, const double* d_station_y, double uv_min_lambda, double uv_max_lambda, double inv_wavelength, double frac_bandwidth, double time_int_sec, double gha0_rad, double dec0_rad, double4c* d_vis) { const double *d_a = 0, *d_b = 0, *d_c = 0; if (correlate_version() == VER_NON_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, false, double, double2, double4c) } else if 
(correlate_version() == VER_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, false, double, double2, double4c) } else { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, num_stations); if (time_int_sec != 0.0) num_threads.x = 64; const size_t shared_mem = num_threads.x * sizeof(double4c); XCORR_SELECT(oskar_xcorr_cudak, false, double, double2, double4c) } } void oskar_cross_correlate_gaussian_cuda_f( int num_sources, int num_stations, const float4c* d_jones, const float* d_I, const float* d_Q, const float* d_U, const float* d_V, const float* d_l, const float* d_m, const float* d_n, const float* d_a, const float* d_b, const float* d_c, const float* d_station_u, const float* d_station_v, const float* d_station_w, const float* d_station_x, const float* d_station_y, float uv_min_lambda, float uv_max_lambda, float inv_wavelength, float frac_bandwidth, float time_int_sec, float gha0_rad, float dec0_rad, float4c* d_vis) { const dim3 num_threads(128, 1); if (correlate_version() == VER_NON_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, true, float, float2, float4c) } else if (correlate_version() == VER_SM) { dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, true, float, float2, float4c) } else { dim3 num_blocks(num_stations, num_stations); const size_t shared_mem = num_threads.x * sizeof(float4c); XCORR_SELECT(oskar_xcorr_cudak, true, float, float2, float4c) } } void oskar_cross_correlate_gaussian_cuda_d( int num_sources, int num_stations, const double4c* d_jones, const double* d_I, const double* d_Q, const double* d_U, const double* d_V, const double* d_l, const double* d_m, const double* d_n, const double* d_a, const double* d_b, const double* d_c, const double* d_station_u, const double* d_station_v, const double* d_station_w, const double* d_station_x, const double* d_station_y, double uv_min_lambda, double uv_max_lambda, double inv_wavelength, double frac_bandwidth, double time_int_sec, double gha0_rad, double dec0_rad, double4c* d_vis) { if (correlate_version() == VER_NON_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_NON_SM_cudak, true, double, double2, double4c) } else if (correlate_version() == VER_SM) { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, (num_stations + OKN_BPK - 1) / OKN_BPK); const size_t shared_mem = 0; XCORR_SELECT(oskar_xcorr_SM_cudak, true, double, double2, double4c) } else { dim3 num_threads(128, 1); dim3 num_blocks(num_stations, num_stations); if (time_int_sec != 0.0) num_threads.x = 64; const size_t shared_mem = num_threads.x * sizeof(double4c); XCORR_SELECT(oskar_xcorr_cudak, true, double, double2, double4c) } } int correlate_version() { if (ver_ == 0) { const char* v = getenv("OSKAR_CORRELATE"); if (v) { if (!strcmp(v, "OLD") || !strcmp(v, "old")) ver_ = VER_OLD; else if (!strcmp(v, "SM") || !strcmp(v, "sm")) ver_ = VER_SM; else if (strstr(v, "NO") || strstr(v, "no")) ver_ = VER_NON_SM; } if (ver_ == 0) { const int compute = oskar_device_compute_capability(); if (compute >= 70) ver_ = VER_NON_SM; else if (compute >= 30) ver_ = VER_SM; else ver_ = VER_OLD; } } return ver_; }
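// The NON_SM and SM kernels above reduce each component of the 2x2 complex
// visibility sum across the 32 lanes of a warp with OSKAR_WARP_REDUCE before
// lane 0 adds the result to vis[]. That macro is defined elsewhere in OSKAR
// and may well be implemented differently (for example via shared memory on
// older hardware); the program below is only a minimal, self-contained
// illustration of the same warp-level reduction idea using __shfl_down_sync,
// with hypothetical names (warp_reduce_demo, warp_sum_kernel) that do not
// appear in the library.
#include <cstdio>
#include <cuda_runtime.h>

__device__ inline float warp_reduce_demo(float x)
{
    // After log2(32) = 5 shuffle steps, lane 0 holds the sum of all 32 lanes.
    for (int offset = 16; offset > 0; offset >>= 1)
        x += __shfl_down_sync(0xFFFFFFFFu, x, offset);
    return x;
}

__global__ void warp_sum_kernel(const float* in, float* out)
{
    float v = warp_reduce_demo(in[threadIdx.x]);
    if (threadIdx.x == 0) *out = v;   // Only lane 0 has the full sum.
}

int main()
{
    float h_in[32], *d_in, *d_out, h_out = 0.0f;
    for (int i = 0; i < 32; ++i) h_in[i] = 1.0f;   // Expected sum: 32.
    cudaMalloc((void**)&d_in, 32 * sizeof(float));
    cudaMalloc((void**)&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, 32 * sizeof(float), cudaMemcpyHostToDevice);
    warp_sum_kernel<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("warp sum = %f\n", h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}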
122b7f0820c9f174468768f9b3e834f92f44dad9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_utils.cuh" #include <ATen/ATen.h> #ifdef VERSION_LE_04 #include "ATen/hip/AccumulateType.cuh" #else #include "ATen/AccumulateType.h" #endif #include "ATen/hip/HIPTensorMethods.cuh" // #include "ATen/hip/HIPTypeConversion.cuh" // #include <THH/THHTensorMathReduce.cuh> template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_first_dim_kernel (scalar_t* __restrict__ pLpv, scalar_t* __restrict__ pLpg, const scalar_t* __restrict__ pLpw, const scalar_t* __restrict__ savedv, const scalar_t* __restrict__ savedg, const accscalar_t* __restrict__ savedNorms, const int rowSize) { // For now, assign one block to each row. const int tid = threadIdx.x; const int row = blockIdx.x; const int stride = blockDim.x; // Logical index offset for this flattened row const int rowStart = row*rowSize; // Hack to get around nvcc complaining when an smem array is declared with the same name // but different types in different kernels (in this case different instantiations) // extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s" extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; for(int i = tid; i < rowSize; i += stride ) { accscalar_t pLpwi = scalar_cast<accscalar_t>(pLpw[i+rowStart]); accscalar_t savedvi = scalar_cast<accscalar_t>(savedv[i+rowStart]); thread_sum += pLpwi*savedvi; // AccumOp, could do Kahan here } reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>()); accscalar_t result = s[0]; // Could choose to save reciprocal of norm instead I suppose, but norms is probably // more handy to keep around. // Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/savedNorms[row]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(tid == 0) pLpg[row] = scalar_cast<scalar_t>(result*rnorm); // Broadcast load, could use shared memory instead. accscalar_t g_this_row = scalar_cast<accscalar_t>(savedg[row]); // Write v gradients. We are reusing values that were loaded earlier, so there // is an optimization opportunity here (store values persistently). 
for(int j = tid; j < rowSize; j += stride ) { accscalar_t pLpwj = scalar_cast<accscalar_t>(pLpw[j+rowStart]); accscalar_t savedvj = scalar_cast<accscalar_t>(savedv[j+rowStart]); accscalar_t pLpvj = g_this_row*(rnorm*pLpwj - rnorm3*savedvj*result); pLpv[j+rowStart] = scalar_cast<scalar_t>(pLpvj); } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_last_dim_kernel (scalar_t* __restrict__ pLpv, scalar_t* __restrict__ pLpg, const scalar_t* __restrict__ pLpw, const scalar_t* __restrict__ savedv, const scalar_t* __restrict__ savedg, const accscalar_t* __restrict__ savedNorms, const int fast_dim_size, const int slower_dims_size) { const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; int slower_dims_location = threadIdx.y; int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t pLpwi = scalar_cast<accscalar_t>(pLpw[currentIdx]); accscalar_t savedvi = scalar_cast<accscalar_t>(savedv[currentIdx]); thread_sum += pLpwi*savedvi; // AccumOp, could do Kahan here currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>()); accscalar_t result = s[threadIdx.x]; // Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/savedNorms[fast_dim_location]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(threadIdx.y == 0) pLpg[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm); // Entire block pulls these values, could use shared memory instead. accscalar_t g_this_col = scalar_cast<accscalar_t>(savedg[fast_dim_location]); // Write v gradients. 
slower_dims_location = threadIdx.y; currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t pLpwj = scalar_cast<accscalar_t>(pLpw[currentIdx]); accscalar_t savedvj = scalar_cast<accscalar_t>(savedv[currentIdx]); accscalar_t pLpvj = g_this_col*(rnorm*pLpwj - rnorm3*savedvj*result); pLpv[currentIdx] = scalar_cast<scalar_t>(pLpvj); currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } } void weight_norm_bwd_cuda (const at::Tensor& pLpv, const at::Tensor& pLpg, const at::Tensor& pLpw, const at::Tensor& savedv, const at::Tensor& savedg, const at::Tensor& savedNorms, int dim) { #ifdef DEBUG_ANY using namespace std; cout << "Hello from send_to_bwd with pLpw.type() = " << pLpw.type() << endl; #endif const int ndims = savedv.ndimension(); if(dim == 0) { // Find logical size of each flattened slowest-dim row int rowSize = 1; for(int i = ndims - 1; i > 0; i--) rowSize *= savedv.size(i); using namespace at; hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (savedv.type(), "weight_norm_bwd_first_dim_kernel", [&] { using cuda_scalar_t = apex::cuda::type<scalar_t>; USING_ACCSCALAR_T hipLaunchKernelGGL(( weight_norm_bwd_first_dim_kernel) , dim3(pLpw.size(0)), dim3( BLOCK), BLOCK*sizeof(accscalar_t), stream, pLpv.data<cuda_scalar_t>(), pLpg.data<cuda_scalar_t>(), pLpw.data<cuda_scalar_t>(), savedv.data<cuda_scalar_t>(), savedg.data<cuda_scalar_t>(), savedNorms.data<accscalar_t>(), rowSize); }); } else if(dim == ndims - 1) { // Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array. int slower_dims_size = 1; for(int i = 0; i < ndims - 1; i++) slower_dims_size *= savedv.size(i); int fast_dim_size = savedv.size(ndims-1); using namespace at; hipStream_t stream = globalContext().getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (savedv.type(), "weight_norm_bwd_last_dim_kernel", [&] { using cuda_scalar_t = apex::cuda::type<scalar_t>; USING_ACCSCALAR_T hipLaunchKernelGGL(( weight_norm_bwd_last_dim_kernel) , dim3((fast_dim_size+TILE_W-1)/TILE_W), dim3(dim3(TILE_W,TILE_H)), (TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t), stream, pLpv.data<cuda_scalar_t>(), pLpg.data<cuda_scalar_t>(), pLpw.data<cuda_scalar_t>(), savedv.data<cuda_scalar_t>(), savedg.data<cuda_scalar_t>(), savedNorms.data<accscalar_t>(), fast_dim_size, slower_dims_size); }); } // else // { // intermediate dim kernel. Error checking on the dim was already done in // Module.cpp:weight_norm_bwd. Could put that logic here instead, if we include // <python.h> in both files. // } // The kernel execution is asynchronous, so this will only catch errors on the kernel launch, // not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught // until a later error check on a synchronizing CUDA call. Unfortunately, without manually // synchronizing here, this is the best we can do. THCudaCheck(hipGetLastError()); #ifdef DEBUG_PROFILE THCudaCheck(hipDeviceSynchronize()); #endif }
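// The two kernels above implement, per weight-norm row (or per column in the
// last-dim case), the gradients of the reparameterisation w = g * v / ||v||:
//
//   dL/dg = (dL/dw . v) / ||v||
//   dL/dv = g * ( dL/dw / ||v|| - v * (dL/dw . v) / ||v||^3 )
//
// which is exactly what the kernels compute with result = sum(pLpw * v),
// rnorm = 1/||v|| and rnorm3 = rnorm^3. The function below is a plain CPU
// reference of the same per-row arithmetic, useful only as a sketch for
// checking a single row on the host; its name and signature are illustrative
// and not part of apex.
#include <cstddef>
#include <vector>

static void weight_norm_bwd_row_reference(
        const std::vector<float>& pLpw,   // upstream gradient for this row
        const std::vector<float>& v,      // saved direction vector
        float g,                          // saved magnitude for this row
        float norm,                       // saved ||v|| for this row
        std::vector<float>& pLpv,         // output: gradient w.r.t. v
        float& pLpg)                      // output: gradient w.r.t. g
{
    const float rnorm = 1.0f / norm;
    const float rnorm3 = rnorm * rnorm * rnorm;
    float result = 0.0f;                  // dot(pLpw, v): the block reduction in the kernel
    for (size_t i = 0; i < v.size(); ++i)
        result += pLpw[i] * v[i];
    pLpg = result * rnorm;
    pLpv.resize(v.size());
    for (size_t i = 0; i < v.size(); ++i)
        pLpv[i] = g * (rnorm * pLpw[i] - rnorm3 * v[i] * result);
}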
122b7f0820c9f174468768f9b3e834f92f44dad9.cu
#include "kernel_utils.cuh" #include <ATen/ATen.h> #ifdef VERSION_LE_04 #include "ATen/cuda/AccumulateType.cuh" #else #include "ATen/AccumulateType.h" #endif #include "ATen/cuda/CUDATensorMethods.cuh" // #include "ATen/cuda/CUDATypeConversion.cuh" // #include <THC/THCTensorMathReduce.cuh> template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_first_dim_kernel (scalar_t* __restrict__ pLpv, scalar_t* __restrict__ pLpg, const scalar_t* __restrict__ pLpw, const scalar_t* __restrict__ savedv, const scalar_t* __restrict__ savedg, const accscalar_t* __restrict__ savedNorms, const int rowSize) { // For now, assign one block to each row. const int tid = threadIdx.x; const int row = blockIdx.x; const int stride = blockDim.x; // Logical index offset for this flattened row const int rowStart = row*rowSize; // Hack to get around nvcc complaining when an smem array is declared with the same name // but different types in different kernels (in this case different instantiations) // extern __shared__ accscalar_t s[]; // error: declaration is incompatible with previous "s" extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; for(int i = tid; i < rowSize; i += stride ) { accscalar_t pLpwi = scalar_cast<accscalar_t>(pLpw[i+rowStart]); accscalar_t savedvi = scalar_cast<accscalar_t>(savedv[i+rowStart]); thread_sum += pLpwi*savedvi; // AccumOp, could do Kahan here } reduce_block_into_lanes(s, thread_sum, 1, ReduceAdd<accscalar_t>()); accscalar_t result = s[0]; // Could choose to save reciprocal of norm instead I suppose, but norms is probably // more handy to keep around. // Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/savedNorms[row]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(tid == 0) pLpg[row] = scalar_cast<scalar_t>(result*rnorm); // Broadcast load, could use shared memory instead. accscalar_t g_this_row = scalar_cast<accscalar_t>(savedg[row]); // Write v gradients. We are reusing values that were loaded earlier, so there // is an optimization opportunity here (store values persistently). 
for(int j = tid; j < rowSize; j += stride ) { accscalar_t pLpwj = scalar_cast<accscalar_t>(pLpw[j+rowStart]); accscalar_t savedvj = scalar_cast<accscalar_t>(savedv[j+rowStart]); accscalar_t pLpvj = g_this_row*(rnorm*pLpwj - rnorm3*savedvj*result); pLpv[j+rowStart] = scalar_cast<scalar_t>(pLpvj); } } template <typename scalar_t, typename accscalar_t> __global__ void weight_norm_bwd_last_dim_kernel (scalar_t* __restrict__ pLpv, scalar_t* __restrict__ pLpg, const scalar_t* __restrict__ pLpw, const scalar_t* __restrict__ savedv, const scalar_t* __restrict__ savedg, const accscalar_t* __restrict__ savedNorms, const int fast_dim_size, const int slower_dims_size) { const int fast_dim_location = threadIdx.x + blockIdx.x*blockDim.x; extern __shared__ char buf[]; accscalar_t* s = (accscalar_t*)buf; accscalar_t thread_sum = 0.f; int slower_dims_location = threadIdx.y; int currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t pLpwi = scalar_cast<accscalar_t>(pLpw[currentIdx]); accscalar_t savedvi = scalar_cast<accscalar_t>(savedv[currentIdx]); thread_sum += pLpwi*savedvi; // AccumOp, could do Kahan here currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } reduce_block_into_lanes(s, thread_sum, blockDim.x, ReduceAdd<accscalar_t>()); accscalar_t result = s[threadIdx.x]; // Broadcast load; could use shared memory instead. accscalar_t rnorm = 1.f/savedNorms[fast_dim_location]; accscalar_t rnorm3 = rnorm*rnorm*rnorm; // Write g gradients. if(threadIdx.y == 0) pLpg[fast_dim_location] = scalar_cast<scalar_t>(result*rnorm); // Entire block pulls these values, could use shared memory instead. accscalar_t g_this_col = scalar_cast<accscalar_t>(savedg[fast_dim_location]); // Write v gradients. slower_dims_location = threadIdx.y; currentIdx = fast_dim_location + fast_dim_size*slower_dims_location; if(fast_dim_location < fast_dim_size) while(slower_dims_location < slower_dims_size) { accscalar_t pLpwj = scalar_cast<accscalar_t>(pLpw[currentIdx]); accscalar_t savedvj = scalar_cast<accscalar_t>(savedv[currentIdx]); accscalar_t pLpvj = g_this_col*(rnorm*pLpwj - rnorm3*savedvj*result); pLpv[currentIdx] = scalar_cast<scalar_t>(pLpvj); currentIdx += blockDim.y*fast_dim_size; slower_dims_location += blockDim.y; } } void weight_norm_bwd_cuda (const at::Tensor& pLpv, const at::Tensor& pLpg, const at::Tensor& pLpw, const at::Tensor& savedv, const at::Tensor& savedg, const at::Tensor& savedNorms, int dim) { #ifdef DEBUG_ANY using namespace std; cout << "Hello from send_to_bwd with pLpw.type() = " << pLpw.type() << endl; #endif const int ndims = savedv.ndimension(); if(dim == 0) { // Find logical size of each flattened slowest-dim row int rowSize = 1; for(int i = ndims - 1; i > 0; i--) rowSize *= savedv.size(i); using namespace at; cudaStream_t stream = globalContext().getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (savedv.type(), "weight_norm_bwd_first_dim_kernel", [&] { using cuda_scalar_t = apex::cuda::type<scalar_t>; USING_ACCSCALAR_T weight_norm_bwd_first_dim_kernel <<<pLpw.size(0), BLOCK, BLOCK*sizeof(accscalar_t), stream>>> (pLpv.data<cuda_scalar_t>(), pLpg.data<cuda_scalar_t>(), pLpw.data<cuda_scalar_t>(), savedv.data<cuda_scalar_t>(), savedg.data<cuda_scalar_t>(), savedNorms.data<accscalar_t>(), rowSize); }); } else if(dim == ndims - 1) { // Precompute slower_dims_size and fast_dim_size because they involve dynamically indexing an array. 
int slower_dims_size = 1; for(int i = 0; i < ndims - 1; i++) slower_dims_size *= savedv.size(i); int fast_dim_size = savedv.size(ndims-1); using namespace at; cudaStream_t stream = globalContext().getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF (savedv.type(), "weight_norm_bwd_last_dim_kernel", [&] { using cuda_scalar_t = apex::cuda::type<scalar_t>; USING_ACCSCALAR_T weight_norm_bwd_last_dim_kernel <<<(fast_dim_size+TILE_W-1)/TILE_W, dim3(TILE_W,TILE_H), (TILE_W*TILE_H + TILE_W)*sizeof(accscalar_t), stream>>> (pLpv.data<cuda_scalar_t>(), pLpg.data<cuda_scalar_t>(), pLpw.data<cuda_scalar_t>(), savedv.data<cuda_scalar_t>(), savedg.data<cuda_scalar_t>(), savedNorms.data<accscalar_t>(), fast_dim_size, slower_dims_size); }); } // else // { // intermediate dim kernel. Error checking on the dim was already done in // Module.cpp:weight_norm_bwd. Could put that logic here instead, if we include // <python.h> in both files. // } // The kernel execution is asynchronous, so this will only catch errors on the kernel launch, // not the kernel's execution. Errors in kernel execution aren't guaranteed to be caught // until a later error check on a synchronizing CUDA call. Unfortunately, without manually // synchronizing here, this is the best we can do. THCudaCheck(cudaGetLastError()); #ifdef DEBUG_PROFILE THCudaCheck(cudaDeviceSynchronize()); #endif }
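// Comparing this .cu file with its .hip counterpart above shows the main
// rewrites hipify performs on it: an added #include "hip/hip_runtime.h", the
// ATen/cuda headers renamed to ATen/hip, cudaStream_t / cudaGetLastError /
// cudaDeviceSynchronize replaced by their hip* equivalents, and every
// triple-chevron launch rewritten into hipLaunchKernelGGL. A minimal sketch of
// the launch rewrite, using a hypothetical scale_kernel that is not part of
// this code base, looks like this:
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, float alpha, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= alpha;
}

void launch_scale(float* d_data, float alpha, int n, cudaStream_t stream)
{
    const int block = 256;
    const int grid = (n + block - 1) / block;
    // CUDA form, as written in this file:
    scale_kernel<<<grid, block, 0, stream>>>(d_data, alpha, n);
    // The hipified file would express the same launch as:
    //   hipLaunchKernelGGL(scale_kernel, dim3(grid), dim3(block), 0, stream,
    //                      d_data, alpha, n);
}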
8875eb1e69850939ae29c150d467e2e9cb67dffc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/layer_norm.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <THH/THHDeviceUtils.cuh> #include <c10/hip/HIPMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kColwiseReduceTileSize = 32; template <typename T> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T eps, const T* X, T* mean, T* rstd) { using T_ACC = acc_type<T, true>; __shared__ T_ACC m_shared[C10_WARP_SIZE]; __shared__ T_ACC v_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; sum1 += static_cast<T_ACC>(X[index]); sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]); } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared); if (threadIdx.x == 0) { const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N); sum1 *= scale; sum2 = c10::hip::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0)); mean[i] = sum1; rstd[i] = c10::hip::compat::rsqrt(sum2 + static_cast<T_ACC>(eps)); } } template <typename T> __global__ void LayerNormForwardCUDAKernel( int64_t N, const T* X, const T* mean, const T* rstd, const T* gamma, const T* beta, T* Y) { using T_ACC = acc_type<T, true>; const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); const T_ACC beta_v = beta == nullptr ? T_ACC(0) : static_cast<T_ACC>(beta[j]); Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]) * gamma_v + beta_v; } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[j]); sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v; sum2 += static_cast<T_ACC>(dY[index]) * gamma_v; } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); if (threadIdx.x == 0) { ds[i] = sum1; db[i] = sum2; } } template <typename T> __global__ void ComputeGradientFusedParamsCUDAKernel( int64_t M, int64_t N, const T* mean, const T* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c1, acc_type<T, true>* c2) { using T_ACC = acc_type<T, true>; const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < M) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N); const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * s; c1[index] = a; c2[index] = -(a * static_cast<T_ACC>(mean[index]) + db[index] * static_cast<T_ACC>(rstd[index]) * s); } } template <typename T> __global__ void LayerNormBackwardCUDAKenrel( int64_t N, const T* dY, const T* X, const T* gamma, const T* a, const acc_type<T, true>* b, const acc_type<T, true>* c, T* dX) { using T_ACC = acc_type<T, true>; const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); dX[index] = static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v + b[i] * static_cast<T_ACC>(X[index]) + c[i]; } } template <typename T> __global__ void GammaBetaBackwardSimpleCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T* mean, const T* rstd, T* dg, T* db) { using T_ACC = acc_type<T, true>; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; if (j < N) { T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = 0; i < M; ++i) { const int64_t index = i * N + j; sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]) * (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]); sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]); } if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } template <typename T> __global__ void GammaBetaBackwardCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T* mean, const T* rstd, T* dg, T* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; __shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (j < N) { for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) { const int64_t i1 = i; const int64_t i2 = i + blockDim.y; const int64_t index1 = i1 * N + j; const int64_t index2 = i2 * N + j; dg_sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]) * (static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) * static_cast<T_ACC>(rstd[i1]); db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]); if (i2 < M) { dg_sum2 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]) * (static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) * static_cast<T_ACC>(rstd[i2]); db_sum2 += db == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(dY[index2]); } } } g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } } template <typename T> void LayerNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, T eps, Tensor* Y, Tensor* mean, Tensor* rstd) { DCHECK_EQ(X.numel(), M * N); DCHECK(!gamma.defined() || gamma.numel() == N); DCHECK(!beta.defined() || beta.numel() == N); const T* X_data = X.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr; T* Y_data = Y->data_ptr<T>(); T* mean_data = mean->data_ptr<T>(); T* rstd_data = rstd->data_ptr<T>(); hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>) , dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream, N, eps, X_data, mean_data, rstd_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream, N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } void LayerNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, double eps, Tensor* Y, Tensor* mean, Tensor* rstd) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormKernelImpl", [&]() { LayerNormKernelImplInternal<scalar_t>( X, gamma, beta, M, N, static_cast<scalar_t>(eps), Y, mean, rstd); }); } template <typename T> void LayerNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { using T_ACC = acc_type<T, true>; DCHECK_EQ(dY.numel(), M * N); DCHECK_EQ(X.numel(), M * N); DCHECK_EQ(mean.numel(), M); DCHECK_EQ(rstd.numel(), M); DCHECK(!gamma.defined() || gamma.numel() == N); const T* dY_data = dY.template data_ptr<T>(); const T* X_data = X.template data_ptr<T>(); const T* mean_data = mean.template data_ptr<T>(); const T* rstd_data = rstd.template data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.template data_ptr<T>() : nullptr; T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr; hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (dX_data != nullptr) { const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? 
kFloat : X.scalar_type(); Tensor ds = at::empty({M}, X.options().dtype(kAccType)); Tensor db = at::empty({M}, X.options().dtype(kAccType)); Tensor scale = at::empty({M}, X.options().dtype(kAccType)); Tensor bias = at::empty({M}, X.options().dtype(kAccType)); T_ACC* ds_data = ds.template data_ptr<T_ACC>(); T_ACC* db_data = db.template data_ptr<T_ACC>(); T_ACC* scale_data = scale.template data_ptr<T_ACC>(); T_ACC* bias_data = bias.template data_ptr<T_ACC>(); hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>) , dim3(M), dim3(cuda_utils::kCUDABlockReduceNumThreads), 0, cuda_stream, N, dY_data, X_data, gamma_data, ds_data, db_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( ComputeGradientFusedParamsCUDAKernel<T>) , dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, M, N, mean_data, rstd_data, ds_data, db_data, scale_data, bias_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); hipLaunchKernelGGL(( LayerNormBackwardCUDAKenrel<T>), dim3(M), dim3(kCUDANumThreads), 0, cuda_stream, N, dY_data, X_data, gamma_data, rstd_data, scale_data, bias_data, dX_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } if (dgamma->defined() || dbeta->defined()) { T* dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr; T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr; if (M < 512) { // For small batch size, do colwise reduce directly. const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads; hipLaunchKernelGGL(( GammaBetaBackwardSimpleCUDAKernel<T>) , dim3(B), dim3(kCUDANumThreads), 0, cuda_stream, M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize; constexpr int kThreadX = kColwiseReduceTileSize; constexpr int kThreadY = kColwiseReduceTileSize / 2; hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel<T>) , dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream, M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } } } void LayerNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() { LayerNormBackwardKernelImplInternal<scalar_t>( dY, X, mean, rstd, gamma, M, N, dX, dgamma, dbeta); }); } } // namespace std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda( const Tensor& X, const Tensor& gamma /* optional */, const Tensor& beta /* optional */, int64_t M, int64_t N, double eps) { Tensor Y = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor mean = at::empty({M}, X.options()); Tensor rstd = at::empty({M}, X.options()); if (M > 0) { LayerNormKernelImpl(X, gamma, beta, M, N, eps, &Y, &mean, &rstd); } return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd)); } std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, std::array<bool, 3> grad_input_mask) { Tensor dX; Tensor dgamma; Tensor dbeta; if (grad_input_mask[0]) { dX = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[1]) { dgamma = M > 0 ? 
at::native::empty_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[2]) { dbeta = M > 0 ? at::native::empty_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (M > 0) { LayerNormBackwardKernelImpl( dY, X, mean, rstd, gamma, M, N, &dX, &dgamma, &dbeta); } return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta)); } REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl); REGISTER_DISPATCH(LayerNormBackwardKernel, &LayerNormBackwardKernelImpl); } // namespace native } // namespace at
8875eb1e69850939ae29c150d467e2e9cb67dffc.cu
#include <ATen/native/layer_norm.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <THC/THCDeviceUtils.cuh> #include <c10/cuda/CUDAMathCompat.h> namespace at { namespace native { namespace { constexpr int kCUDANumThreads = 256; constexpr int kColwiseReduceTileSize = 32; template <typename T> __global__ void RowwiseMomentsCUDAKernel( int64_t N, T eps, const T* X, T* mean, T* rstd) { using T_ACC = acc_type<T, true>; __shared__ T_ACC m_shared[C10_WARP_SIZE]; __shared__ T_ACC v_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; sum1 += static_cast<T_ACC>(X[index]); sum2 += static_cast<T_ACC>(X[index]) * static_cast<T_ACC>(X[index]); } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, m_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, v_shared); if (threadIdx.x == 0) { const T_ACC scale = T_ACC(1) / static_cast<T_ACC>(N); sum1 *= scale; sum2 = c10::cuda::compat::max(sum2 * scale - sum1 * sum1, T_ACC(0)); mean[i] = sum1; rstd[i] = c10::cuda::compat::rsqrt(sum2 + static_cast<T_ACC>(eps)); } } template <typename T> __global__ void LayerNormForwardCUDAKernel( int64_t N, const T* X, const T* mean, const T* rstd, const T* gamma, const T* beta, T* Y) { using T_ACC = acc_type<T, true>; const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); const T_ACC beta_v = beta == nullptr ? T_ACC(0) : static_cast<T_ACC>(beta[j]); Y[index] = (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]) * gamma_v + beta_v; } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( int64_t N, const T* dY, const T* X, const T* gamma, acc_type<T, true>* ds, acc_type<T, true>* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC ds_shared[C10_WARP_SIZE]; __shared__ T_ACC db_shared[C10_WARP_SIZE]; const int64_t i = blockIdx.x; T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? 
T_ACC(1) : static_cast<T_ACC>(gamma[j]); sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]) * gamma_v; sum2 += static_cast<T_ACC>(dY[index]) * gamma_v; } sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared); sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared); if (threadIdx.x == 0) { ds[i] = sum1; db[i] = sum2; } } template <typename T> __global__ void ComputeGradientFusedParamsCUDAKernel( int64_t M, int64_t N, const T* mean, const T* rstd, const acc_type<T, true>* ds, const acc_type<T, true>* db, acc_type<T, true>* c1, acc_type<T, true>* c2) { using T_ACC = acc_type<T, true>; const int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index < M) { const T_ACC s = T_ACC(1) / static_cast<T_ACC>(N); const T_ACC a = (db[index] * static_cast<T_ACC>(mean[index]) - ds[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * static_cast<T_ACC>(rstd[index]) * s; c1[index] = a; c2[index] = -(a * static_cast<T_ACC>(mean[index]) + db[index] * static_cast<T_ACC>(rstd[index]) * s); } } template <typename T> __global__ void LayerNormBackwardCUDAKenrel( int64_t N, const T* dY, const T* X, const T* gamma, const T* a, const acc_type<T, true>* b, const acc_type<T, true>* c, T* dX) { using T_ACC = acc_type<T, true>; const int64_t i = blockIdx.x; for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; const T_ACC gamma_v = gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[j]); dX[index] = static_cast<T_ACC>(a[i]) * static_cast<T_ACC>(dY[index]) * gamma_v + b[i] * static_cast<T_ACC>(X[index]) + c[i]; } } template <typename T> __global__ void GammaBetaBackwardSimpleCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T* mean, const T* rstd, T* dg, T* db) { using T_ACC = acc_type<T, true>; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; if (j < N) { T_ACC sum1 = 0; T_ACC sum2 = 0; for (int64_t i = 0; i < M; ++i) { const int64_t index = i * N + j; sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]) * (static_cast<T_ACC>(X[index]) - static_cast<T_ACC>(mean[i])) * static_cast<T_ACC>(rstd[i]); sum2 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index]); } if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } template <typename T> __global__ void GammaBetaBackwardCUDAKernel( int64_t M, int64_t N, const T* dY, const T* X, const T* mean, const T* rstd, T* dg, T* db) { using T_ACC = acc_type<T, true>; __shared__ T_ACC g_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; __shared__ T_ACC b_shared[kColwiseReduceTileSize][kColwiseReduceTileSize + 1]; const int64_t j = blockIdx.x * blockDim.x + threadIdx.x; T_ACC dg_sum1 = 0; T_ACC dg_sum2 = 0; T_ACC db_sum1 = 0; T_ACC db_sum2 = 0; if (j < N) { for (int64_t i = threadIdx.y; i < M; i += blockDim.y * 2) { const int64_t i1 = i; const int64_t i2 = i + blockDim.y; const int64_t index1 = i1 * N + j; const int64_t index2 = i2 * N + j; dg_sum1 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]) * (static_cast<T_ACC>(X[index1]) - static_cast<T_ACC>(mean[i1])) * static_cast<T_ACC>(rstd[i1]); db_sum1 += db == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index1]); if (i2 < M) { dg_sum2 += dg == nullptr ? T_ACC(0) : static_cast<T_ACC>(dY[index2]) * (static_cast<T_ACC>(X[index2]) - static_cast<T_ACC>(mean[i2])) * static_cast<T_ACC>(rstd[i2]); db_sum2 += db == nullptr ? 
T_ACC(0) : static_cast<T_ACC>(dY[index2]); } } } g_shared[threadIdx.y][threadIdx.x] = dg_sum1; g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2; b_shared[threadIdx.y][threadIdx.x] = db_sum1; b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2; __syncthreads(); T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y]; T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y]; sum1 = cuda_utils::WarpReduceSum(sum1); sum2 = cuda_utils::WarpReduceSum(sum2); if (threadIdx.x == 0) { const int64_t j = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y; if (j < N) { if (dg != nullptr) { dg[j] = sum1; } if (db != nullptr) { db[j] = sum2; } } } } template <typename T> void LayerNormKernelImplInternal( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, T eps, Tensor* Y, Tensor* mean, Tensor* rstd) { DCHECK_EQ(X.numel(), M * N); DCHECK(!gamma.defined() || gamma.numel() == N); DCHECK(!beta.defined() || beta.numel() == N); const T* X_data = X.data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr; const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr; T* Y_data = Y->data_ptr<T>(); T* mean_data = mean->data_ptr<T>(); T* rstd_data = rstd->data_ptr<T>(); cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); RowwiseMomentsCUDAKernel<T> <<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>( N, eps, X_data, mean_data, rstd_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); LayerNormForwardCUDAKernel<T><<<M, kCUDANumThreads, 0, cuda_stream>>>( N, X_data, mean_data, rstd_data, gamma_data, beta_data, Y_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } void LayerNormKernelImpl( const Tensor& X, const Tensor& gamma, const Tensor& beta, int64_t M, int64_t N, double eps, Tensor* Y, Tensor* mean, Tensor* rstd) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormKernelImpl", [&]() { LayerNormKernelImplInternal<scalar_t>( X, gamma, beta, M, N, static_cast<scalar_t>(eps), Y, mean, rstd); }); } template <typename T> void LayerNormBackwardKernelImplInternal( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { using T_ACC = acc_type<T, true>; DCHECK_EQ(dY.numel(), M * N); DCHECK_EQ(X.numel(), M * N); DCHECK_EQ(mean.numel(), M); DCHECK_EQ(rstd.numel(), M); DCHECK(!gamma.defined() || gamma.numel() == N); const T* dY_data = dY.template data_ptr<T>(); const T* X_data = X.template data_ptr<T>(); const T* mean_data = mean.template data_ptr<T>(); const T* rstd_data = rstd.template data_ptr<T>(); const T* gamma_data = gamma.defined() ? gamma.template data_ptr<T>() : nullptr; T* dX_data = dX->defined() ? dX->template data_ptr<T>() : nullptr; cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream(); if (dX_data != nullptr) { const auto kAccType = (X.scalar_type() == kHalf || X.scalar_type() == kBFloat16) ? 
kFloat : X.scalar_type(); Tensor ds = at::empty({M}, X.options().dtype(kAccType)); Tensor db = at::empty({M}, X.options().dtype(kAccType)); Tensor scale = at::empty({M}, X.options().dtype(kAccType)); Tensor bias = at::empty({M}, X.options().dtype(kAccType)); T_ACC* ds_data = ds.template data_ptr<T_ACC>(); T_ACC* db_data = db.template data_ptr<T_ACC>(); T_ACC* scale_data = scale.template data_ptr<T_ACC>(); T_ACC* bias_data = bias.template data_ptr<T_ACC>(); ComputeInternalGradientsCUDAKernel<T> <<<M, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>( N, dY_data, X_data, gamma_data, ds_data, db_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); const int64_t B = (M + kCUDANumThreads - 1) / kCUDANumThreads; ComputeGradientFusedParamsCUDAKernel<T> <<<B, kCUDANumThreads, 0, cuda_stream>>>( M, N, mean_data, rstd_data, ds_data, db_data, scale_data, bias_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); LayerNormBackwardCUDAKenrel<T><<<M, kCUDANumThreads, 0, cuda_stream>>>( N, dY_data, X_data, gamma_data, rstd_data, scale_data, bias_data, dX_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } if (dgamma->defined() || dbeta->defined()) { T* dgamma_data = dgamma->defined() ? dgamma->template data_ptr<T>() : nullptr; T* dbeta_data = dbeta->defined() ? dbeta->template data_ptr<T>() : nullptr; if (M < 512) { // For small batch size, do colwise reduce directly. const int64_t B = (N + kCUDANumThreads - 1) / kCUDANumThreads; GammaBetaBackwardSimpleCUDAKernel<T> <<<B, kCUDANumThreads, 0, cuda_stream>>>( M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } else { const int64_t B = (N + kColwiseReduceTileSize - 1) / kColwiseReduceTileSize; constexpr int kThreadX = kColwiseReduceTileSize; constexpr int kThreadY = kColwiseReduceTileSize / 2; GammaBetaBackwardCUDAKernel<T> <<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>( M, N, dY_data, X_data, mean_data, rstd_data, dgamma_data, dbeta_data); TORCH_CUDA_KERNEL_LAUNCH_CHECK(); } } } void LayerNormBackwardKernelImpl( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, Tensor* dX, Tensor* dgamma, Tensor* dbeta) { AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, X.scalar_type(), "LayerNormBackwardKernelImpl", [&]() { LayerNormBackwardKernelImplInternal<scalar_t>( dY, X, mean, rstd, gamma, M, N, dX, dgamma, dbeta); }); } } // namespace std::tuple<Tensor, Tensor, Tensor> layer_norm_cuda( const Tensor& X, const Tensor& gamma /* optional */, const Tensor& beta /* optional */, int64_t M, int64_t N, double eps) { Tensor Y = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor mean = at::empty({M}, X.options()); Tensor rstd = at::empty({M}, X.options()); if (M > 0) { LayerNormKernelImpl(X, gamma, beta, M, N, eps, &Y, &mean, &rstd); } return std::make_tuple(std::move(Y), std::move(mean), std::move(rstd)); } std::tuple<Tensor, Tensor, Tensor> layer_norm_backward_cuda( const Tensor& dY, const Tensor& X, const Tensor& mean, const Tensor& rstd, const Tensor& gamma, int64_t M, int64_t N, std::array<bool, 3> grad_input_mask) { Tensor dX; Tensor dgamma; Tensor dbeta; if (grad_input_mask[0]) { dX = at::native::empty_like(X, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[1]) { dgamma = M > 0 ? at::native::empty_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (grad_input_mask[2]) { dbeta = M > 0 ? 
at::native::empty_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::native::zeros_like(gamma, LEGACY_CONTIGUOUS_MEMORY_FORMAT); } if (M > 0) { LayerNormBackwardKernelImpl( dY, X, mean, rstd, gamma, M, N, &dX, &dgamma, &dbeta); } return std::make_tuple(std::move(dX), std::move(dgamma), std::move(dbeta)); } REGISTER_DISPATCH(LayerNormKernel, &LayerNormKernelImpl); REGISTER_DISPATCH(LayerNormBackwardKernel, &LayerNormBackwardKernelImpl); } // namespace native } // namespace at
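// ---------------------------------------------------------------------------
// Editor's note: illustrative host-side sketch, not part of the original pair.
// It mirrors what RowwiseMomentsCUDAKernel + LayerNormForwardCUDAKernel compute
// for a single row of length N, which is handy when spot-checking kernel output
// on small inputs. Names are hypothetical.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <vector>

static void layer_norm_row_reference(const std::vector<float>& x,      // one row, length N
                                     const std::vector<float>& gamma,  // length N, or empty
                                     const std::vector<float>& beta,   // length N, or empty
                                     double eps,
                                     std::vector<float>& y,
                                     double& mean, double& rstd) {
  const size_t N = x.size();
  double sum1 = 0.0, sum2 = 0.0;
  for (size_t j = 0; j < N; ++j) {   // same two running sums as the rowwise-moments kernel
    sum1 += x[j];
    sum2 += static_cast<double>(x[j]) * x[j];
  }
  mean = sum1 / N;
  const double var = std::max(sum2 / N - mean * mean, 0.0);
  rstd = 1.0 / std::sqrt(var + eps);
  y.resize(N);
  for (size_t j = 0; j < N; ++j) {
    const double g = gamma.empty() ? 1.0 : gamma[j];
    const double b = beta.empty() ? 0.0 : beta[j];
    y[j] = static_cast<float>((x[j] - mean) * rstd * g + b);
  }
}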
335e901f6ad62836c1a2923e595ba8d9b9cded3a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cudnn.h> #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } #define checkCUDA(expression) \ { \ hipError_t status = (expression); \ if (status != hipSuccess) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << hipGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } void print_array(float *array, int size, const char *name) { std::cout << name; for (int i = 0; i < size; i++) { std::cout << array[i] << " "; } std::cout << std::endl; } int main(int argc, char const *argv[]) { cudnnHandle_t cudnn; checkCUDNN(cudnnCreate(&cudnn)); auto mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; const cudnnBatchNormOps_t bn_ops = CUDNN_BATCHNORM_OPS_BN; float one = 1.0; float zero = 0.0; int N = 2, C = 3, H = 1, W = 2; int x_size = N * C * H * W; int x_size_bytes = x_size * sizeof(float); int mean_size = C; int mean_size_bytes = mean_size * sizeof(float); cudnnTensorDescriptor_t x_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&x_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(x_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/N, /*channels=*/C, /*image_height=*/H, /*image_width=*/W)); float *x, *y, *dy, *dx; checkCUDA(hipMallocManaged(&x, x_size_bytes)); checkCUDA(hipMallocManaged(&y, x_size_bytes)); checkCUDA(hipMallocManaged(&dy, x_size_bytes)); checkCUDA(hipMallocManaged(&dx, x_size_bytes)); x[0] = 0.16513085; x[2] = 0.9014813; x[4] = 0.6309742; x[1] = 0.4345461; x[3] = 0.29193902; x[5] = 0.64250207; x[6] = 0.9757855; x[8] = 0.43509948; x[10] = 0.6601019; x[7] = 0.60489583; x[9] = 0.6366315; x[11] = 0.6144488; dy[0] = 1.0; dy[2] = 1.0; dy[4] = 1.0; dy[1] = 1.0; dy[3] = 1.0; dy[5] = 1.0; dy[6] = 1.0; dy[8] = 1.0; dy[10] = 1.0; dy[7] = 1.0; dy[9] = 1.0; dy[11] = 1.0; cudnnTensorDescriptor_t mean_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&mean_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(mean_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/1, /*image_width=*/1)); float *scale, *offset, *dscale, *doffset; float *running_mean, *running_var; float *saved_mean, *saved_inv_var; checkCUDA(hipMallocManaged(&scale, mean_size_bytes)); checkCUDA(hipMallocManaged(&offset, mean_size_bytes)); checkCUDA(hipMallocManaged(&dscale, mean_size_bytes)); checkCUDA(hipMallocManaged(&doffset, mean_size_bytes)); checkCUDA(hipMallocManaged(&running_mean, mean_size_bytes)); checkCUDA(hipMallocManaged(&running_var, mean_size_bytes)); checkCUDA(hipMallocManaged(&saved_mean, mean_size_bytes)); checkCUDA(hipMallocManaged(&saved_inv_var, mean_size_bytes)); // saved_mean and saved_inv_var can be nullptr. 
// saved_mean = nullptr; saved_inv_var = nullptr; scale[0] = 1.0; scale[1] = 1.0; scale[2] = 1.0; offset[0] = 0.0; offset[1] = 0.0; offset[2] = 0.0; running_mean[0] = 1.0; running_mean[1] = 1.0; running_mean[2] = 1.0; running_var[0] = 1.0; running_var[1] = 1.0; running_var[2] = 1.0; cudnnActivationDescriptor_t activation_desc; checkCUDNN(cudnnCreateActivationDescriptor(&activation_desc)); checkCUDNN(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_IDENTITY, CUDNN_PROPAGATE_NAN, 0.0)); size_t workspace_size_bytes = 0; checkCUDNN(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*xDesc=*/x_descriptor, /*zDesc=*/NULL, /*yDesc=*/x_descriptor, /*bnScaleBiasMeanVarDesc=*/mean_descriptor, /*activationDesc=*/activation_desc, /*sizeInBytes=*/&workspace_size_bytes)); void *workspace = nullptr; if (workspace_size_bytes > 0) { checkCUDA(hipMalloc(&workspace, workspace_size_bytes)); } size_t reserve_space_size_bytes = 0; checkCUDNN(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*activationDesc=*/activation_desc, /*xDesc=*/x_descriptor, /*sizeInBytes=*/&reserve_space_size_bytes)); char *reserve_space; checkCUDA(hipMalloc(&reserve_space, reserve_space_size_bytes)); checkCUDNN(cudnnBatchNormalizationForwardTrainingEx( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*alpha=*/&one, /*beta=*/&zero, /*xDesc=*/x_descriptor, /*xData=*/x, /*zDesc=*/NULL, /*zData=*/NULL, /*yDesc=*/x_descriptor, /*yData=*/y, /*bnScaleBiasMeanVarDesc=*/mean_descriptor, /*bnScale=*/scale, /*bnBias=*/offset, /*exponentialAverageFactor=*/0.5, /*resultRunningMean=*/running_mean, /*resultRunningVariance=*/running_var, /*epsilon=*/0.001, /*resultSaveMean=*/saved_mean, /*resultSaveInvVariance=*/saved_inv_var, /*activationDesc=*/activation_desc, /*workspace=*/workspace, /*workSpaceSizeInBytes=*/workspace_size_bytes, /*reserveSpace=*/reserve_space, /*reserveSpaceSizeInBytes=*/reserve_space_size_bytes)); checkCUDA(hipDeviceSynchronize()); print_array(y, x_size, "y NCHW format: "); checkCUDNN(cudnnBatchNormalizationBackwardEx( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*alphaDataDiff=*/&one, /*betaDataDiff=*/&zero, /*alphaParamDiff=*/&one, /*betaParamDiff=*/&zero, /*xDesc=*/x_descriptor, /*xData=*/x, /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/x_descriptor, /*dyData=*/dy, /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/x_descriptor, /*dxData=*/dx, /*dBnScaleBiasDesc=*/mean_descriptor, /*bnScaleData=*/scale, /*bnBiasData=*/nullptr, /*dBnScaleData=*/dscale, /*dBnBiasData=*/doffset, /*epsilon=*/0.001, /*savedMean=*/saved_mean, /*savedInvVariance=*/saved_inv_var, /*activationDesc=*/activation_desc, /*workspace=*/workspace, /*workSpaceSizeInBytes=*/workspace_size_bytes, /*reserveSpace=*/reserve_space, /*reserveSpaceSizeInBytes=*/reserve_space_size_bytes)); checkCUDA(hipDeviceSynchronize()); print_array(dx, x_size, "dx NCHW format: "); print_array(dscale, mean_size, "dscale: "); print_array(doffset, mean_size, "doffset: "); checkCUDA(hipFree(x)); checkCUDA(hipFree(y)); checkCUDA(hipFree(dy)); checkCUDA(hipFree(dx)); checkCUDA(hipFree(scale)); checkCUDA(hipFree(offset)); checkCUDA(hipFree(dscale)); checkCUDA(hipFree(doffset)); checkCUDA(hipFree(running_mean)); checkCUDA(hipFree(running_var)); checkCUDA(hipFree(saved_mean)); checkCUDA(hipFree(saved_inv_var)); checkCUDA(hipFree(workspace)); checkCUDA(hipFree(reserve_space)); }
335e901f6ad62836c1a2923e595ba8d9b9cded3a.cu
#include <iostream> #include <cudnn.h> #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } #define checkCUDA(expression) \ { \ cudaError_t status = (expression); \ if (status != cudaSuccess) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudaGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } void print_array(float *array, int size, const char *name) { std::cout << name; for (int i = 0; i < size; i++) { std::cout << array[i] << " "; } std::cout << std::endl; } int main(int argc, char const *argv[]) { cudnnHandle_t cudnn; checkCUDNN(cudnnCreate(&cudnn)); auto mode = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; const cudnnBatchNormOps_t bn_ops = CUDNN_BATCHNORM_OPS_BN; float one = 1.0; float zero = 0.0; int N = 2, C = 3, H = 1, W = 2; int x_size = N * C * H * W; int x_size_bytes = x_size * sizeof(float); int mean_size = C; int mean_size_bytes = mean_size * sizeof(float); cudnnTensorDescriptor_t x_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&x_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(x_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/N, /*channels=*/C, /*image_height=*/H, /*image_width=*/W)); float *x, *y, *dy, *dx; checkCUDA(cudaMallocManaged(&x, x_size_bytes)); checkCUDA(cudaMallocManaged(&y, x_size_bytes)); checkCUDA(cudaMallocManaged(&dy, x_size_bytes)); checkCUDA(cudaMallocManaged(&dx, x_size_bytes)); x[0] = 0.16513085; x[2] = 0.9014813; x[4] = 0.6309742; x[1] = 0.4345461; x[3] = 0.29193902; x[5] = 0.64250207; x[6] = 0.9757855; x[8] = 0.43509948; x[10] = 0.6601019; x[7] = 0.60489583; x[9] = 0.6366315; x[11] = 0.6144488; dy[0] = 1.0; dy[2] = 1.0; dy[4] = 1.0; dy[1] = 1.0; dy[3] = 1.0; dy[5] = 1.0; dy[6] = 1.0; dy[8] = 1.0; dy[10] = 1.0; dy[7] = 1.0; dy[9] = 1.0; dy[11] = 1.0; cudnnTensorDescriptor_t mean_descriptor; checkCUDNN(cudnnCreateTensorDescriptor(&mean_descriptor)); checkCUDNN(cudnnSetTensor4dDescriptor(mean_descriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/1, /*image_width=*/1)); float *scale, *offset, *dscale, *doffset; float *running_mean, *running_var; float *saved_mean, *saved_inv_var; checkCUDA(cudaMallocManaged(&scale, mean_size_bytes)); checkCUDA(cudaMallocManaged(&offset, mean_size_bytes)); checkCUDA(cudaMallocManaged(&dscale, mean_size_bytes)); checkCUDA(cudaMallocManaged(&doffset, mean_size_bytes)); checkCUDA(cudaMallocManaged(&running_mean, mean_size_bytes)); checkCUDA(cudaMallocManaged(&running_var, mean_size_bytes)); checkCUDA(cudaMallocManaged(&saved_mean, mean_size_bytes)); checkCUDA(cudaMallocManaged(&saved_inv_var, mean_size_bytes)); // saved_mean and saved_inv_var can be nullptr. 
// saved_mean = nullptr; saved_inv_var = nullptr; scale[0] = 1.0; scale[1] = 1.0; scale[2] = 1.0; offset[0] = 0.0; offset[1] = 0.0; offset[2] = 0.0; running_mean[0] = 1.0; running_mean[1] = 1.0; running_mean[2] = 1.0; running_var[0] = 1.0; running_var[1] = 1.0; running_var[2] = 1.0; cudnnActivationDescriptor_t activation_desc; checkCUDNN(cudnnCreateActivationDescriptor(&activation_desc)); checkCUDNN(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_IDENTITY, CUDNN_PROPAGATE_NAN, 0.0)); size_t workspace_size_bytes = 0; checkCUDNN(cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*xDesc=*/x_descriptor, /*zDesc=*/NULL, /*yDesc=*/x_descriptor, /*bnScaleBiasMeanVarDesc=*/mean_descriptor, /*activationDesc=*/activation_desc, /*sizeInBytes=*/&workspace_size_bytes)); void *workspace = nullptr; if (workspace_size_bytes > 0) { checkCUDA(cudaMalloc(&workspace, workspace_size_bytes)); } size_t reserve_space_size_bytes = 0; checkCUDNN(cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*activationDesc=*/activation_desc, /*xDesc=*/x_descriptor, /*sizeInBytes=*/&reserve_space_size_bytes)); char *reserve_space; checkCUDA(cudaMalloc(&reserve_space, reserve_space_size_bytes)); checkCUDNN(cudnnBatchNormalizationForwardTrainingEx( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*alpha=*/&one, /*beta=*/&zero, /*xDesc=*/x_descriptor, /*xData=*/x, /*zDesc=*/NULL, /*zData=*/NULL, /*yDesc=*/x_descriptor, /*yData=*/y, /*bnScaleBiasMeanVarDesc=*/mean_descriptor, /*bnScale=*/scale, /*bnBias=*/offset, /*exponentialAverageFactor=*/0.5, /*resultRunningMean=*/running_mean, /*resultRunningVariance=*/running_var, /*epsilon=*/0.001, /*resultSaveMean=*/saved_mean, /*resultSaveInvVariance=*/saved_inv_var, /*activationDesc=*/activation_desc, /*workspace=*/workspace, /*workSpaceSizeInBytes=*/workspace_size_bytes, /*reserveSpace=*/reserve_space, /*reserveSpaceSizeInBytes=*/reserve_space_size_bytes)); checkCUDA(cudaDeviceSynchronize()); print_array(y, x_size, "y NCHW format: "); checkCUDNN(cudnnBatchNormalizationBackwardEx( /*handle=*/cudnn, /*mode=*/mode, /*bnOps=*/bn_ops, /*alphaDataDiff=*/&one, /*betaDataDiff=*/&zero, /*alphaParamDiff=*/&one, /*betaParamDiff=*/&zero, /*xDesc=*/x_descriptor, /*xData=*/x, /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/x_descriptor, /*dyData=*/dy, /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/x_descriptor, /*dxData=*/dx, /*dBnScaleBiasDesc=*/mean_descriptor, /*bnScaleData=*/scale, /*bnBiasData=*/nullptr, /*dBnScaleData=*/dscale, /*dBnBiasData=*/doffset, /*epsilon=*/0.001, /*savedMean=*/saved_mean, /*savedInvVariance=*/saved_inv_var, /*activationDesc=*/activation_desc, /*workspace=*/workspace, /*workSpaceSizeInBytes=*/workspace_size_bytes, /*reserveSpace=*/reserve_space, /*reserveSpaceSizeInBytes=*/reserve_space_size_bytes)); checkCUDA(cudaDeviceSynchronize()); print_array(dx, x_size, "dx NCHW format: "); print_array(dscale, mean_size, "dscale: "); print_array(doffset, mean_size, "doffset: "); checkCUDA(cudaFree(x)); checkCUDA(cudaFree(y)); checkCUDA(cudaFree(dy)); checkCUDA(cudaFree(dx)); checkCUDA(cudaFree(scale)); checkCUDA(cudaFree(offset)); checkCUDA(cudaFree(dscale)); checkCUDA(cudaFree(doffset)); checkCUDA(cudaFree(running_mean)); checkCUDA(cudaFree(running_var)); checkCUDA(cudaFree(saved_mean)); checkCUDA(cudaFree(saved_inv_var)); checkCUDA(cudaFree(workspace)); checkCUDA(cudaFree(reserve_space)); }
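// ---------------------------------------------------------------------------
// Editor's note: illustrative host-side sketch, not part of the original pair.
// It computes plain training-mode batch norm for an NCHW tensor (per-channel
// mean and biased variance, then normalize with the same eps = 0.001 the example
// passes to cuDNN), which is enough to eyeball the printed y values. It does not
// try to reproduce how cuDNN updates the running statistics. Names are
// hypothetical.
// ---------------------------------------------------------------------------
#include <cmath>
#include <vector>

static void batch_norm_forward_reference(const std::vector<float>& x,      // N*C*H*W, NCHW layout
                                         const std::vector<float>& scale,  // C
                                         const std::vector<float>& offset, // C
                                         int N, int C, int H, int W,
                                         double eps,
                                         std::vector<float>& y) {
  const int hw = H * W;
  const int chw = C * hw;
  y.resize(x.size());
  for (int c = 0; c < C; ++c) {
    double sum = 0.0, sqsum = 0.0;
    const int m = N * hw;                        // samples contributing to this channel
    for (int n = 0; n < N; ++n)
      for (int i = 0; i < hw; ++i) {
        const float v = x[n * chw + c * hw + i];
        sum += v;
        sqsum += static_cast<double>(v) * v;
      }
    const double mean = sum / m;
    const double var = sqsum / m - mean * mean;  // biased variance used for normalization
    const double inv_std = 1.0 / std::sqrt(var + eps);
    for (int n = 0; n < N; ++n)
      for (int i = 0; i < hw; ++i) {
        const int idx = n * chw + c * hw + i;
        y[idx] = static_cast<float>((x[idx] - mean) * inv_std * scale[c] + offset[c]);
      }
  }
}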
8284cf77448c24b9930166954e6629c9ac6da557.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "__fillToIndsLongX.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; long long A = 1; long long *B = NULL; hipMalloc(&B, XSIZE*YSIZE); long long len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( __fillToIndsLongX), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( __fillToIndsLongX), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( __fillToIndsLongX), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8284cf77448c24b9930166954e6629c9ac6da557.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "__fillToIndsLongX.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; long long A = 1; long long *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); long long len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); __fillToIndsLongX<<<gridBlock,threadBlock>>>(A,B,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { __fillToIndsLongX<<<gridBlock,threadBlock>>>(A,B,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { __fillToIndsLongX<<<gridBlock,threadBlock>>>(A,B,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
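// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original pair. The harness
// above times 1000 asynchronous launches with std::chrono and never synchronizes
// after the timed loop, so the reported time can under-count actual kernel
// execution. A common alternative is to bracket the loop with CUDA events, as
// sketched below; dummy_kernel is a hypothetical stand-in for the kernel under
// test.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

static float time_1000_launches_ms(dim3 grid, dim3 block) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);                  // recorded on the default stream
  for (int i = 0; i < 1000; ++i) {
    dummy_kernel<<<grid, block>>>();
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);              // wait until all 1000 launches have finished
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // elapsed GPU time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;                               // divide by 1000 for the per-launch average
}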
33699e33bdf96dd1f30d6d62e12a48be0e98a492.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (label_value<0 || (has_ignore_label_ && label_value == ignore_label_)) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (label_value<0 || (has_ignore_label_ && label_value == ignore_label_)) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype diff_sum; Dtype prob_org_sum; static bool first_time_norm = true; if (gradient_norm_ == SoftmaxParameter_GradientNorm_EQUAL_NORM) { if (first_time_norm) { LOG(INFO) << "cross entropy loss with EQUAL_NROM"; first_time_norm = false; } caffe_gpu_asum(bottom[0]->count(), bottom_diff, &prob_org_sum); prob_org_sum = prob_org_sum / bottom[0]->num(); for (int idx = 0; idx < bottom[0]->num(); idx++) { caffe_gpu_asum(bottom[0]->channels(), bottom_diff + idx*bottom[0]->channels(), &diff_sum); Dtype scale_factor = prob_org_sum / diff_sum; caffe_gpu_scal(bottom[0]->channels(), scale_factor, bottom_diff + idx*bottom[0]->channels()); } } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
33699e33bdf96dd1f30d6d62e12a48be0e98a492.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (label_value<0 || (has_ignore_label_ && label_value == ignore_label_)) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. 
if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (label_value<0 || (has_ignore_label_ && label_value == ignore_label_)) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } Dtype diff_sum; Dtype prob_org_sum; static bool first_time_norm = true; if (gradient_norm_ == SoftmaxParameter_GradientNorm_EQUAL_NORM) { if (first_time_norm) { LOG(INFO) << "cross entropy loss with EQUAL_NROM"; first_time_norm = false; } caffe_gpu_asum(bottom[0]->count(), bottom_diff, &prob_org_sum); prob_org_sum = prob_org_sum / bottom[0]->num(); for (int idx = 0; idx < bottom[0]->num(); idx++) { caffe_gpu_asum(bottom[0]->channels(), bottom_diff + idx*bottom[0]->channels(), &diff_sum); Dtype scale_factor = prob_org_sum / diff_sum; caffe_gpu_scal(bottom[0]->channels(), scale_factor, bottom_diff + idx*bottom[0]->channels()); } } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
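// ---------------------------------------------------------------------------
// Editor's note: illustrative host-side sketch, not part of the original pair.
// For a single spatial position the two kernels above reduce to the usual
// softmax cross-entropy identities: loss = -log p[label], and the gradient with
// respect to the logits is p - onehot(label) before the loss_weight / normalizer
// scaling. Names are hypothetical.
// ---------------------------------------------------------------------------
#include <cfloat>
#include <cmath>
#include <vector>

static double softmax_xent_reference(const std::vector<double>& prob,  // already softmaxed, sums to 1
                                     int label,
                                     std::vector<double>& grad) {      // gradient w.r.t. the logits
  grad = prob;                 // the backward kernel starts from a copy of the probabilities
  grad[label] -= 1.0;          // ... and subtracts 1 at the ground-truth class
  const double p = prob[label] > DBL_MIN ? prob[label] : DBL_MIN;
  return -std::log(p);         // forward loss, clamped like the kernel's max(p, FLT_MIN)
}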
5db4c81031b2bdb50438425513f2174eaea5e1d7.hip
// !!! This is a file automatically generated by hipify!!! /* Modified from brent.c by CM on 2004 August 13, so as to accept an absolute tolerance argument in addition to the fractional tolerance argument */ extern "C" { #include "../shape/head.h" } #define ITMAX 100 #define CGOLD 0.3819660 #define ZEPS 1.0e-10 #define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a)) #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); __host__ double brent_abs_gpu(double ax,double bx,double cx,double (*f)(double, struct vertices_t**, unsigned char*, unsigned char*, int*, int*, int*, int, int, hipStream_t*, double**), double tol,double abstol,double *xmin, struct vertices_t **verts, unsigned char *htype, unsigned char *dtype, int *nframes, int *nviews, int *lc_n, int nsets, int nf, hipStream_t *bf_stream, double **fit_overflow) { int iter; double a, b, d, etemp, fu, fv, fw, fx, p, q, r, tol1, tol2, u, v, w, x, xm; double e = 0.0; double abstol_use = ((abstol > ZEPS) ? abstol : ZEPS); a = ((ax < cx) ? ax : cx); b = ((ax > cx) ? ax : cx); x = w = v = bx; fw = fv = fx = (*f)(x, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); for (iter=1; iter<=ITMAX; iter++) { xm = 0.5 * (a + b); tol2 = 2.0 * (tol1 = tol * fabs(x) + abstol_use); if (fabs(x - xm) <= (tol2 - 0.5 * (b - a))) { *xmin = x; return fx; } if (fabs(e) > tol1) { r = (x-w) * (fx-fv); q = (x-v) * (fx-fw); p = (x-v) *q - (x-w) *r; q = 2.0 * (q-r); if (q > 0.0) p = -p; q = fabs(q); etemp = e; e = d; if (fabs(p) >= fabs(0.5 * q * etemp) || p <= q*(a-x) || p >= q*(b-x)) d = CGOLD*(e=(x >= xm ? a-x : b-x)); else { d = p / q; u = x + d; if (u-a < tol2 || b-u < tol2) d = SIGN(tol1, xm-x); } } else d = CGOLD * (e = (x >= xm ? a-x : b-x)); u = (fabs(d) >= tol1 ? x+d : x+SIGN(tol1,d)); fu = (*f)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (fu <= fx) { if (u >= x) { a = x; } else b = x; SHFT(v, w, x, u) SHFT(fv, fw, fx, fu) } else { if (u < x) a = u; else b = u; if (fu <= fw || w == x) { v = w; w = u; fv = fw; fw = fu; } else if (fu <= fv || v == x || v == w) { v = u; fv = fu; } } } nrerror("Too many iterations in BRENT"); *xmin=x; return fx; } #undef ITMAX #undef CGOLD #undef ZEPS #undef SIGN
5db4c81031b2bdb50438425513f2174eaea5e1d7.cu
/* Modified from brent.c by CM on 2004 August 13, so as to accept an absolute tolerance argument in addition to the fractional tolerance argument */ extern "C" { #include "../shape/head.h" } #define ITMAX 100 #define CGOLD 0.3819660 #define ZEPS 1.0e-10 #define SIGN(a,b) ((b) > 0.0 ? fabs(a) : -fabs(a)) #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); __host__ double brent_abs_gpu(double ax,double bx,double cx,double (*f)(double, struct vertices_t**, unsigned char*, unsigned char*, int*, int*, int*, int, int, cudaStream_t*, double**), double tol,double abstol,double *xmin, struct vertices_t **verts, unsigned char *htype, unsigned char *dtype, int *nframes, int *nviews, int *lc_n, int nsets, int nf, cudaStream_t *bf_stream, double **fit_overflow) { int iter; double a, b, d, etemp, fu, fv, fw, fx, p, q, r, tol1, tol2, u, v, w, x, xm; double e = 0.0; double abstol_use = ((abstol > ZEPS) ? abstol : ZEPS); a = ((ax < cx) ? ax : cx); b = ((ax > cx) ? ax : cx); x = w = v = bx; fw = fv = fx = (*f)(x, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); for (iter=1; iter<=ITMAX; iter++) { xm = 0.5 * (a + b); tol2 = 2.0 * (tol1 = tol * fabs(x) + abstol_use); if (fabs(x - xm) <= (tol2 - 0.5 * (b - a))) { *xmin = x; return fx; } if (fabs(e) > tol1) { r = (x-w) * (fx-fv); q = (x-v) * (fx-fw); p = (x-v) *q - (x-w) *r; q = 2.0 * (q-r); if (q > 0.0) p = -p; q = fabs(q); etemp = e; e = d; if (fabs(p) >= fabs(0.5 * q * etemp) || p <= q*(a-x) || p >= q*(b-x)) d = CGOLD*(e=(x >= xm ? a-x : b-x)); else { d = p / q; u = x + d; if (u-a < tol2 || b-u < tol2) d = SIGN(tol1, xm-x); } } else d = CGOLD * (e = (x >= xm ? a-x : b-x)); u = (fabs(d) >= tol1 ? x+d : x+SIGN(tol1,d)); fu = (*f)(u, verts, htype, dtype, nframes, nviews, lc_n, nsets, nf, bf_stream, fit_overflow); if (fu <= fx) { if (u >= x) { a = x; } else b = x; SHFT(v, w, x, u) SHFT(fv, fw, fx, fu) } else { if (u < x) a = u; else b = u; if (fu <= fw || w == x) { v = w; w = u; fv = fw; fw = fu; } else if (fu <= fv || v == x || v == w) { v = u; fv = fu; } } } nrerror("Too many iterations in BRENT"); *xmin=x; return fx; } #undef ITMAX #undef CGOLD #undef ZEPS #undef SIGN
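// ---------------------------------------------------------------------------
// Editor's note: illustrative host-only sketch, not part of the original pair.
// It is the same Numerical-Recipes-style Brent minimizer as above with the
// shape-specific arguments stripped, so the bracketing and tolerance logic can
// be exercised on a plain function of one variable. It simply returns instead
// of calling nrerror when ITMAX is exceeded. Names are hypothetical.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

#define BRENT_ITMAX 100
#define BRENT_CGOLD 0.3819660
#define BRENT_ZEPS 1.0e-10

static double brent_abs_reference(double ax, double bx, double cx, double (*f)(double),
                                  double tol, double abstol, double *xmin) {
  double a, b, d = 0.0, etemp, fu, fv, fw, fx, p, q, r, tol1, tol2, u, v, w, x, xm;
  double e = 0.0;
  double abstol_use = (abstol > BRENT_ZEPS) ? abstol : BRENT_ZEPS;
  a = (ax < cx) ? ax : cx;
  b = (ax > cx) ? ax : cx;
  x = w = v = bx;
  fw = fv = fx = f(x);
  for (int iter = 1; iter <= BRENT_ITMAX; iter++) {
    xm = 0.5 * (a + b);
    tol2 = 2.0 * (tol1 = tol * fabs(x) + abstol_use);
    if (fabs(x - xm) <= (tol2 - 0.5 * (b - a))) { *xmin = x; return fx; }
    if (fabs(e) > tol1) {                        // try a parabolic fit through x, v, w
      r = (x - w) * (fx - fv);
      q = (x - v) * (fx - fw);
      p = (x - v) * q - (x - w) * r;
      q = 2.0 * (q - r);
      if (q > 0.0) p = -p;
      q = fabs(q);
      etemp = e; e = d;
      if (fabs(p) >= fabs(0.5 * q * etemp) || p <= q * (a - x) || p >= q * (b - x))
        d = BRENT_CGOLD * (e = (x >= xm ? a - x : b - x));   // fall back to golden section
      else {
        d = p / q; u = x + d;
        if (u - a < tol2 || b - u < tol2) d = copysign(tol1, xm - x);
      }
    } else {
      d = BRENT_CGOLD * (e = (x >= xm ? a - x : b - x));
    }
    u = (fabs(d) >= tol1 ? x + d : x + copysign(tol1, d));
    fu = f(u);
    if (fu <= fx) {
      if (u >= x) a = x; else b = x;
      v = w; w = x; x = u;
      fv = fw; fw = fx; fx = fu;
    } else {
      if (u < x) a = u; else b = u;
      if (fu <= fw || w == x) { v = w; w = u; fv = fw; fw = fu; }
      else if (fu <= fv || v == x || v == w) { v = u; fv = fu; }
    }
  }
  *xmin = x;   // iteration limit reached; the original calls nrerror here
  return fx;
}

static double parabola(double x) { return (x - 2.0) * (x - 2.0) + 1.0; }

int main() {
  double xmin;
  double fmin = brent_abs_reference(0.0, 1.0, 5.0, parabola, 1e-8, 1e-10, &xmin);
  printf("min f(%g) = %g\n", xmin, fmin);  // expect xmin ~ 2, fmin ~ 1
  return 0;
}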
92d6e1e567fa65da3cd6de9cecea24856e61ac41.hip
// !!! This is a file automatically generated by hipify!!! // // Created by ss on 19-1-15. // #include "thundergbm/metric/multiclass_metric.h" #include "thundergbm/util/device_lambda.cuh" #include "thrust/reduce.h" float_type MulticlassAccuracy::get_score(const SyncArray<float_type> &y_p) const { CHECK_EQ(num_class * y.size(), y_p.size()) << num_class << " * " << y.size() << " != " << y_p.size(); int n_instances = y.size(); auto y_data = y.device_data(); auto yp_data = y_p.device_data(); SyncArray<int> is_true(n_instances); auto is_true_data = is_true.device_data(); int num_class = this->num_class; device_loop(n_instances, [=] __device__(int i){ int max_k = 0; float_type max_p = yp_data[i]; for (int k = 1; k < num_class; ++k) { if (yp_data[k * n_instances + i] > max_p) { max_p = yp_data[k * n_instances + i]; max_k = k; } } is_true_data[i] = max_k == y_data[i]; }); float acc = thrust::reduce(thrust::hip::par, is_true_data, is_true_data + n_instances) / (float) n_instances; return acc; } float_type BinaryClassMetric::get_score(const SyncArray<float_type> &y_p) const { int n_instances = y.size(); auto y_data = y.device_data(); auto yp_data = y_p.device_data(); SyncArray<int> is_true(n_instances); auto is_true_data = is_true.device_data(); device_loop(n_instances, [=] __device__(int i){ int max_k = (yp_data[i] > 0.5) ? 1 : 0; is_true_data[i] = max_k == y_data[i]; }); float acc = thrust::reduce(thrust::hip::par, is_true_data, is_true_data + n_instances) / (float) n_instances; return 1 - acc; }
92d6e1e567fa65da3cd6de9cecea24856e61ac41.cu
// // Created by ss on 19-1-15. // #include "thundergbm/metric/multiclass_metric.h" #include "thundergbm/util/device_lambda.cuh" #include "thrust/reduce.h" float_type MulticlassAccuracy::get_score(const SyncArray<float_type> &y_p) const { CHECK_EQ(num_class * y.size(), y_p.size()) << num_class << " * " << y.size() << " != " << y_p.size(); int n_instances = y.size(); auto y_data = y.device_data(); auto yp_data = y_p.device_data(); SyncArray<int> is_true(n_instances); auto is_true_data = is_true.device_data(); int num_class = this->num_class; device_loop(n_instances, [=] __device__(int i){ int max_k = 0; float_type max_p = yp_data[i]; for (int k = 1; k < num_class; ++k) { if (yp_data[k * n_instances + i] > max_p) { max_p = yp_data[k * n_instances + i]; max_k = k; } } is_true_data[i] = max_k == y_data[i]; }); float acc = thrust::reduce(thrust::cuda::par, is_true_data, is_true_data + n_instances) / (float) n_instances; return acc; } float_type BinaryClassMetric::get_score(const SyncArray<float_type> &y_p) const { int n_instances = y.size(); auto y_data = y.device_data(); auto yp_data = y_p.device_data(); SyncArray<int> is_true(n_instances); auto is_true_data = is_true.device_data(); device_loop(n_instances, [=] __device__(int i){ int max_k = (yp_data[i] > 0.5) ? 1 : 0; is_true_data[i] = max_k == y_data[i]; }); float acc = thrust::reduce(thrust::cuda::par, is_true_data, is_true_data + n_instances) / (float) n_instances; return 1 - acc; }
d3328f69e84dc14421b47f336ceb759eada0b809.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cuda_Shrink_CalU_Vector(float *Y, float *U, float *X, float lambda, float *L1Weight, int nRows, int nCols, int nFilts) { unsigned int Tidx = threadIdx.x + blockIdx.x * blockDim.x; unsigned int Tidy = threadIdx.y + blockIdx.y * blockDim.y, index; float WLambda; float absxV1, X_temp, U_temp, Y_temp; if ((Tidx < nCols) && (Tidy < nRows)) { for (int k = 0; k < nFilts; k += 1) { index = Tidx + (Tidy + nRows * k) * nCols; X_temp = (X[index] / (nRows * nCols)); U_temp = U[index]; WLambda = lambda * L1Weight[k]; Y_temp = X_temp + U_temp; absxV1 = fabs(Y_temp) - WLambda; Y_temp = signbit(-absxV1) * copysign(absxV1, Y_temp); Y[index] = Y_temp; U[index] = U_temp + X_temp - Y_temp; } } }
d3328f69e84dc14421b47f336ceb759eada0b809.cu
#include "includes.h" __global__ void cuda_Shrink_CalU_Vector(float *Y, float *U, float *X, float lambda, float *L1Weight, int nRows, int nCols, int nFilts) { unsigned int Tidx = threadIdx.x + blockIdx.x * blockDim.x; unsigned int Tidy = threadIdx.y + blockIdx.y * blockDim.y, index; float WLambda; float absxV1, X_temp, U_temp, Y_temp; if ((Tidx < nCols) && (Tidy < nRows)) { for (int k = 0; k < nFilts; k += 1) { index = Tidx + (Tidy + nRows * k) * nCols; X_temp = (X[index] / (nRows * nCols)); U_temp = U[index]; WLambda = lambda * L1Weight[k]; Y_temp = X_temp + U_temp; absxV1 = fabs(Y_temp) - WLambda; Y_temp = signbit(-absxV1) * copysign(absxV1, Y_temp); Y[index] = Y_temp; U[index] = U_temp + X_temp - Y_temp; } } }
18665cefcc1ec43dae006b12ddf6f43026e09e4f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "d_bucketsort.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int *d_in = NULL; hipMalloc(&d_in, XSIZE*YSIZE); unsigned int *d_indices = NULL; hipMalloc(&d_indices, XSIZE*YSIZE); unsigned int *d_sublist = NULL; hipMalloc(&d_sublist, XSIZE*YSIZE); unsigned int *r_outputlist = NULL; hipMalloc(&r_outputlist, XSIZE*YSIZE); unsigned int *d_bucketoffsets = NULL; hipMalloc(&d_bucketoffsets, XSIZE*YSIZE); int itemCount = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( d_bucketsort), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_indices,d_sublist,r_outputlist,d_bucketoffsets,itemCount); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( d_bucketsort), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_indices,d_sublist,r_outputlist,d_bucketoffsets,itemCount); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( d_bucketsort), dim3(gridBlock),dim3(threadBlock), 0, 0, d_in,d_indices,d_sublist,r_outputlist,d_bucketoffsets,itemCount); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
18665cefcc1ec43dae006b12ddf6f43026e09e4f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "d_bucketsort.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned int *d_in = NULL; cudaMalloc(&d_in, XSIZE*YSIZE); unsigned int *d_indices = NULL; cudaMalloc(&d_indices, XSIZE*YSIZE); unsigned int *d_sublist = NULL; cudaMalloc(&d_sublist, XSIZE*YSIZE); unsigned int *r_outputlist = NULL; cudaMalloc(&r_outputlist, XSIZE*YSIZE); unsigned int *d_bucketoffsets = NULL; cudaMalloc(&d_bucketoffsets, XSIZE*YSIZE); int itemCount = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); d_bucketsort<<<gridBlock,threadBlock>>>(d_in,d_indices,d_sublist,r_outputlist,d_bucketoffsets,itemCount); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { d_bucketsort<<<gridBlock,threadBlock>>>(d_in,d_indices,d_sublist,r_outputlist,d_bucketoffsets,itemCount); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { d_bucketsort<<<gridBlock,threadBlock>>>(d_in,d_indices,d_sublist,r_outputlist,d_bucketoffsets,itemCount); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3216ca81e16b65ec186113100af0a8b032548d39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __device__ void softmax_device(int n, float *input, float temp, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i]; largest = (val>largest) ? val : largest; } for(i = 0; i < n; ++i){ float e = exp(input[i]/temp - largest/temp); sum += e; output[i] = e; } for(i = 0; i < n; ++i){ output[i] /= sum; } } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); }
3216ca81e16b65ec186113100af0a8b032548d39.cu
#include "includes.h" __device__ void softmax_device(float *input, int n, float temp, int stride, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i*stride]; largest = (val>largest) ? val : largest; } for(i = 0; i < n; ++i){ float e = expf(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } __device__ void softmax_device(int n, float *input, float temp, float *output) { int i; float sum = 0; float largest = -INFINITY; for(i = 0; i < n; ++i){ int val = input[i]; largest = (val>largest) ? val : largest; } for(i = 0; i < n; ++i){ float e = exp(input[i]/temp - largest/temp); sum += e; output[i] = e; } for(i = 0; i < n; ++i){ output[i] /= sum; } } __global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= batch*groups) return; int b = id / groups; int g = id % groups; softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset); }
f7fca518ff97fcf820de8347164a5f3ef7d9d708.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include <opencv2/opencv.hpp> #include <cfloat> #include <opencv2/core/cuda/common.hpp> #include <opencv2/core/cuda/border_interpolate.hpp> #include <opencv2/core/cuda/vec_traits.hpp> #include <opencv2/core/cuda/vec_math.hpp> #include <thrust/device_vector.h> #define MAX_ARRAY_SIZE 400 #define TILE_SIZE 15 /** * Clamps the value val in the interval [lo, high]. * Equivalent to max(lo, min(val, high)). * * @param val: value to clamp. * @param lo: lower bound for the clamping. * @param high: higher bound for the clamping. * @return val clamped between lo and high. */ template< typename T > __device__ T clamp(T val, T lo, T high) { return max(lo, min(val, high)); } /** * Swaps two elements of an array. * @param a: Memory location of first element. * @param b: Memory location of second element. */ template< typename T > __device__ void swap(T* a, T* b) { T temp = *a; *a = *b; *b = temp; return; } /** * Returns the length of a vector. * * @param elem: the vector to obtain the length from. * @return the length of the given vector. */ template< typename T > __device__ float length(T elem) { return sqrt(pow((float)elem.x, 2) + pow((float)elem.y, 2) + pow((float)elem.z, 2)); } /** * Implementation of a standard Lomuto partiton algorithm. * * @param arr: the array to partition. * @param low: starting point for the partition in the array. * @param high: ending point for the partition in the array. * @return the position of the pivot. */ template< typename T > __device__ int partition(T arr[], int low, int high) { T pivot = arr[high]; int i = (low - 1); for (int j = low; j <= high - 1; j++) { if (length<T>(arr[j]) <= length<T>(pivot)) { i++; swap<T>(&arr[i], &arr[j]); } } swap<T>(&arr[i + 1], &arr[high]); return (i + 1); } /** * Returns the kth smallest element from a given array. * * @param arr: the array to find the smallest element from. * @param left: starting point for the search in the array. * @param right: ending point for the search in the array. * @param k: value corresponding to the position of the element to find in the * original array when sorted. * @return the kth smallest element in the array. */ template< typename T > __device__ T kth_smallest(T a[], int left, int right, int k) { while (left <= right) { // Partition a[left..right] around a pivot // and find the position of the pivot int pivotIndex = partition<T>(a, left, right); // If pivot itself is the k-th smallest element if (pivotIndex == k - 1) return a[pivotIndex]; // If there are more than k-1 elements on // left of pivot, then k-th smallest must be // on left side. else if (pivotIndex > k - 1) right = pivotIndex - 1; // Else k-th smallest is on right side. 
else left = pivotIndex + 1; } return a[0]; } __global__ void process(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int rows, int cols, int kernel_size) { const int dst_x = TILE_SIZE * blockIdx.x + threadIdx.x-kernel_size; const int dst_y = TILE_SIZE * blockIdx.y + threadIdx.y-kernel_size; // Filter radius const int kernel_div2 = kernel_size / 2; // Create shared memory using externally passed size extern __shared__ uchar3 tile[]; int px = clamp<float>(dst_x, 0, cols-1); int py = clamp<float>(dst_y, 0, rows-1); // Cache pixels in shared memory tile[threadIdx.y*(TILE_SIZE+kernel_size)+threadIdx.x] = src(py, px); // Wait until all thread cache their pixes values __syncthreads(); bool is_inside_tile = kernel_div2 <= threadIdx.x && threadIdx.x < TILE_SIZE + kernel_div2 && kernel_div2 <= threadIdx.y && threadIdx.y < TILE_SIZE + kernel_div2; if (dst_x < cols && dst_y < rows && is_inside_tile) { uchar3 vals[MAX_ARRAY_SIZE]; int count = 0; for (int m = -kernel_div2; m <= kernel_div2; m++) { for (int n = -kernel_div2; n <= kernel_div2; n++) { int ty = threadIdx.y+n; int tx = threadIdx.x+m; vals[count] = tile[ty*(TILE_SIZE+kernel_size)+tx]; count++; } } int arr_size = (int)pow(kernel_size, 2); uchar3 median = kth_smallest<uchar3>(vals, 0, arr_size, arr_size/2); dst(dst_y, dst_x).x = median.x; dst(dst_y, dst_x).y = median.y; dst(dst_y, dst_x).z = median.z; } } int divUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } void startCUDA (cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, int KERNEL_SIZE) { const dim3 block(TILE_SIZE+KERNEL_SIZE, TILE_SIZE+KERNEL_SIZE); const dim3 grid(divUp(dst.cols, TILE_SIZE)+1, divUp(dst.rows, TILE_SIZE)+1); // Create a tile to process pixels within a block's shared memory int shmem_size = sizeof(uchar3)*(TILE_SIZE+KERNEL_SIZE)*(TILE_SIZE+KERNEL_SIZE); hipLaunchKernelGGL(( process), dim3(grid), dim3(block), shmem_size, 0, src, dst, dst.rows, dst.cols, KERNEL_SIZE); }
f7fca518ff97fcf820de8347164a5f3ef7d9d708.cu
#include<stdio.h> #include<stdlib.h> #include <opencv2/opencv.hpp> #include <cfloat> #include <opencv2/core/cuda/common.hpp> #include <opencv2/core/cuda/border_interpolate.hpp> #include <opencv2/core/cuda/vec_traits.hpp> #include <opencv2/core/cuda/vec_math.hpp> #include <thrust/device_vector.h> #define MAX_ARRAY_SIZE 400 #define TILE_SIZE 15 /** * Clamps the value val in the interval [lo, high]. * Equivalent to max(lo, min(val, high)). * * @param val: value to clamp. * @param lo: lower bound for the clamping. * @param high: higher bound for the clamping. * @return val clamped between lo and high. */ template< typename T > __device__ T clamp(T val, T lo, T high) { return max(lo, min(val, high)); } /** * Swaps two elements of an array. * @param a: Memory location of first element. * @param b: Memory location of second element. */ template< typename T > __device__ void swap(T* a, T* b) { T temp = *a; *a = *b; *b = temp; return; } /** * Returns the length of a vector. * * @param elem: the vector to obtain the length from. * @return the length of the given vector. */ template< typename T > __device__ float length(T elem) { return sqrt(pow((float)elem.x, 2) + pow((float)elem.y, 2) + pow((float)elem.z, 2)); } /** * Implementation of a standard Lomuto partiton algorithm. * * @param arr: the array to partition. * @param low: starting point for the partition in the array. * @param high: ending point for the partition in the array. * @return the position of the pivot. */ template< typename T > __device__ int partition(T arr[], int low, int high) { T pivot = arr[high]; int i = (low - 1); for (int j = low; j <= high - 1; j++) { if (length<T>(arr[j]) <= length<T>(pivot)) { i++; swap<T>(&arr[i], &arr[j]); } } swap<T>(&arr[i + 1], &arr[high]); return (i + 1); } /** * Returns the kth smallest element from a given array. * * @param arr: the array to find the smallest element from. * @param left: starting point for the search in the array. * @param right: ending point for the search in the array. * @param k: value corresponding to the position of the element to find in the * original array when sorted. * @return the kth smallest element in the array. */ template< typename T > __device__ T kth_smallest(T a[], int left, int right, int k) { while (left <= right) { // Partition a[left..right] around a pivot // and find the position of the pivot int pivotIndex = partition<T>(a, left, right); // If pivot itself is the k-th smallest element if (pivotIndex == k - 1) return a[pivotIndex]; // If there are more than k-1 elements on // left of pivot, then k-th smallest must be // on left side. else if (pivotIndex > k - 1) right = pivotIndex - 1; // Else k-th smallest is on right side. 
else left = pivotIndex + 1; } return a[0]; } __global__ void process(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int rows, int cols, int kernel_size) { const int dst_x = TILE_SIZE * blockIdx.x + threadIdx.x-kernel_size; const int dst_y = TILE_SIZE * blockIdx.y + threadIdx.y-kernel_size; // Filter radius const int kernel_div2 = kernel_size / 2; // Create shared memory using externally passed size extern __shared__ uchar3 tile[]; int px = clamp<float>(dst_x, 0, cols-1); int py = clamp<float>(dst_y, 0, rows-1); // Cache pixels in shared memory tile[threadIdx.y*(TILE_SIZE+kernel_size)+threadIdx.x] = src(py, px); // Wait until all thread cache their pixes values __syncthreads(); bool is_inside_tile = kernel_div2 <= threadIdx.x && threadIdx.x < TILE_SIZE + kernel_div2 && kernel_div2 <= threadIdx.y && threadIdx.y < TILE_SIZE + kernel_div2; if (dst_x < cols && dst_y < rows && is_inside_tile) { uchar3 vals[MAX_ARRAY_SIZE]; int count = 0; for (int m = -kernel_div2; m <= kernel_div2; m++) { for (int n = -kernel_div2; n <= kernel_div2; n++) { int ty = threadIdx.y+n; int tx = threadIdx.x+m; vals[count] = tile[ty*(TILE_SIZE+kernel_size)+tx]; count++; } } int arr_size = (int)pow(kernel_size, 2); uchar3 median = kth_smallest<uchar3>(vals, 0, arr_size, arr_size/2); dst(dst_y, dst_x).x = median.x; dst(dst_y, dst_x).y = median.y; dst(dst_y, dst_x).z = median.z; } } int divUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } void startCUDA (cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, int KERNEL_SIZE) { const dim3 block(TILE_SIZE+KERNEL_SIZE, TILE_SIZE+KERNEL_SIZE); const dim3 grid(divUp(dst.cols, TILE_SIZE)+1, divUp(dst.rows, TILE_SIZE)+1); // Create a tile to process pixels within a block's shared memory int shmem_size = sizeof(uchar3)*(TILE_SIZE+KERNEL_SIZE)*(TILE_SIZE+KERNEL_SIZE); process<<<grid, block, shmem_size>>>(src, dst, dst.rows, dst.cols, KERNEL_SIZE); }
39547a53488a864a31be266eb5c8bc57640d34d4.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser / Anyone is free to add their own pair potentials here /*! \file AllDriverPotentialSpecialPairGPU.cu \brief Defines the driver functions for computing all types of special pair forces on the GPU */ #include "EvaluatorSpecialPairLJ.h" #include "EvaluatorSpecialPairCoulomb.h" #include "AllDriverPotentialSpecialPairGPU.cuh" //! LJ special pair potential, internal hipError_t gpu_compute_lj_forces(const bond_args_t& bond_args, const Scalar3 *d_params, unsigned int *d_flags) { return gpu_compute_bond_forces<EvaluatorSpecialPairLJ>(bond_args, d_params, d_flags); } //! Coulomb special pair potential, internal hipError_t gpu_compute_coulomb_forces(const bond_args_t& bond_args, const Scalar2 *d_params, unsigned int *d_flags) { return gpu_compute_bond_forces<EvaluatorSpecialPairCoulomb>(bond_args, d_params, d_flags); }
39547a53488a864a31be266eb5c8bc57640d34d4.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: jglaser / Anyone is free to add their own pair potentials here /*! \file AllDriverPotentialSpecialPairGPU.cu \brief Defines the driver functions for computing all types of special pair forces on the GPU */ #include "EvaluatorSpecialPairLJ.h" #include "EvaluatorSpecialPairCoulomb.h" #include "AllDriverPotentialSpecialPairGPU.cuh" //! LJ special pair potential, internal cudaError_t gpu_compute_lj_forces(const bond_args_t& bond_args, const Scalar3 *d_params, unsigned int *d_flags) { return gpu_compute_bond_forces<EvaluatorSpecialPairLJ>(bond_args, d_params, d_flags); } //! Coulomb special pair potential, internal cudaError_t gpu_compute_coulomb_forces(const bond_args_t& bond_args, const Scalar2 *d_params, unsigned int *d_flags) { return gpu_compute_bond_forces<EvaluatorSpecialPairCoulomb>(bond_args, d_params, d_flags); }
9548e4e40e4e1b37486e1e6a07c545c953b82c27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/bboxUtils.h" #include "common/kernel.h" #define CUBLAS_CHECK(condition) \ do \ { \ hipblasStatus_t status = condition; \ if (status != HIPBLAS_STATUS_SUCCESS) \ { \ printf("%s %d CUBLAS FAIL %s\n", __FILE__, __LINE__, cublasGetErrorString(status)); \ } \ } while (0) namespace nvinfer1 { namespace plugin { size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W) { if (acrossSpatial) return sizeof(float) * C * H * W; else return (size_t) 0; } } // namespace plugin } // namespace nvinfer1 size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W) { if (acrossSpatial) return sizeof(float) * C * H * W; else return (size_t) 0; } template <unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void normalizeNotAcrossSpatialKernel( const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const float* scale, float* inputData, float* outputData) { const int dim = C * H * W; const int spatialDim = H * W; const int tile = 32; const int numTile = (spatialDim + tile - 1) / tile; for (int n = blockIdx.x; n < N * numTile; n += gridDim.x) { float* input = inputData + (n / numTile) * dim; float* output = outputData + (n / numTile) * dim; __shared__ float sum[tile]; float localsum = 0.0F; for (int i = threadIdx.x; i < tile; i += nthds_per_cta) { sum[i] = 0.0F; } __syncthreads(); for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; float data = 0.0F; if (col < spatialDim) data = input[row * spatialDim + col]; localsum += data * data; } atomicAdd(&sum[threadIdx.x & 31], localsum); __syncthreads(); for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; if (col < spatialDim) { int offset = row * spatialDim + col; output[offset] = input[offset] / sqrt(sum[threadIdx.x & 31] + eps); } } if (channelShared) { for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; if (col < spatialDim) output[row * spatialDim + col] *= scale[0]; } } else { for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; if (col < spatialDim) output[row * spatialDim + col] *= scale[row]; } } } } pluginStatus_t normalizeNotAcrossSpatialGpu( hipStream_t stream, const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const void* scale, const void* inputData, void* outputData) { const int BS = 128; const int GS = 256; // assumes warp size == 32 PLUGIN_ASSERT(BS % 32 == 0); hipLaunchKernelGGL(( normalizeNotAcrossSpatialKernel<BS>), dim3(GS), dim3(BS), 0, stream, 
channelShared, N, C, H, W, eps, (const float*) scale, (float*) inputData, (float*) outputData); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } __global__ void squareKernel( const int n, const float* x, float* y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { y[i] = x[i] * x[i]; } } __global__ void scalChannelKernel( const int n, const int spatialDim, const float* inputData, const float* scale, float* outputData) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { // scale factors are indepedent across different channels // scale[i / spatialDim]: find the right scale factor for specific channels outputData[i] = inputData[i] / scale[i / spatialDim]; } } namespace nvinfer1 { namespace plugin { pluginStatus_t normalizeInference( hipStream_t stream, hipblasHandle_t handle, const bool acrossSpatial, const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const void* scale, const void* inputData, void* outputData, void* workspace) { const int dim = C * H * W; // Normalization is conducted for each sample from the batch indepdently if (acrossSpatial) { float* input = (float*) const_cast<void*>(inputData); float* output = (float*) outputData; float* buffer = (float*) workspace; for (int n = 0; n < N; ++n) { // Take the square of each element in the input hipLaunchKernelGGL(( squareKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, input, buffer); float normsqr = 0.0F; // Sum up all the squared elements CUBLAS_CHECK(hipblasSasum(handle, dim, buffer, 1, &normsqr)); // Make a copy of the input to the output CUBLAS_CHECK(hipblasScopy(handle, dim, input, 1, output, 1)); // Calculate the inverse of the square root of the sum // Use eps to prevent being divided by zero normsqr = 1 / sqrt(normsqr + eps); // Scale all the outputs by normsqr CUBLAS_CHECK(hipblasSscal(handle, dim, &normsqr, output, 1)); // If channel shared is true, scale all the outputs if (channelShared) { CUBLAS_CHECK(hipblasSscal(handle, dim, (float*) scale, output, 1)); } // Use different scale factors for different channels else { // scale the output according to channels hipLaunchKernelGGL(( scalChannelKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, H * W, output, (float*) scale, output); } // Move cursors input += dim; output += dim; } return STATUS_SUCCESS; } // Normalization ignoring the batch else { return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData); } } } // namespace plugin } // namespace nvinfer1 pluginStatus_t normalizeInference( hipStream_t stream, hipblasHandle_t handle, const bool acrossSpatial, const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const void* scale, const void* inputData, void* outputData, void* workspace) { const int dim = C * H * W; // Normalization is conducted for each sample from the batch indepdently if (acrossSpatial) { float* input = (float*) const_cast<void*>(inputData); float* output = (float*) outputData; float* buffer = (float*) workspace; for (int n = 0; n < N; ++n) { // Take the square of each element in the input hipLaunchKernelGGL(( squareKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, input, buffer); float normsqr = 0.0F; // Sum up all the squared elements CUBLAS_CHECK(hipblasSasum(handle, dim, buffer, 1, &normsqr)); // Make a copy of the input to the output CUBLAS_CHECK(hipblasScopy(handle, dim, input, 1, output, 1)); // 
Calculate the inverse of the square root of the sum // Use eps to prevent being divided by zero normsqr = 1 / sqrt(normsqr + eps); // Scale all the outputs by normsqr CUBLAS_CHECK(hipblasSscal(handle, dim, &normsqr, output, 1)); // If channel shared is true, scale all the outputs if (channelShared) { CUBLAS_CHECK(hipblasSscal(handle, dim, (float*) scale, output, 1)); } // Use different scale factors for different channels else { // scale the output according to channels hipLaunchKernelGGL(( scalChannelKernel), dim3((dim + 511) / 512), dim3(512), 0, stream, dim, H * W, output, (float*) scale, output); } // Move cursors input += dim; output += dim; } return STATUS_SUCCESS; } // Normalization ignoring the batch else { return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData); } }
9548e4e40e4e1b37486e1e6a07c545c953b82c27.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/bboxUtils.h" #include "common/kernel.h" #define CUBLAS_CHECK(condition) \ do \ { \ cublasStatus_t status = condition; \ if (status != CUBLAS_STATUS_SUCCESS) \ { \ printf("%s %d CUBLAS FAIL %s\n", __FILE__, __LINE__, cublasGetErrorString(status)); \ } \ } while (0) namespace nvinfer1 { namespace plugin { size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W) { if (acrossSpatial) return sizeof(float) * C * H * W; else return (size_t) 0; } } // namespace plugin } // namespace nvinfer1 size_t normalizePluginWorkspaceSize(bool acrossSpatial, int C, int H, int W) { if (acrossSpatial) return sizeof(float) * C * H * W; else return (size_t) 0; } template <unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void normalizeNotAcrossSpatialKernel( const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const float* scale, float* inputData, float* outputData) { const int dim = C * H * W; const int spatialDim = H * W; const int tile = 32; const int numTile = (spatialDim + tile - 1) / tile; for (int n = blockIdx.x; n < N * numTile; n += gridDim.x) { float* input = inputData + (n / numTile) * dim; float* output = outputData + (n / numTile) * dim; __shared__ float sum[tile]; float localsum = 0.0F; for (int i = threadIdx.x; i < tile; i += nthds_per_cta) { sum[i] = 0.0F; } __syncthreads(); for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; float data = 0.0F; if (col < spatialDim) data = input[row * spatialDim + col]; localsum += data * data; } atomicAdd(&sum[threadIdx.x & 31], localsum); __syncthreads(); for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; if (col < spatialDim) { int offset = row * spatialDim + col; output[offset] = input[offset] / sqrt(sum[threadIdx.x & 31] + eps); } } if (channelShared) { for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; if (col < spatialDim) output[row * spatialDim + col] *= scale[0]; } } else { for (int i = threadIdx.x; i < C * tile; i += nthds_per_cta) { int row = i / tile; int col = (n % numTile) * tile + i % tile; if (col < spatialDim) output[row * spatialDim + col] *= scale[row]; } } } } pluginStatus_t normalizeNotAcrossSpatialGpu( cudaStream_t stream, const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const void* scale, const void* inputData, void* outputData) { const int BS = 128; const int GS = 256; // assumes warp size == 32 PLUGIN_ASSERT(BS % 32 == 0); normalizeNotAcrossSpatialKernel<BS><<<GS, BS, 0, stream>>>( channelShared, N, C, H, W, eps, (const float*) scale, (float*) inputData, (float*) outputData); CSC(cudaGetLastError(), 
STATUS_FAILURE); return STATUS_SUCCESS; } __global__ void squareKernel( const int n, const float* x, float* y) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { y[i] = x[i] * x[i]; } } __global__ void scalChannelKernel( const int n, const int spatialDim, const float* inputData, const float* scale, float* outputData) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) { // scale factors are indepedent across different channels // scale[i / spatialDim]: find the right scale factor for specific channels outputData[i] = inputData[i] / scale[i / spatialDim]; } } namespace nvinfer1 { namespace plugin { pluginStatus_t normalizeInference( cudaStream_t stream, cublasHandle_t handle, const bool acrossSpatial, const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const void* scale, const void* inputData, void* outputData, void* workspace) { const int dim = C * H * W; // Normalization is conducted for each sample from the batch indepdently if (acrossSpatial) { float* input = (float*) const_cast<void*>(inputData); float* output = (float*) outputData; float* buffer = (float*) workspace; for (int n = 0; n < N; ++n) { // Take the square of each element in the input squareKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, input, buffer); float normsqr = 0.0F; // Sum up all the squared elements CUBLAS_CHECK(cublasSasum(handle, dim, buffer, 1, &normsqr)); // Make a copy of the input to the output CUBLAS_CHECK(cublasScopy(handle, dim, input, 1, output, 1)); // Calculate the inverse of the square root of the sum // Use eps to prevent being divided by zero normsqr = 1 / sqrt(normsqr + eps); // Scale all the outputs by normsqr CUBLAS_CHECK(cublasSscal(handle, dim, &normsqr, output, 1)); // If channel shared is true, scale all the outputs if (channelShared) { CUBLAS_CHECK(cublasSscal(handle, dim, (float*) scale, output, 1)); } // Use different scale factors for different channels else { // scale the output according to channels scalChannelKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, H * W, output, (float*) scale, output); } // Move cursors input += dim; output += dim; } return STATUS_SUCCESS; } // Normalization ignoring the batch else { return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData); } } } // namespace plugin } // namespace nvinfer1 pluginStatus_t normalizeInference( cudaStream_t stream, cublasHandle_t handle, const bool acrossSpatial, const bool channelShared, const int N, const int C, const int H, const int W, const float eps, const void* scale, const void* inputData, void* outputData, void* workspace) { const int dim = C * H * W; // Normalization is conducted for each sample from the batch indepdently if (acrossSpatial) { float* input = (float*) const_cast<void*>(inputData); float* output = (float*) outputData; float* buffer = (float*) workspace; for (int n = 0; n < N; ++n) { // Take the square of each element in the input squareKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, input, buffer); float normsqr = 0.0F; // Sum up all the squared elements CUBLAS_CHECK(cublasSasum(handle, dim, buffer, 1, &normsqr)); // Make a copy of the input to the output CUBLAS_CHECK(cublasScopy(handle, dim, input, 1, output, 1)); // Calculate the inverse of the square root of the sum // Use eps to prevent being divided by zero normsqr = 1 / sqrt(normsqr + eps); // Scale all the outputs by normsqr CUBLAS_CHECK(cublasSscal(handle, dim, &normsqr, 
output, 1)); // If channel shared is true, scale all the outputs if (channelShared) { CUBLAS_CHECK(cublasSscal(handle, dim, (float*) scale, output, 1)); } // Use different scale factors for different channels else { // scale the output according to channels scalChannelKernel<<<(dim + 511) / 512, 512, 0, stream>>>(dim, H * W, output, (float*) scale, output); } // Move cursors input += dim; output += dim; } return STATUS_SUCCESS; } // Normalization ignoring the batch else { return normalizeNotAcrossSpatialGpu(stream, channelShared, N, C, H, W, eps, scale, inputData, outputData); } }
f2f90f363bdf2c6bfda499992de363e1c0e271a7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "calcEnergyParallel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *atoms = NULL; hipMalloc(&atoms, XSIZE*YSIZE); int numAtoms = 1; int *energies = NULL; hipMalloc(&energies, XSIZE*YSIZE); int numEnergies = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( calcEnergyParallel), dim3(gridBlock),dim3(threadBlock), 0, 0, atoms,numAtoms,energies,numEnergies); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( calcEnergyParallel), dim3(gridBlock),dim3(threadBlock), 0, 0, atoms,numAtoms,energies,numEnergies); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( calcEnergyParallel), dim3(gridBlock),dim3(threadBlock), 0, 0, atoms,numAtoms,energies,numEnergies); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f2f90f363bdf2c6bfda499992de363e1c0e271a7.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "calcEnergyParallel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *atoms = NULL; cudaMalloc(&atoms, XSIZE*YSIZE); int numAtoms = 1; int *energies = NULL; cudaMalloc(&energies, XSIZE*YSIZE); int numEnergies = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); calcEnergyParallel<<<gridBlock,threadBlock>>>(atoms,numAtoms,energies,numEnergies); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { calcEnergyParallel<<<gridBlock,threadBlock>>>(atoms,numAtoms,energies,numEnergies); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { calcEnergyParallel<<<gridBlock,threadBlock>>>(atoms,numAtoms,energies,numEnergies); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0222a6a606d3d4569eb2bcf0ac6ce8afc1274d51.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include "common.h" #define BLOCK_SIZE 1024 /* **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,128 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,1,1024 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,128 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,1,1024 overaxes: 1,1,1,0; **LA_Variance: Rank: 2 dims: 5,512,0,0 overaxes: 1,0,0,0; **LA_Variance: Rank: 2 dims: 5,256,0,0 overaxes: 1,0,0,0; **LA_Variance: Rank: 2 dims: 5,512,0,0 overaxes: 1,0,0,0; **LA_Variance: Rank: 2 dims: 5,256,0,0 overaxes: 1,0,0,0; Rank and Axes : -Rank=4 ==> TTTF -Rank=2 ==> TF */ extern __global__ void kernel_reduce_sum_4d_try04( const float * __restrict__ g_idata, float * __restrict__ g_buff, float * __restrict__ g_odata, const int pow_y, const unsigned long dim0, const unsigned long dim1, const unsigned long dim2, const unsigned long dim3, const bool overaxis0, const bool overaxis1, const bool overaxis2, const bool overaxis3, const unsigned long TGC, const unsigned long TGPB, const unsigned long SPT, const unsigned long TGO); extern __global__ void kernel_divide_by_const_try01( const float * __restrict__ g_idata, float * __restrict__ g_odata, const unsigned long dim, const float coef); // g_odata = g_a[i] / const_k - g_b[i]*g_b[i] __global__ void kernel_multiply_const_sub_try01( const float * __restrict__ g_a, const float * __restrict__ g_b, const float const_k, float * __restrict__ g_odata, const unsigned long dim) { unsigned long tidx = blockIdx.x * blockDim.x + threadIdx.x; if(tidx<dim){ //printf("*** tidx: %ld, coef: %f \t\t g_a: %f \t\t g_b: %f\n",tidx,const_k, g_a[tidx], g_b[tidx]); g_odata[tidx] = g_a[tidx] / const_k - g_b[tidx]*g_b[tidx]; //printf("*** tidx: %ld, g_o: %f\n",tidx, g_odata[tidx]); } } void reduce_variance_4d_try01( float* g_idata, float* g_odata, unsigned long dim0, unsigned long dim1, unsigned long dim2, unsigned long dim3, bool overaxis0, bool overaxis1, bool overaxis2, bool overaxis3) { hipStream_t local_stream; hipStreamCreate(&local_stream); float* g_tempbuff; float* g_variance_xi2; float* g_median; if( !(overaxis0 && overaxis1 && overaxis2 && !overaxis3) ) { printf("ERROR @reduce_sum_4d_try01 --NOT IMPLEMENTED\n"); return; } // 1. 
reduce_sum (MEDIAN ) { unsigned long block = BLOCK_SIZE; unsigned long SPT, TGC, TGO, TGPB, grid, TPG; //Dim3 slice per thread SPT = 512; //cte //thread group offset TGO = dim3 * SPT; //thread group count TGC = (unsigned long) ((dim0 * dim1 * dim2 + (SPT - 1)) / SPT); //thread group per block TGPB = (unsigned long) ((BLOCK_SIZE) / dim3); if (TGPB % 2 && TGPB > 1) TGPB--; //grid size grid = (TGC + (TGPB - 1)) / TGPB; TPG = (unsigned long) dim3; //threads per group //printf("-------------------------------------------------------\n"); //printf("KERNEL_GRID : %ld\n", grid); //printf("KERNEL_BLOCK : %ld\n", block); //printf("KERNEL_SPT : %ld\n", SPT); //printf("KERNEL_TGO : %ld\n", TGO); //printf("KERNEL_TGC : %ld\n", TGC); //printf("KERNEL_TGPB : %ld\n", TGPB); CHECK(hipMalloc((float **) &g_tempbuff, (dim3) * sizeof(float))); // ThreadGroupCount * ThreadsPerGroup CHECK(hipMalloc((float **) &g_median, (dim3) * sizeof(float))); // ThreadGroupCount * ThreadsPerGroup CHECK(hipMalloc((float **) &g_variance_xi2, (dim3) * sizeof(float))); // ThreadGroupCount * ThreadsPerGroup CHECK(hipMemset(g_tempbuff, 0, (dim3) * sizeof(float))); kernel_reduce_sum_4d_try04 << < grid, block, TGPB * TPG * sizeof(float), local_stream >> > ( g_idata, nullptr, g_tempbuff,1, dim0, dim1, dim2, dim3, overaxis0, overaxis1, overaxis2, overaxis3, TGC, TGPB, SPT, TGO ); CHECK(hipMemset(g_variance_xi2, 0, (dim3) * sizeof(float))); kernel_reduce_sum_4d_try04 << < grid, block, TGPB * TPG * sizeof(float), local_stream >> > ( g_idata, nullptr, g_variance_xi2,2, dim0, dim1, dim2, dim3, overaxis0, overaxis1, overaxis2, overaxis3, TGC, TGPB, SPT, TGO ); } // 2. Multiplying (1/n) to each element of resulted tensor from step 1. (MEDIAN ) // 3. Compute Variance { unsigned long len = dim3; //Axes combination is TTTF unsigned long block,grid; float coef = (dim0*dim1*dim2); //printf("WRAPPER: COEF: %f\n",coef); block = BLOCK_SIZE; grid = (len + block -1 )/(block); hipLaunchKernelGGL(( kernel_divide_by_const_try01) , dim3(grid), dim3(block), 0, local_stream , g_tempbuff, g_median, len,coef ); CHECK(hipFree(g_tempbuff)); hipLaunchKernelGGL(( kernel_multiply_const_sub_try01) , dim3(grid), dim3(block), 0, local_stream , g_variance_xi2, g_median, coef, g_odata, len); CHECK(hipFree(g_variance_xi2)); CHECK(hipFree(g_median)); } }
0222a6a606d3d4569eb2bcf0ac6ce8afc1274d51.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include "common.h" #define BLOCK_SIZE 1024 /* **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,128 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,1,1024 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,64 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,20,128 overaxes: 1,1,1,0; **LA_Variance: Rank: 4 dims: 5,1024,1,1024 overaxes: 1,1,1,0; **LA_Variance: Rank: 2 dims: 5,512,0,0 overaxes: 1,0,0,0; **LA_Variance: Rank: 2 dims: 5,256,0,0 overaxes: 1,0,0,0; **LA_Variance: Rank: 2 dims: 5,512,0,0 overaxes: 1,0,0,0; **LA_Variance: Rank: 2 dims: 5,256,0,0 overaxes: 1,0,0,0; Rank and Axes : -Rank=4 ==> TTTF -Rank=2 ==> TF */ extern __global__ void kernel_reduce_sum_4d_try04( const float * __restrict__ g_idata, float * __restrict__ g_buff, float * __restrict__ g_odata, const int pow_y, const unsigned long dim0, const unsigned long dim1, const unsigned long dim2, const unsigned long dim3, const bool overaxis0, const bool overaxis1, const bool overaxis2, const bool overaxis3, const unsigned long TGC, const unsigned long TGPB, const unsigned long SPT, const unsigned long TGO); extern __global__ void kernel_divide_by_const_try01( const float * __restrict__ g_idata, float * __restrict__ g_odata, const unsigned long dim, const float coef); // g_odata = g_a[i] / const_k - g_b[i]*g_b[i] __global__ void kernel_multiply_const_sub_try01( const float * __restrict__ g_a, const float * __restrict__ g_b, const float const_k, float * __restrict__ g_odata, const unsigned long dim) { unsigned long tidx = blockIdx.x * blockDim.x + threadIdx.x; if(tidx<dim){ //printf("*** tidx: %ld, coef: %f \t\t g_a: %f \t\t g_b: %f\n",tidx,const_k, g_a[tidx], g_b[tidx]); g_odata[tidx] = g_a[tidx] / const_k - g_b[tidx]*g_b[tidx]; //printf("*** tidx: %ld, g_o: %f\n",tidx, g_odata[tidx]); } } void reduce_variance_4d_try01( float* g_idata, float* g_odata, unsigned long dim0, unsigned long dim1, unsigned long dim2, unsigned long dim3, bool overaxis0, bool overaxis1, bool overaxis2, bool overaxis3) { cudaStream_t local_stream; cudaStreamCreate(&local_stream); float* g_tempbuff; float* g_variance_xi2; float* g_median; if( !(overaxis0 && overaxis1 && overaxis2 && !overaxis3) ) { printf("ERROR @reduce_sum_4d_try01 --NOT IMPLEMENTED\n"); return; } // 1. 
reduce_sum (MEDIAN ) { unsigned long block = BLOCK_SIZE; unsigned long SPT, TGC, TGO, TGPB, grid, TPG; //Dim3 slice per thread SPT = 512; //cte //thread group offset TGO = dim3 * SPT; //thread group count TGC = (unsigned long) ((dim0 * dim1 * dim2 + (SPT - 1)) / SPT); //thread group per block TGPB = (unsigned long) ((BLOCK_SIZE) / dim3); if (TGPB % 2 && TGPB > 1) TGPB--; //grid size grid = (TGC + (TGPB - 1)) / TGPB; TPG = (unsigned long) dim3; //threads per group //printf("-------------------------------------------------------\n"); //printf("KERNEL_GRID : %ld\n", grid); //printf("KERNEL_BLOCK : %ld\n", block); //printf("KERNEL_SPT : %ld\n", SPT); //printf("KERNEL_TGO : %ld\n", TGO); //printf("KERNEL_TGC : %ld\n", TGC); //printf("KERNEL_TGPB : %ld\n", TGPB); CHECK(cudaMalloc((float **) &g_tempbuff, (dim3) * sizeof(float))); // ThreadGroupCount * ThreadsPerGroup CHECK(cudaMalloc((float **) &g_median, (dim3) * sizeof(float))); // ThreadGroupCount * ThreadsPerGroup CHECK(cudaMalloc((float **) &g_variance_xi2, (dim3) * sizeof(float))); // ThreadGroupCount * ThreadsPerGroup CHECK(cudaMemset(g_tempbuff, 0, (dim3) * sizeof(float))); kernel_reduce_sum_4d_try04 << < grid, block, TGPB * TPG * sizeof(float), local_stream >> > ( g_idata, nullptr, g_tempbuff,1, dim0, dim1, dim2, dim3, overaxis0, overaxis1, overaxis2, overaxis3, TGC, TGPB, SPT, TGO ); CHECK(cudaMemset(g_variance_xi2, 0, (dim3) * sizeof(float))); kernel_reduce_sum_4d_try04 << < grid, block, TGPB * TPG * sizeof(float), local_stream >> > ( g_idata, nullptr, g_variance_xi2,2, dim0, dim1, dim2, dim3, overaxis0, overaxis1, overaxis2, overaxis3, TGC, TGPB, SPT, TGO ); } // 2. Multiplying (1/n) to each element of resulted tensor from step 1. (MEDIAN ) // 3. Compute Variance { unsigned long len = dim3; //Axes combination is TTTF unsigned long block,grid; float coef = (dim0*dim1*dim2); //printf("WRAPPER: COEF: %f\n",coef); block = BLOCK_SIZE; grid = (len + block -1 )/(block); kernel_divide_by_const_try01 <<< grid, block, 0, local_stream >>> ( g_tempbuff, g_median, len,coef ); CHECK(cudaFree(g_tempbuff)); kernel_multiply_const_sub_try01 <<< grid, block, 0, local_stream >>>( g_variance_xi2, g_median, coef, g_odata, len); CHECK(cudaFree(g_variance_xi2)); CHECK(cudaFree(g_median)); } }
dd0d08ffe14e824cdcac5c4fb061100dcc8361ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <thrust/device_vector.h> #include <string.h> #include <hipfft.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include "GpuTimer.h" typedef float2 Complex; #define NUM_RECORDS 2000 #define RECORD_LENGTH 500 #define MAX_THREADS_PER_BLOCK 512 __global__ void hit(hipfftComplex* d_matrix, int num_records, int record_length) { int signalIndex = threadIdx.x; if(signalIndex >= record_length) return; int recordIndex = blockIdx.x; int idx = recordIndex*record_length + signalIndex; float2 temp = d_matrix[idx]; if(signalIndex <= record_length/2) { temp.x = temp.x * 2; temp.y = temp.y * 2; d_matrix[idx] = temp; } else if(signalIndex < record_length) { temp.x = 0; temp.y = 0; d_matrix[idx] = temp; } } __global__ void hft(hipfftComplex* d_matrix, hipfftComplex* d_original, int num_record, int record_length) { int signalIndex = threadIdx.x; if(signalIndex >= record_length) return; int recordIndex = blockIdx.x; int idx = recordIndex*record_length + signalIndex; //d_matrix[idx].x = d_original[idx].x ; d_matrix[idx].y = d_matrix[idx].y / record_length; d_matrix[idx].x = sqrtf(powf(d_original[idx].x,2) + powf(d_matrix[idx].y,2)); } __global__ void max_kernel(float * d_out, hipfftComplex * d_in) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // Compare elements in first half with second half for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { if(d_in[myId + s].x > d_in[myId].x) d_in[myId].x = d_in[myId + s].x; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[myId].x; } } /********************/ /* CUDA ERROR CHECK */ /********************/ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPU assert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } int main() { int blockSize = RECORD_LENGTH; int gridSize = NUM_RECORDS; //float samples[10] = {-0.1, 0.2,0.3,-0.2,-0.1, 0.2,0.3,-0.2,-0.4, 0.01}; int mem_size = sizeof(Complex)*NUM_RECORDS*RECORD_LENGTH; Complex* h_matrix = (Complex*)malloc(mem_size); for (int j=0; j<NUM_RECORDS; j++) { for (int i=0; i<RECORD_LENGTH; i++) { float2 temp; temp.x = rand()/float(RAND_MAX); //temp.x = samples[i]; temp.y = 0.f; h_matrix[j*RECORD_LENGTH+i] = temp; if(j==0 && RECORD_LENGTH < 100) printf("(%2.2f\t %2.2f) \n", temp.x, temp.y); } //printf("\n"); } // --- Advanced data layout // input[b * idist + x * istride] // output[b * odist + x * ostride] // b = signal number // x = element of the b-th signal hipfftHandle handle; int rank = 1; // --- 1D FFTs int n[] = { RECORD_LENGTH }; // --- Size of the Fourier transform int istride = 1, ostride = 1; // --- Distance between two successive input/output elements int idist = RECORD_LENGTH, odist = RECORD_LENGTH; // --- Distance between batches int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms) int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms) int batch = NUM_RECORDS; // --- Number of batched executions hipfftPlanMany(&handle, rank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, batch); GpuTimer timer; Complex* h_pinned; Complex* d_matrix; Complex* d_original; float* h_max = (float*)malloc(NUM_RECORDS*sizeof(float)); 
float* d_max; hipHostMalloc((void**)&h_pinned, mem_size); hipMalloc((void**)&d_matrix, mem_size); hipMalloc((void**)&d_original, mem_size); hipMalloc((void**)&d_max, NUM_RECORDS*sizeof(float)); timer.Start(); // Pinned Host Memory memcpy(h_pinned, h_matrix, mem_size); hipMemcpy(d_matrix, h_pinned, mem_size, hipMemcpyHostToDevice); hipMemcpy(d_original, h_pinned, mem_size, hipMemcpyHostToDevice); //- Pinned Host Memory //// Pageable Host Memory //hipMemcpy(d_matrix, h_matrix, mem_size, hipMemcpyHostToDevice); //hipMemcpy(d_original, h_matrix, mem_size, hipMemcpyHostToDevice); ////- Pageable Host Memory hipfftExecC2C(handle, (hipfftComplex*)(d_matrix), (hipfftComplex*)(d_matrix), HIPFFT_FORWARD); hipLaunchKernelGGL(( hit), dim3(gridSize), dim3(blockSize), 0, 0, (hipfftComplex*)(d_matrix), NUM_RECORDS, RECORD_LENGTH); hipfftExecC2C(handle, (hipfftComplex*)(d_matrix), (hipfftComplex*)(d_matrix), HIPFFT_BACKWARD); hipLaunchKernelGGL(( hft), dim3(gridSize), dim3(blockSize), 0, 0, (hipfftComplex*)(d_matrix), (hipfftComplex*)(d_original), NUM_RECORDS, RECORD_LENGTH); hipLaunchKernelGGL(( max_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_max, (hipfftComplex*)(d_matrix)); hipMemcpy(h_max, d_max, NUM_RECORDS*sizeof(float), hipMemcpyDeviceToHost); // Pinned Host Memory hipMemcpy(h_pinned, d_matrix, mem_size, hipMemcpyDeviceToHost); //- Pinned Host Memory //// Pageable Host Memory //hipMemcpy(h_matrix, d_matrix, mem_size, hipMemcpyDeviceToHost); ////- Pageable Host Memory timer.Stop(); float ms = timer.Elapsed(); printf("Matrix Transform Done! Time Elapsed: %f ms\n", ms); float magnitude; float max =0; for (int j=0; j<NUM_RECORDS; j++) { for (int i=0; i<RECORD_LENGTH; i++) { float2 temp = h_pinned[j*RECORD_LENGTH+i]; if(j==0)// && RECORD_LENGTH < 100) { //magnitude = sqrtf(powf(temp.x,2.0f) + powf(temp.y,2.0f)); //printf("(%2.2f\t %2.2f), %2.2f\n", temp.x, temp.y); if(temp.x > max) max = temp.x; } } if(j==0) printf("%f %f\n", h_max[j], max); } hipfftDestroy(handle); free(h_matrix); hipFree(d_matrix); hipFree(d_original); hipHostFree(h_pinned); }
dd0d08ffe14e824cdcac5c4fb061100dcc8361ec.cu
//#include <thrust/device_vector.h>
#include <string.h>
#include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "GpuTimer.h"

typedef float2 Complex;

#define NUM_RECORDS 2000
#define RECORD_LENGTH 500
#define MAX_THREADS_PER_BLOCK 512

__global__ void hit(cufftComplex* d_matrix, int num_records, int record_length)
{
    int signalIndex = threadIdx.x;
    if(signalIndex >= record_length)
        return;

    int recordIndex = blockIdx.x;
    int idx = recordIndex*record_length + signalIndex;

    float2 temp = d_matrix[idx];
    if(signalIndex <= record_length/2)
    {
        temp.x = temp.x * 2;
        temp.y = temp.y * 2;
        d_matrix[idx] = temp;
    }
    else if(signalIndex < record_length)
    {
        temp.x = 0;
        temp.y = 0;
        d_matrix[idx] = temp;
    }
}

__global__ void hft(cufftComplex* d_matrix, cufftComplex* d_original, int num_record, int record_length)
{
    int signalIndex = threadIdx.x;
    if(signalIndex >= record_length)
        return;

    int recordIndex = blockIdx.x;
    int idx = recordIndex*record_length + signalIndex;

    //d_matrix[idx].x = d_original[idx].x ;
    d_matrix[idx].y = d_matrix[idx].y / record_length;
    d_matrix[idx].x = sqrtf(powf(d_original[idx].x,2) + powf(d_matrix[idx].y,2));
}

__global__ void max_kernel(float * d_out, cufftComplex * d_in)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;

    // Compare elements in first half with second half
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            if(d_in[myId + s].x > d_in[myId].x)
                d_in[myId].x = d_in[myId + s].x;
        }
        __syncthreads();            // make sure all adds at one stage are done!
    }

    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = d_in[myId].x;
    }
}

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

int main()
{
    int blockSize = RECORD_LENGTH;
    int gridSize = NUM_RECORDS;

    //float samples[10] = {-0.1, 0.2,0.3,-0.2,-0.1, 0.2,0.3,-0.2,-0.4, 0.01};

    int mem_size = sizeof(Complex)*NUM_RECORDS*RECORD_LENGTH;
    Complex* h_matrix = (Complex*)malloc(mem_size);

    for (int j=0; j<NUM_RECORDS; j++)
    {
        for (int i=0; i<RECORD_LENGTH; i++)
        {
            float2 temp;
            temp.x = rand()/float(RAND_MAX);
            //temp.x = samples[i];
            temp.y = 0.f;
            h_matrix[j*RECORD_LENGTH+i] = temp;
            if(j==0 && RECORD_LENGTH < 100)
                printf("(%2.2f\t %2.2f) \n", temp.x, temp.y);
        }
        //printf("\n");
    }

    // --- Advanced data layout
    //     input[b * idist + x * istride]
    //     output[b * odist + x * ostride]
    //     b = signal number
    //     x = element of the b-th signal

    cufftHandle handle;
    int rank = 1;                                       // --- 1D FFTs
    int n[] = { RECORD_LENGTH };                        // --- Size of the Fourier transform
    int istride = 1, ostride = 1;                       // --- Distance between two successive input/output elements
    int idist = RECORD_LENGTH, odist = RECORD_LENGTH;   // --- Distance between batches
    int inembed[] = { 0 };                              // --- Input size with pitch (ignored for 1D transforms)
    int onembed[] = { 0 };                              // --- Output size with pitch (ignored for 1D transforms)
    int batch = NUM_RECORDS;                            // --- Number of batched executions
    cufftPlanMany(&handle, rank, n,
                  inembed, istride, idist,
                  onembed, ostride, odist, CUFFT_C2C, batch);

    GpuTimer timer;

    Complex* h_pinned;
    Complex* d_matrix;
    Complex* d_original;
    float* h_max = (float*)malloc(NUM_RECORDS*sizeof(float));
    float* d_max;

    cudaMallocHost((void**)&h_pinned, mem_size);
    cudaMalloc((void**)&d_matrix, mem_size);
    cudaMalloc((void**)&d_original, mem_size);
    cudaMalloc((void**)&d_max, NUM_RECORDS*sizeof(float));

    timer.Start();

    // Pinned Host Memory
    memcpy(h_pinned, h_matrix, mem_size);
    cudaMemcpy(d_matrix, h_pinned, mem_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_original, h_pinned, mem_size, cudaMemcpyHostToDevice);
    //- Pinned Host Memory

    //// Pageable Host Memory
    //cudaMemcpy(d_matrix, h_matrix, mem_size, cudaMemcpyHostToDevice);
    //cudaMemcpy(d_original, h_matrix, mem_size, cudaMemcpyHostToDevice);
    ////- Pageable Host Memory

    cufftExecC2C(handle, (cufftComplex*)(d_matrix), (cufftComplex*)(d_matrix), CUFFT_FORWARD);
    hit<<<gridSize, blockSize>>>((cufftComplex*)(d_matrix), NUM_RECORDS, RECORD_LENGTH);
    cufftExecC2C(handle, (cufftComplex*)(d_matrix), (cufftComplex*)(d_matrix), CUFFT_INVERSE);
    hft<<<gridSize, blockSize>>>((cufftComplex*)(d_matrix), (cufftComplex*)(d_original), NUM_RECORDS, RECORD_LENGTH);
    max_kernel<<<gridSize, blockSize>>>(d_max, (cufftComplex*)(d_matrix));

    cudaMemcpy(h_max, d_max, NUM_RECORDS*sizeof(float), cudaMemcpyDeviceToHost);

    // Pinned Host Memory
    cudaMemcpy(h_pinned, d_matrix, mem_size, cudaMemcpyDeviceToHost);
    //- Pinned Host Memory

    //// Pageable Host Memory
    //cudaMemcpy(h_matrix, d_matrix, mem_size, cudaMemcpyDeviceToHost);
    ////- Pageable Host Memory

    timer.Stop();
    float ms = timer.Elapsed();
    printf("Matrix Transform Done! Time Elapsed: %f ms\n", ms);

    float magnitude;
    float max =0;
    for (int j=0; j<NUM_RECORDS; j++)
    {
        for (int i=0; i<RECORD_LENGTH; i++)
        {
            float2 temp = h_pinned[j*RECORD_LENGTH+i];
            if(j==0)// && RECORD_LENGTH < 100)
            {
                //magnitude = sqrtf(powf(temp.x,2.0f) + powf(temp.y,2.0f));
                //printf("(%2.2f\t %2.2f), %2.2f\n", temp.x, temp.y);
                if(temp.x > max)
                    max = temp.x;
            }
        }
        if(j==0)
            printf("%f %f\n", h_max[j], max);
    }

    cufftDestroy(handle);
    free(h_matrix);
    cudaFree(d_matrix);
    cudaFree(d_original);
    cudaFreeHost(h_pinned);
}
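A caveat on max_kernel in the pair above: the tree reduction halves blockDim.x each round, and with RECORD_LENGTH = 500 (not a power of two) candidates folded into odd-sized remainders (for example index 124 after the second round) are never compared again; it also overwrites its input in global memory. The following is a sketch of an alternative per-record max reduction in shared memory that pads the thread count to a power of two. It is not taken from the original sources and the names are illustrative:

#include <cstdio>
#include <cfloat>
#include <cuda_runtime.h>

// block_max: one block reduces `len` floats starting at in + blockIdx.x * len.
// BLOCK must be a power of two; threads with no element load -FLT_MAX.
template <int BLOCK>
__global__ void block_max(const float* in, float* out, int len)
{
    __shared__ float s[BLOCK];
    int tid = threadIdx.x;
    const float* rec = in + blockIdx.x * len;

    // Strided load so len may be larger or smaller than BLOCK.
    float m = -FLT_MAX;
    for (int i = tid; i < len; i += BLOCK)
        m = fmaxf(m, rec[i]);
    s[tid] = m;
    __syncthreads();

    // Power-of-two tree reduction in shared memory.
    for (int stride = BLOCK / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            s[tid] = fmaxf(s[tid], s[tid + stride]);
        __syncthreads();
    }
    if (tid == 0)
        out[blockIdx.x] = s[0];
}

int main()
{
    const int records = 4, len = 500;          // len is not a power of two
    float* h = new float[records * len];
    for (int i = 0; i < records * len; i++) h[i] = (float)(i % 997);

    float *d_in, *d_out;
    cudaMalloc(&d_in, records * len * sizeof(float));
    cudaMalloc(&d_out, records * sizeof(float));
    cudaMemcpy(d_in, h, records * len * sizeof(float), cudaMemcpyHostToDevice);

    block_max<512><<<records, 512>>>(d_in, d_out, len);

    float h_out[records];
    cudaMemcpy(h_out, d_out, records * sizeof(float), cudaMemcpyDeviceToHost);
    for (int r = 0; r < records; r++)
        printf("record %d max = %f\n", r, h_out[r]);

    cudaFree(d_in); cudaFree(d_out); delete[] h;
    return 0;
}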
5c587cdc2bed4b1f83902a8ce32e764b16eaea7e.hip
// !!! This is a file automatically generated by hipify!!!
// Include CUDA implementations of BatchMatrix

#include <cstdint>
#include <memory>

#include <hip/hip_runtime.h>

#include "Log.hpp"
#include "Cuda.hpp"
#include "Util_Cuda.hpp"
#include "BatchMatrix.hpp"

namespace cbm {

// Constructors
template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>::BatchMatrix(const BatchMatrix<ScaType, MemType>& t) : ptr_(nullptr) {
  static_assert(MemType==CUDA, "This function is only for constructing CUDA from CUDA");
  CUDA_CHECK(hipMalloc(&data_, t.len()*sizeof(ScaType)));
  CUDA_CHECK(hipMemcpy(
      data_, t.data(), t.len()*sizeof(ScaType), hipMemcpyDeviceToDevice));
  for (int i=0; i<3; i++) {
    dim_[i] = t.dim()[i];
    stride_[i] = t.stride()[i];
  }
  update_ptr();
}

// Destructor
template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>::~BatchMatrix() {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  if (data_!=nullptr)
    CUDA_CHECK(hipFree(data_));
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType,MemType>::BatchMatrix(int a, int b, int c) : ptr_(nullptr) {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  // copy
  dim_[0] = a;
  dim_[1] = b;
  dim_[2] = c;
  // default: column major for each matrix
  stride_[1] = 1;
  stride_[2] = dim_[1];
  stride_[0] = dim_[1]*dim_[2];
  len_ = stride_[0]*dim_[0];
  CUDA_CHECK(hipMalloc(&data_, len_*sizeof(ScaType)));
  update_ptr();
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType,MemType>::BatchMatrix(const std::vector<int>& d)
  : BatchMatrix(d[0], d[1], d[2]) {}

template<typename ScaType, Type MemType>
void BatchMatrix<ScaType,MemType>::update_ptr() {
  if (ptr_!=nullptr)
    CUDA_CHECK(hipFree(ptr_));
  CUDA_CHECK(hipMalloc(&ptr_, dim_[0]*sizeof(ScaType*)));
  std::unique_ptr<ScaType*[]> tmp(new ScaType*[dim_[0]]);
  for (int i = 0; i < dim_[0]; i++)
    tmp[i] = data_ + i*stride_[0];
  CUDA_CHECK(hipMemcpy(ptr_, tmp.get(), dim_[0]*sizeof(ScaType*), hipMemcpyHostToDevice));

  // //!The following code also works, but calling a kernel instead!
  // inplace_set_inc<<<::floor(dim_[0]/TPB)+1, TPB>>>(reinterpret_cast<std::intptr_t*>(ptr_),
  //                                                  reinterpret_cast<std::intptr_t>(data_),
  //                                                  static_cast<std::intptr_t>(stride_[0]*sizeof(std::intptr_t)),
  //                                                  1, dim_[0]);
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>* BatchMatrix<ScaType, MemType>::ones(const std::vector<int>& d) {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  auto ret = new BatchMatrix<ScaType, MemType>(d);
  hipLaunchKernelGGL(( set_to_const), dim3(::floor(ret->len()/TPB)+1), dim3(TPB), 0, 0, ret->data(), static_cast<ScaType>(1), ret->len());
  CUDA_CHECK(hipDeviceSynchronize());
  return ret;
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>* BatchMatrix<ScaType, MemType>::zeros(const std::vector<int>& d) {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  auto ret = new BatchMatrix<ScaType, MemType>(d);
  hipLaunchKernelGGL(( set_to_const), dim3(::floor(ret->len()/TPB)+1), dim3(TPB), 0, 0, ret->data(), static_cast<ScaType>(0), ret->len());
  return ret;
}

template<typename ScaType, Type MemType>
void BatchMatrix<ScaType, MemType>::add(ScaType v) {
  hipLaunchKernelGGL(( inplace_add), dim3(::floor(len_/TPB)+1), dim3(TPB), 0, 0, data_, v, len_);
}

template class BatchMatrix<float, CUDA>;
template class BatchMatrix<int, CUDA>;
template class BatchMatrix<double, CUDA>;

}  // namespace cbm
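set_to_const and inplace_add are launched above but declared in Util_Cuda.hpp, which is not part of this dump. The following is only a guess at what such element-wise kernels typically look like, with the parameter order inferred from the call sites; the real header may differ. The grid size here uses the usual ceiling division rather than the floor(len/TPB)+1 expression used above:

#include <cstdio>
#include <cuda_runtime.h>

// Plausible shapes for the element-wise kernels referenced by ones()/zeros()/add().
template <typename ScaType>
__global__ void set_to_const(ScaType* data, ScaType value, int len)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len)
        data[i] = value;            // fill every element with `value`
}

template <typename ScaType>
__global__ void inplace_add(ScaType* data, ScaType v, int len)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len)
        data[i] += v;               // element-wise in-place addition
}

int main()
{
    const int TPB = 256, len = 1000;
    float* d;
    cudaMalloc(&d, len * sizeof(float));

    set_to_const<<<(len + TPB - 1) / TPB, TPB>>>(d, 1.0f, len);   // what ones() launches
    inplace_add<<<(len + TPB - 1) / TPB, TPB>>>(d, 2.5f, len);    // what add(2.5) launches

    float h[4];
    cudaMemcpy(h, d, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f %f %f %f\n", h[0], h[1], h[2], h[3]);              // expect 3.5 each

    cudaFree(d);
    return 0;
}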
5c587cdc2bed4b1f83902a8ce32e764b16eaea7e.cu
// Include CUDA implementations of BatchMatrix

#include <cstdint>
#include <memory>

#include <cuda.h>

#include "Log.hpp"
#include "Cuda.hpp"
#include "Util_Cuda.hpp"
#include "BatchMatrix.hpp"

namespace cbm {

// Constructors
template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>::BatchMatrix(const BatchMatrix<ScaType, MemType>& t) : ptr_(nullptr) {
  static_assert(MemType==CUDA, "This function is only for constructing CUDA from CUDA");
  CUDA_CHECK(cudaMalloc(&data_, t.len()*sizeof(ScaType)));
  CUDA_CHECK(cudaMemcpy(
      data_, t.data(), t.len()*sizeof(ScaType), cudaMemcpyDeviceToDevice));
  for (int i=0; i<3; i++) {
    dim_[i] = t.dim()[i];
    stride_[i] = t.stride()[i];
  }
  update_ptr();
}

// Destructor
template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>::~BatchMatrix() {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  if (data_!=nullptr)
    CUDA_CHECK(cudaFree(data_));
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType,MemType>::BatchMatrix(int a, int b, int c) : ptr_(nullptr) {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  // copy
  dim_[0] = a;
  dim_[1] = b;
  dim_[2] = c;
  // default: column major for each matrix
  stride_[1] = 1;
  stride_[2] = dim_[1];
  stride_[0] = dim_[1]*dim_[2];
  len_ = stride_[0]*dim_[0];
  CUDA_CHECK(cudaMalloc(&data_, len_*sizeof(ScaType)));
  update_ptr();
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType,MemType>::BatchMatrix(const std::vector<int>& d)
  : BatchMatrix(d[0], d[1], d[2]) {}

template<typename ScaType, Type MemType>
void BatchMatrix<ScaType,MemType>::update_ptr() {
  if (ptr_!=nullptr)
    CUDA_CHECK(cudaFree(ptr_));
  CUDA_CHECK(cudaMalloc(&ptr_, dim_[0]*sizeof(ScaType*)));
  std::unique_ptr<ScaType*[]> tmp(new ScaType*[dim_[0]]);
  for (int i = 0; i < dim_[0]; i++)
    tmp[i] = data_ + i*stride_[0];
  CUDA_CHECK(cudaMemcpy(ptr_, tmp.get(), dim_[0]*sizeof(ScaType*), cudaMemcpyHostToDevice));

  // //!The following code also works, but calling a kernel instead!
  // inplace_set_inc<<<std::floor(dim_[0]/TPB)+1, TPB>>>(reinterpret_cast<std::intptr_t*>(ptr_),
  //                                                     reinterpret_cast<std::intptr_t>(data_),
  //                                                     static_cast<std::intptr_t>(stride_[0]*sizeof(std::intptr_t)),
  //                                                     1, dim_[0]);
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>* BatchMatrix<ScaType, MemType>::ones(const std::vector<int>& d) {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  auto ret = new BatchMatrix<ScaType, MemType>(d);
  set_to_const<<<std::floor(ret->len()/TPB)+1, TPB>>>(ret->data(), static_cast<ScaType>(1), ret->len());
  CUDA_CHECK(cudaDeviceSynchronize());
  return ret;
}

template<typename ScaType, Type MemType>
BatchMatrix<ScaType, MemType>* BatchMatrix<ScaType, MemType>::zeros(const std::vector<int>& d) {
  static_assert(MemType==CUDA, "This function is only for CUDA");
  auto ret = new BatchMatrix<ScaType, MemType>(d);
  set_to_const<<<std::floor(ret->len()/TPB)+1, TPB>>>(ret->data(), static_cast<ScaType>(0), ret->len());
  return ret;
}

template<typename ScaType, Type MemType>
void BatchMatrix<ScaType, MemType>::add(ScaType v) {
  inplace_add<<<std::floor(len_/TPB)+1, TPB>>>(data_, v, len_);
}

template class BatchMatrix<float, CUDA>;
template class BatchMatrix<int, CUDA>;
template class BatchMatrix<double, CUDA>;

}  // namespace cbm
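update_ptr() above builds a device-side table of per-matrix pointers into one contiguous allocation, the layout expected by batched BLAS-style routines and by kernels that index one matrix per block. Below is a small standalone sketch of that pointer-table pattern, independent of the BatchMatrix class and not taken from the project:

#include <cstdio>
#include <memory>
#include <cuda_runtime.h>

int main()
{
    const int batch = 4, rows = 3, cols = 2;
    const int stride = rows * cols;                   // elements per matrix

    float* data;                                      // one contiguous allocation
    cudaMalloc(&data, batch * stride * sizeof(float));

    // Fill the pointer table on the host, then copy it to the device,
    // mirroring what update_ptr() does with std::unique_ptr + cudaMemcpy.
    std::unique_ptr<float*[]> h_ptrs(new float*[batch]);
    for (int b = 0; b < batch; b++)
        h_ptrs[b] = data + b * stride;

    float** d_ptrs;
    cudaMalloc(&d_ptrs, batch * sizeof(float*));
    cudaMemcpy(d_ptrs, h_ptrs.get(), batch * sizeof(float*), cudaMemcpyHostToDevice);

    // d_ptrs can now be handed to routines that take an array of per-matrix
    // pointers, or to kernels that process one matrix per block.

    // Sanity check: read the table back and confirm the offsets.
    float* check[batch];
    cudaMemcpy(check, d_ptrs, batch * sizeof(float*), cudaMemcpyDeviceToHost);
    for (int b = 0; b < batch; b++)
        printf("matrix %d at element offset %ld\n", b, (long)(check[b] - data));

    cudaFree(d_ptrs);
    cudaFree(data);
    return 0;
}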
2900f5837e67811d64d43387fd8f55113f583dbb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <math.h> #include <hiprand/hiprand_kernel.h> #include <time.h> #include <unistd.h> #include <thrust/scan.h> #include <thrust/remove.h> #include <thrust/execution_policy.h> #include <iostream> #include "custom_temporary_allocation.cuh" #include "parameter.cuh" using namespace std; typedef hiprandStatePhilox4_32_10_t myCurandState_t; //#define DEBUG #define cudaCheckError() { \ hipError_t e=hipGetLastError(); \ if(e!=hipSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \ exit(0); \ } \ } #define FACTOR 5 #define ITERATIONS 100 #define TOTAL (SIZE * SIZE) #define GRIDSIZE (SIZE+2) #define GRIDTOTAL (SIZE+2)*(SIZE+2) #define SRAND_VALUE 200 #define PENUMBER (TOTAL/PESIZE) #define SAM_NUM_VALUES ((SIZE+2)*(SIZE+2)) #define SAM_PENUMBER (SAM_NUM_VALUES / SAM_PESIZE) const int agentTypeOneNumber = agentNumber / 2; const int agentTypeTwoNumber = agentNumber - agentTypeOneNumber; const int happinessThreshold = 5; void printOutput(int [SIZE+2][SIZE+2]); void initPos(int grid [SIZE+2][SIZE+2]); int random_location(); __device__ static const int FAK_LEN = 1024; // length of factorial table __device__ int hyp_n_last[SAM_PENUMBER], hyp_m_last[SAM_PENUMBER], hyp_N_last[SAM_PENUMBER]; // Last values of parameters __device__ int hyp_mode[SAM_PENUMBER], hyp_mp[SAM_PENUMBER]; // Mode, mode+1 __device__ int hyp_bound[SAM_PENUMBER]; // Safety upper bound __device__ double hyp_a[SAM_PENUMBER]; // hat center __device__ double hyp_h[SAM_PENUMBER]; // hat width __device__ double hyp_fm[SAM_PENUMBER]; // Value at mode __device__ int device_pe_inuse; __device__ int device_num_inuse; __device__ int device_removed_move_list_end; __device__ int device_removed_space_list_end; __device__ int device_penumber_inuse; __device__ int device_reduced_pe_position; __device__ float getnextrand(myCurandState_t *state){ return (hiprand_uniform(state)); } __global__ void initSamCurand(myCurandState_t state[SAM_PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < SAM_PENUMBER){ hiprand_init(idx, 0 , 0, &state[idx]); } } __device__ const double C0 = 0.918938533204672722, // ln(sqrt(2*pi)) C1 = 1./12., C3 = -1./360.; __device__ double fac_table[FAK_LEN]; __device__ int initialized = 0; __device__ double LnFac(int n) { if (n < FAK_LEN) { if (n <= 1) { if (n < 0) printf("Parameter negative in LnFac function\n"); return 0; } if (!initialized) { // first time. Must initialize table // make table of ln(n!) double sum = fac_table[0] = 0.; for (int i=1; i<FAK_LEN; i++) { sum += log(double(i)); fac_table[i] = sum; } initialized = 1; } return fac_table[n]; } // not found in table. use Stirling approximation double n1, r; n1 = n; r = 1. / n1; return (n1 + 0.5)*log(n1) - n1 + C0 + r*(C1 + r*r*C3); //return logf(n); } __device__ double fc_lnpk(int k, int L, int m, int n) { // subfunction used by hypergeometric and Fisher's noncentral hypergeometric distribution return(LnFac(k) + LnFac(m - k) + LnFac(n - k) + LnFac(L + k)); } __device__ int HypInversionMod (myCurandState_t stateHyper[SAM_PENUMBER],int n, int m, int N, int idx) { /* Subfunction for Hypergeometric distribution. Assumes 0 <= n <= m <= N/2. Overflow protection is needed when N > 680 or n > 75. Hypergeometric distribution by inversion method, using down-up search starting at the mode using the chop-down technique. 
This method is faster than the rejection method when the variance is low. */ //int idx = threadIdx.x + blockIdx.x * blockDim.x; // Sampling int I; // Loop counter int L = N - m - n; // Parameter double modef; // mode, float double Mp, np; // m + 1, n + 1 double p; // temporary double U; // uniform random double c, d; // factors in iteration double divisor; // divisor, eliminated by scaling double k1, k2; // float version of loop counter double L1 = L; // float version of L Mp = (double)(m + 1); np = (double)(n + 1); if (N != hyp_N_last[idx] || m != hyp_m_last[idx] || n != hyp_n_last[idx]) { // set-up when parameters have changed hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n; p = Mp / (N + 2.); modef = np * p; // mode, real hyp_mode[idx] = (int)modef; // mode, integer if (hyp_mode[idx] == modef && p == 0.5) { hyp_mp[idx] = hyp_mode[idx]--; } else { hyp_mp[idx] = hyp_mode[idx] + 1; } // mode probability, using log factorial function // (may read directly from fac_table if N < FAK_LEN) hyp_fm[idx] = exp(LnFac(N-m) - LnFac(L+hyp_mode[idx]) - LnFac(n-hyp_mode[idx]) + LnFac(m) - LnFac(m-hyp_mode[idx]) - LnFac(hyp_mode[idx]) - LnFac(N) + LnFac(N-n) + LnFac(n) ); // safety bound - guarantees at least 17 significant decimal digits // bound = min(n, (int)(modef + k*c')) hyp_bound[idx] = (int)(modef + 11. * sqrt(modef * (1.-p) * (1.-n/(double)N)+1.)); if (hyp_bound[idx] > n) hyp_bound[idx] = n; } // loop until accepted //int max_iterations = 1000; while(1) { // if(!(max_iterations--)) // break; U = getnextrand(&stateHyper[idx]); // uniform random number to be converted //printf(" U is %lf\n",U); // start chop-down search at mode if ((U -= hyp_fm[idx]) <= 0.) return(hyp_mode[idx]); c = d = hyp_fm[idx]; // alternating down- and upward search from the mode k1 = hyp_mp[idx] - 1; k2 = hyp_mode[idx] + 1; for (I = 1; I <= hyp_mode[idx]; I++, k1--, k2++) { // if(!(max_iterations--)) // break; // Downward search from k1 = hyp_mp - 1 divisor = (np - k1)*(Mp - k1); // Instead of dividing c with divisor, we multiply U and d because // multiplication is faster. This will give overflow if N > 800 U *= divisor; d *= divisor; c *= k1 * (L1 + k1); if ((U -= c) <= 0.) return(hyp_mp[idx] - I - 1); // = k1 - 1 //printf("Line 228 I %d \n",I); // Upward search from k2 = hyp_mode + 1 divisor = k2 * (L1 + k2); // re-scale parameters to avoid time-consuming division U *= divisor; c *= divisor; d *= (np - k2) * (Mp - k2); if ((U -= d) <= 0.) return(hyp_mode[idx] + I); // = k2 // Values of n > 75 or N > 680 may give overflow if you leave out this.. // overflow protection // if (U > 1.E100) {U *= 1.E-100; c *= 1.E-100; d *= 1.E-100;} } // Upward search from k2 = 2*mode + 1 to bound for (k2 = I = hyp_mp[idx] + hyp_mode[idx]; I <= hyp_bound[idx]; I++, k2++) { //if(!(max_iterations--)) // break; divisor = k2 * (L1 + k2); U *= divisor; d *= (np - k2) * (Mp - k2); if ((U -= d) <= 0.) return(I); // more overflow protection // if (U > 1.E100) {U *= 1.E-100; d *= 1.E-100;} } } } __device__ int HypRatioOfUnifoms (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) { /* Subfunction for Hypergeometric distribution using the ratio-of-uniforms rejection method. This code is valid for 0 < n <= m <= N/2. The computation time hardly depends on the parameters, except that it matters a lot whether parameters are within the range where the LnFac function is tabulated. Reference: E. Stadlober: "The ratio of uniforms approach for generating discrete random variates". 
Journal of Computational and Applied Mathematics, vol. 31, no. 1, 1990, pp. 181-189. */ //int idx = threadIdx.x + blockIdx.x * blockDim.x; const double SHAT1 = 2.943035529371538573; // 8/e const double SHAT2 = 0.8989161620588987408; // 3-sqrt(12/e) int L; // N-m-n int mode; // mode int k; // integer sample double x; // real sample double rNN; // 1/(N*(N+2)) double my; // mean double var; // variance double u; // uniform random double lf; // ln(f(x)) L = N - m - n; if (hyp_N_last[idx] != N || hyp_m_last[idx] != m || hyp_n_last[idx] != n) { hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n; // Set-up rNN = 1. / ((double)N*(N+2)); // make two divisions in one my = (double)n * m * rNN * (N+2); // mean = n*m/N mode = (int)(double(n+1) * double(m+1) * rNN * N); // mode = floor((n+1)*(m+1)/(N+2)) var = (double)n * m * (N-m) * (N-n) / ((double)N*N*(N-1));// variance hyp_h[idx] = sqrt(SHAT1 * (var+0.5)) + SHAT2; // hat width hyp_a[idx] = my + 0.5; // hat center hyp_fm[idx] = fc_lnpk(mode, L, m, n); // maximum hyp_bound[idx] = (int)(hyp_a[idx] + 4.0 * hyp_h[idx]); // safety-bound if (hyp_bound[idx] > n) hyp_bound[idx] = n; } while(1) { u = getnextrand(&stateHyper[idx]); // uniform random number if (u == 0) continue; // avoid division by 0 x = hyp_a[idx] + hyp_h[idx] * (getnextrand(&stateHyper[idx])-0.5) / u; // generate hat distribution if (x < 0. || x > 2E9) continue; // reject, avoid overflow k = (int)x; if (k > hyp_bound[idx]) continue; // reject if outside range lf = hyp_fm[idx] - fc_lnpk(k,L,m,n); // ln(f(k)) if (u * (4.0 - u) - 3.0 <= lf) break; // lower squeeze accept if (u * (u-lf) > 1.0) continue; // upper squeeze reject if (2.0 * log(u) <= lf) break; // final acceptance } return k; } __device__ int Hypergeometric (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) { /* This function generates a random variate with the hypergeometric distribution. This is the distribution you get when drawing balls without replacement from an urn with two colors. n is the number of balls you take, m is the number of red balls in the urn, N is the total number of balls in the urn, and the return value is the number of red balls you get. This function uses inversion by chop-down search from the mode when parameters are small, and the ratio-of-uniforms method when the former method would be too slow or would give overflow. 
*/ int fak, addd; // used for undoing transformations int x; // result hyp_n_last[idx] = hyp_m_last[idx] = hyp_N_last[idx] = -1; // Last values of hypergeometric parameters // check if parameters are valid if (n > N || m > N || n < 0 || m < 0) { printf("Parameter out of range in hypergeometric function n %ld m %ld N %ld idx %d\n",n,m,N,idx); printf("Parameter out of range in hypergeometric function %d,%d,%d,%d\n", n > N, m > N, n < 0, m < 0); return 0; } // symmetry transformations fak = 1; addd = 0; if (m > N/2) { // invert m m = N - m; fak = -1; addd = n; } if (n > N/2) { // invert n n = N - n; addd += fak * m; fak = - fak; } if (n > m) { // swap n and m x = n; n = m; m = x; } // cases with only one possible result end here if (n == 0) return addd; //------------------------------------------------------------------ // choose method //------------------------------------------------------------------ if (N > 680 || n > 70) { // use ratio-of-uniforms method x = HypRatioOfUnifoms (stateHyper, n, m, N,idx); } else { // inversion method, using chop-down search from mode x = HypInversionMod (stateHyper, n, m, N,idx); } // undo symmetry transformations return x * fak + addd; } __global__ void clearSamples(int samples[SAM_NUM_VALUES]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < (SAM_NUM_VALUES)){ samples[idx] = 0; } } __device__ void methodA(myCurandState_t state[SAM_PENUMBER],int N, int n, int num_sample, int initialTocurrent,int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES]) { //ASSERT_LEQ(n, N); int idx = threadIdx.x + blockIdx.x * blockDim.x; // Initialization int sample = 0; double Nreal = (double) N; double top = Nreal - n; // Main loop while (n >= 2) { int S = 0; double V = getnextrand(&state[idx]); double quot = top / Nreal; while (quot > V) { S++; top -= 1.0; Nreal -= 1.0; quot = (quot * top) / Nreal; } // Skip over next S records and select the following one sample += S + 1; //samples[idx][num_sample++] = sample + initialTocurrent; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1]; //callback(sample); Nreal -= 1.0; n--; } if (n == 1) { int S = round(Nreal) * getnextrand(&state[idx]); sample += S + 1; //samples[idx][num_sample++] = sample + initialTocurrent; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1]; //callback(sample); } } // Sampling method D from Vitter et al. // // \param N Size of population. // \param n Number of samples. // \param gen Uniform random variate generator. // \param samples Function to process sample. // __device__ void sample(myCurandState_t state[SAM_PENUMBER], int N, int n, int device_list[SAM_NUM_VALUES], int samples[SAM_NUM_VALUES]) { //ASSERT_LEQ(n, N); int idx = threadIdx.x + blockIdx.x * blockDim.x; int initialN = N; // Initialization int sample = 0; int num_sample = 0; double nreal = (double) n; double ninv = 1.0 / nreal; double Nreal = (double) N; double Vprime = exp(log(getnextrand(&state[idx])) * ninv); int qu1 = N + 1 - n; double qu1real = Nreal + 1.0 - nreal; int negalphainv = -13; int threshold = n * (-negalphainv); int S = 0; // Main loop while (n > 1 && threshold < N) { double nmin1inv = 1.0 / (nreal - 1.0); double negSreal = 0.0; while (true) { // Step D2: Generate U and X double X; while (true) { X = Nreal * (1.0 - Vprime); S = X; if (S < qu1) break; Vprime = exp(log(getnextrand(&state[idx])) * ninv); } double U = getnextrand(&state[idx]); negSreal = -(double)S; // Step D3: Accept? 
double y1 = exp(log(U * Nreal / qu1real) * nmin1inv); Vprime = y1 * (-X / Nreal + 1.0) * (qu1real / (negSreal + qu1real)); if (Vprime <= 1.0) break; // Accept! // Step D4: Accept? double y2 = 1.0; double top = Nreal - 1.0; double bottom; double limit; if (n - 1 > S) { bottom = Nreal - nreal; limit = N - S; } else { bottom = negSreal + Nreal - 1.0; limit = qu1; } for (int t = N; t > limit; t--) { y2 = (y2 * top) / bottom; top -= 1.0; bottom -= 1.0; } if (Nreal / (Nreal - X) >= y1 * exp(log(y2) * nmin1inv)) { // Accept! Vprime = exp(log(getnextrand(&state[idx])) * nmin1inv); break; } Vprime = exp(log(getnextrand(&state[idx])) * ninv); } // Skip over next S records and select the following one sample += S + 1; //samples[idx][num_sample++] = sample; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1]; //callback(sample); N = (N - 1) - S; Nreal = (Nreal - 1.0) + negSreal; n--; nreal -= 1.0; ninv = nmin1inv; qu1 -= S; qu1real += negSreal; threshold += negalphainv; } if (n > 1) { int currentN = N; methodA(state, N, n, num_sample, initialN - currentN, device_list,samples); //samples[num_sample++] = sample + initialN - currentN; //methodA(N, n, [&](int sample) { // callback(sample + initialN - currentN); //}); } else if (n == 1) { S = N * Vprime; // Skip over next S records and select the following one sample += S + 1; //samples[idx][num_sample++] = sample; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1]; //callback(sample); } } __global__ void sampleP(myCurandState_t state[SAM_PENUMBER], myCurandState_t stateHyper[SAM_PENUMBER], int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES], int n, int j, int k) { int idx = threadIdx.x + blockIdx.x * blockDim.x; //idx += 1; if(idx < device_pe_inuse){ int seed = 1; //int counter = 0; int m,x; while(j - k != 0) { hiprand_init(seed, 0 , 0, &stateHyper[idx]); m = floor( (j+k)/2.0 ); //printf("sampleP1 n %d idx %d m %d\n",n,idx,m); //__device__ int Hypergeometric (hiprandState_t stateHyper[PENUMBER], //int n, int m, int N, int idx) { /* This function generates a random variate with the hypergeometric distribution. This is the distribution you get when drawing balls without replacement from an urn with two colors. n is the number of balls you take, m is the number of red balls in the urn, N is the total number of balls in the urn, and the return value is the number of red balls you get. 
*/ //printf("would call Hypergeometric(stateHyper, %d, %d, %d, %d)\n", n, (m-j)*PESIZE + 1, (k-j)*PESIZE + 1, idx); //printf("j is now %d, k is %d, m is %d, sums are %d and %d\n", j, k, m, k - (j - 1), m - (j - 1)); if(k != device_pe_inuse - 1){ x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, (k-(j-1))*SAM_PESIZE, idx); } else{ x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, ((k-1)-(j-1))*SAM_PESIZE + device_num_inuse % SAM_PESIZE, idx); } //printf("sampleP2 n %d idx %d x %d\n",n,idx,x); //int x = m; if(idx <= m) { n = x; k = m; seed = seed * 2; } else { n = n-x; j = m + 1; seed = seed * 2 + 1; } } //printf("sample n %d \n",n); if(idx != device_pe_inuse - 1 ) { //printf("idx %d sampling %d values\n", idx, n); sample(state, SAM_PESIZE, n, device_list, samples); } else { //printf("n > PESIZE %d \n",n); sample(state, device_num_inuse % SAM_PESIZE, n, device_list, samples); } /*if(n <= PESIZE ) { //printf("idx %d sampling %d values\n", idx, n); sample(state, PESIZE, n, device_list, samples); } else { printf("n > PESIZE %d \n",n); }*/ } } //__global__ void print_device_reduced_pe_position(){ //printf("reduced_pe_position %d \n",( int( 0.5 + ceil((float)device_reduced_pe_position / (PESIZE) )) ) ); //printf("device_reduced_pe_position %d \n",(device_reduced_pe_position ) ); //} __global__ void initCurand(myCurandState_t state[][PESIZE]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy=blockIdx.y*blockDim.y+threadIdx.y; if(idx < PENUMBER && idy<PESIZE){ hiprand_init(idx*(PESIZE)+idy,0 , 0, &state[idx][idy]); } } __global__ void compute(int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list, int iteration){ int idx=blockIdx.x*blockDim.x+threadIdx.x; int idy=blockIdx.y*blockDim.y+threadIdx.y; int sameTypeCount=0; int current_id = idx*(SIZE+2)+idy; if(grid[idx][idy] != 0){ int currentType = grid[idx][idy]; if(grid[idx-1][idy-1] == currentType){ sameTypeCount += 1; } if(grid[idx-1][idy] == currentType){ sameTypeCount += 1; } if(grid[idx-1][idy+1] == currentType){ sameTypeCount += 1; } if(grid[idx][idy-1] == currentType){ sameTypeCount += 1; } if(grid[idx][idy+1] == currentType){ sameTypeCount += 1; } if(grid[idx+1][idy-1] == currentType){ sameTypeCount += 1; } if(grid[idx+1][idy] == currentType){ sameTypeCount += 1; } if(grid[idx+1][idy+1] == currentType){ sameTypeCount += 1; } if(sameTypeCount < happinessThreshold){ move_list[current_id] = current_id; space_list[current_id] = current_id; } } else if(idx != 0 && idy !=0 && idx != (SIZE+1) && idy != (SIZE+1) ){ space_list[current_id] = current_id; } } __global__ void update (int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list){ int idx=blockIdx.x*blockDim.x+threadIdx.x; int idy=blockIdx.y*blockDim.y+threadIdx.y; grid[idy][idx] = new_grid[idy][idx]; move_list[idx*(SIZE+2)+idy] = 0; space_list[idx*(SIZE+2)+idy] = 0; } __global__ void sendToRandomPerpe(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < device_penumber_inuse -1 ){ for(int i=0; i < PESIZE; i++ ){ float r = getnextrand(&state[idx][0]); int random_position = r * (device_penumber_inuse-1); int acquired_position = atomicAdd(&random_list_counter[random_position],1); temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i]; } } else if(idx == device_penumber_inuse - 1 ){ for(int i=0; i < device_removed_move_list_end % PESIZE; i++ ){ float r = 
getnextrand(&state[idx][0]); int random_position = r * (device_penumber_inuse-1); int acquired_position = atomicAdd(&random_list_counter[random_position],1); temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i]; } } } __global__ void sendToRandom(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx*PESIZE +idy < device_removed_move_list_end ){ float r = getnextrand(&state[idx][idy]); int random_position = r * (device_penumber_inuse-1); int acquired_position = atomicAdd(&random_list_counter[random_position],1); temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+idy]; } } __global__ void clearCounter(int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < device_penumber_inuse){ random_list_counter[idx] = 0; } } __global__ void generateList(int device_list[][PESIZE]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx*PESIZE +idy < device_removed_space_list_end ){ device_list[idx][idy] = idx*PESIZE +idy; } } static __device__ void swap(int *data, int x, int y) { int temp = data[x]; data[x] = data[y]; data[y] = temp; } static __device__ int partition(int *data, int left, int right) { const int mid = left + (right - left) / 2; const int pivot = data[(mid)]; swap(data, (mid), (left)); int i = left + 1; int j = right; while (i <= j) { while (i <= j && data[(i)] <= pivot) { i++; } while (i <= j && data[(j)] > pivot) { j--; } if (i < j) { swap(data, (i), (j)); } } swap(data, (i - 1), (left)); return i - 1; } typedef struct sort_data { int left; int right; } sort_data; __device__ void quicksort_seq(int *data, int right) { int left = 0; if(left == right) return; if (left > right) { right = 1 + right; } int stack_size = 0; sort_data stack[PESIZE*FACTOR]; stack[stack_size++] = { left, right }; while (stack_size > 0) { int curr_left = stack[stack_size - 1].left; int curr_right = stack[stack_size - 1].right; stack_size--; if (curr_left < curr_right) { int part = partition(data, curr_left, curr_right); stack[stack_size++] = {curr_left, part - 1}; stack[stack_size++] = {part + 1, curr_right}; } } } __global__ void sortList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < device_penumber_inuse){ int number = random_list_counter[idx]; if(number != 0){ quicksort_seq(temp_device_list[idx], number - 1 ); } } } __global__ void randomPermute(myCurandState_t state[][PESIZE], int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int reduced_pe = device_penumber_inuse; if(idx < reduced_pe){ for (int i = 0; i < random_list_counter[idx]; i++){ float r = getnextrand(&state[idx][0]); int j = r * (random_list_counter[idx]-1); int temp = temp_device_list[idx][i] ; temp_device_list[idx][i] = temp_device_list[idx][j] ; temp_device_list[idx][j] = temp; } } } __global__ void recoverSize(int device_list[][PESIZE], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER], int scanned_random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int reduced_pe = device_penumber_inuse; if(idx < reduced_pe){ int delta = scanned_random_list_counter[idx]; for(int i=0; i<random_list_counter[idx]; i++){ int addValue = 
delta + i; int interResult = device_penumber_inuse*addValue/(PESIZE*device_penumber_inuse); device_list[interResult][(delta- (PESIZE*device_penumber_inuse/device_penumber_inuse)*interResult + i)] = temp_device_list[idx][i]; } } } struct smaller_than { __device__ bool operator()(const int x) { return (x < device_removed_space_list_end) == 0; } }; struct greater_than { __device__ bool operator()(int x) { return x > device_removed_move_list_end; } }; __global__ void printTempList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){ for(int i =0; i<device_penumber_inuse; i++){ for(int j=0; j<random_list_counter[i];j++){ printf("%d ",temp_device_list[i][j]); } printf("\n"); } } __global__ void printList(int * list,int *removed_list_end){ printf( "SIZE %d \n",removed_list_end - list) ; for(int i=0; i<removed_list_end - list; i++){ printf("%d ",list[i]); } printf("\n"); } __global__ void printListPre(int * list){ printf( "SIZE %d \n",device_removed_space_list_end) ; for(int i=0; i<device_removed_space_list_end; i++){ printf("%d ",list[i]); } printf("\n"); } __global__ void prepareNewGrid (int new_grid[][SIZE+2], int * move_list, int permutation[][PESIZE]){ int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx<device_removed_move_list_end){ int idxTox = idx / PESIZE; int idxToy = idx % PESIZE; int agent_position = permutation[idxTox][idxToy]; new_grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)] = 0; } } __global__ void assign (int grid[][SIZE+2], int new_grid[][SIZE+2], int permutation[][PESIZE], int * move_list, int * space_list, int samples[SAM_NUM_VALUES]){ int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx < (device_removed_move_list_end) ){ int idxTox = idx / PESIZE; int idxToy = idx % PESIZE; int space_position = space_list[samples[idx]-1]; int agent_position = permutation[idxTox][idxToy]; new_grid[space_position/(SIZE+2)][space_position%(SIZE+2)] = grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)]; } } __global__ void checkNumberDevice(int new_grid[][SIZE+2]){ int agentTypeOne = 0; int agentTypeTwo = 0; for(int i=0; i<SIZE+2; i++){ for(int j=0; j<SIZE+2; j++){ if(new_grid[i][j] == 1){ agentTypeOne +=1; } else if(new_grid[i][j] == 2){ agentTypeTwo += 1; } } } printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo); } void checkNumber(int grid [SIZE+2][SIZE+2]){ int agentTypeOne = 0; int agentTypeTwo = 0; for(int i=0; i<SIZE+2; i++){ for(int j=0; j<SIZE+2; j++){ if(grid[i][j] == 1){ agentTypeOne +=1; } else if(grid[i][j] == 2){ agentTypeTwo += 1; } } } printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo); } __global__ void devicePrintOutput(int device_list[][PESIZE]){ for(int i =0; i<device_penumber_inuse; i++){ //for(int j=0; j<random_list_counter[i];j++){ // printf("%d \n",i); for(int j=0; j<PESIZE;j++){ //printf("PE %d, index %d, value %d\n", i, j, device_list[i][j]); printf("%d ",device_list[i][j]); } printf("\n"); } } __global__ void initSamValue(int device_list[SAM_NUM_VALUES]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; device_list[idx] = idx+1; } __global__ void printSamples(int samples[SAM_NUM_VALUES]){ for(int i=0; i<(device_removed_move_list_end); i++){ printf("%d %d \n",i,samples[i]); } } __global__ void printSamValue(int device_sam_list[SAM_NUM_VALUES]){ for(int i=0; i<(device_pe_inuse*SAM_PESIZE); i++){ printf("%d ",device_sam_list[i]); } } int host_grid[SIZE+2][SIZE+2]; int main(int argc, char* argv[]) { //Initialization struct timespec start, stop; double accum; int (*device_grid)[SIZE + 2]; int (*device_newGrid)[SIZE + 2]; 
int (*device_permutation_list)[PESIZE]; int (*device_temp_permutation_list)[PESIZE*FACTOR]; int (*random_list_counter); int (*scanned_random_list_counter); int (*move_list); int (*removed_move_list_end); int (*space_list); int (*removed_space_list_end); int (*samples); int (*device_sam_list); srand(SRAND_VALUE); size_t bytes = sizeof(int)*(SIZE + 2)*(SIZE + 2); myCurandState_t (*devState)[PESIZE]; myCurandState_t (*devStateHyper); myCurandState_t (*devStateSam); hipMalloc((void**)&devState, TOTAL * sizeof(myCurandState_t)); hipMalloc(&random_list_counter, sizeof(int)*(PENUMBER)); hipMalloc(&scanned_random_list_counter, sizeof(int)*(PENUMBER)); hipMalloc(&device_sam_list, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER)); hipMalloc((void**)&device_grid, bytes); hipMalloc((void**)&device_newGrid, bytes); hipMalloc((void**)&device_permutation_list, sizeof(int)*(TOTAL)); hipMalloc((void**)&device_temp_permutation_list, sizeof(int)*(agentNumber)*FACTOR); hipMalloc(&move_list, sizeof(int)*(SIZE + 2)*(SIZE + 2)); hipMalloc(&space_list, sizeof(int)*(SIZE + 2)*(SIZE + 2)); hipMalloc(&samples, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER)); hipMalloc(&devStateHyper, SAM_PENUMBER * sizeof(myCurandState_t)); hipMalloc(&devStateSam, SAM_PENUMBER * sizeof(myCurandState_t)); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif int blockSizeVerPermu = numThreadsPerBlock / PESIZE; dim3 blockSizePermu(blockSizeVerPermu, PESIZE, 1); hipLaunchKernelGGL(( initCurand), dim3((ceil(TOTAL/double(numThreadsPerBlock)))),dim3(blockSizePermu), 0, 0, devState); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif for (int i=0; i<(SIZE+2); i++){ for (int j=0; j<SIZE+2; j++){ host_grid[i][j] = 0; } } int blockSizePerDim = sqrt(numThreadsPerBlock); int gridSizePerDim = (SIZE + 2) / blockSizePerDim; dim3 blockSize(blockSizePerDim, blockSizePerDim, 1); dim3 gridSize(gridSizePerDim, gridSizePerDim, 1); initPos(host_grid); //printOutput(host_grid); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif hipMemcpy(device_grid,host_grid,bytes,hipMemcpyHostToDevice); hipMemcpy(device_newGrid,host_grid,bytes,hipMemcpyHostToDevice); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif hipLaunchKernelGGL(( initSamCurand), dim3(((double)SAM_PENUMBER / SAM_numThreadsPerBlock)),dim3(SAM_numThreadsPerBlock), 0, 0, devStateSam); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif update << <gridSize, blockSize >> >(device_grid, device_newGrid,move_list,space_list); if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } cached_allocator alloc; int removed_list_number = 0; int space_list_number = 0; for(int i=0; i<ITERATIONS; i++){ //Simulation cycles #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif //Compute Happiness compute << <gridSize, blockSize >> >(device_grid, device_newGrid, move_list, space_list, i); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif //Remove 0 to form a list of moveable agents removed_move_list_end = thrust::remove(thrust::hip::par(alloc), move_list, move_list + ((SIZE+2)*(SIZE+2)), 0); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif removed_list_number = removed_move_list_end - move_list; hipMemcpyToSymbol(device_removed_move_list_end, &removed_list_number, sizeof(int)); int TwoDimGridSize = ceil(removed_list_number/double(numThreadsPerBlock)); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif //Remove 0 to form a list of available cells removed_space_list_end = 
thrust::remove(thrust::hip::par(alloc), space_list, space_list + ((SIZE+2)*(SIZE+2)), 0); space_list_number = removed_space_list_end - space_list; #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif hipMemcpyToSymbol(device_removed_space_list_end, &space_list_number, sizeof(int)); int penumberinuse = ceil(removed_list_number/ double(PESIZE)); hipMemcpyToSymbol(device_penumber_inuse, &penumberinuse, sizeof(int)); hipLaunchKernelGGL(( generateList), dim3(ceil(space_list_number/double(numThreadsPerBlock))),dim3(blockSizePermu), 0, 0, device_permutation_list); int sam_num_inuse = space_list_number; int sam_pe_inuse = ceil(double(sam_num_inuse) / SAM_PESIZE); hipMemcpyToSymbol(device_pe_inuse, &sam_pe_inuse, sizeof(int)); hipMemcpyToSymbol(device_num_inuse, &sam_num_inuse, sizeof(int)); hipLaunchKernelGGL(( clearSamples), dim3(ceil(sam_pe_inuse*SAM_PESIZE / (double)SAM_numThreadsPerBlock)), dim3(SAM_numThreadsPerBlock), 0, 0, samples); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif int sam_gridSize = ceil((double)sam_pe_inuse / SAM_numThreadsPerBlock); hipLaunchKernelGGL(( initSamValue), dim3(ceil(double(sam_num_inuse) / SAM_numThreadsPerBlock)), dim3(SAM_numThreadsPerBlock), 0, 0, device_sam_list); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif //Perfrom sampling hipLaunchKernelGGL(( sampleP), dim3(sam_gridSize), dim3(SAM_numThreadsPerBlock), 0, 0, devStateSam, devStateHyper, device_sam_list, samples, removed_list_number, 0, sam_pe_inuse-1); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif int OneDimGridSize = ceil(penumberinuse / double(numThreadsPerBlock)); hipLaunchKernelGGL(( clearCounter), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, random_list_counter); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif //Perfrom permutation hipLaunchKernelGGL(( sendToRandom), dim3(ceil(removed_list_number/double(numThreadsPerBlock))),dim3(blockSizePermu) , 0, 0, devState,move_list,device_temp_permutation_list,random_list_counter); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif hipLaunchKernelGGL(( sortList), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, device_temp_permutation_list,random_list_counter); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif thrust::exclusive_scan(thrust::hip::par(alloc), random_list_counter, random_list_counter + penumberinuse, scanned_random_list_counter); hipLaunchKernelGGL(( randomPermute), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, devState,device_temp_permutation_list,random_list_counter); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif hipLaunchKernelGGL(( recoverSize), dim3(OneDimGridSize),dim3((numThreadsPerBlock)), 0, 0, device_permutation_list, device_temp_permutation_list,random_list_counter,scanned_random_list_counter); thrust::remove(thrust::device, samples, samples + sam_pe_inuse*SAM_PESIZE , 0); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif hipLaunchKernelGGL(( prepareNewGrid) , dim3(TwoDimGridSize), dim3(numThreadsPerBlock) , 0, 0, device_newGrid, move_list,device_permutation_list); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif //Mapping hipLaunchKernelGGL(( assign) , dim3(TwoDimGridSize), dim3(numThreadsPerBlock), 0, 0, device_grid, device_newGrid, device_permutation_list, move_list, space_list,samples); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif update << <gridSize, blockSize >> >(device_grid, 
device_newGrid,move_list,space_list); #ifdef DEBUG hipDeviceSynchronize(); cudaCheckError(); #endif } //End Timing if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } accum = ( stop.tv_sec - start.tv_sec ) * 1e6 + ( stop.tv_nsec - start.tv_nsec ) / 1e3; printf( "%.1f Time is %.5f s \n",float(OCCUPANCY), accum / 1e6); hipMemcpy(host_grid, device_newGrid, bytes, hipMemcpyDeviceToHost); //printOutput(host_grid); //checkNumber(host_grid); hipFree(device_grid); hipFree(device_newGrid); hipFree(device_permutation_list); hipFree(device_temp_permutation_list); hipFree(move_list); hipFree(random_list_counter); hipFree(scanned_random_list_counter); hipFree(space_list); hipFree(devState); hipFree(samples); hipFree(devStateSam); hipFree(devStateHyper); hipFree(device_sam_list); return 0; } void printOutput(int grid [SIZE+2][SIZE+2] ){ //output grid from 1 t o SIZE+1 for (int i=1; i<SIZE+1; i++){ for (int j=1; j<SIZE+1; j++){ printf("%d ",grid[i][j]); //if(i%SIZE) } printf("\n"); } printf("\n"); } void initPos(int grid [SIZE+2][SIZE+2]){ // type 1 and 2 to grid randomly int row; int column; for(int i=0; i<agentTypeOneNumber; i++){ do{ row = random_location(); column = random_location(); }while(grid[row][column] != 0); grid[row][column] = 1; } for(int i=0; i<agentTypeTwoNumber; i++){ do{ row = random_location(); column = random_location(); }while(grid[row][column] != 0); grid[row][column] = 2; } } int random_location() { //generate a random number from 1 to SIZE+1 int r; r = rand(); return (r % (SIZE) +1 ); }
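The simulation loop above compacts move_list and space_list with thrust::remove and derives the number of live entries from the returned end pointer. A minimal standalone illustration of that stream-compaction idiom, using 0 as the removed sentinel just as the original does (this example is not taken from the original file):

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/execution_policy.h>

int main()
{
    const int n = 10;
    int h[n] = {0, 3, 0, 7, 0, 0, 5, 0, 9, 0};

    thrust::device_vector<int> d(h, h + n);

    // thrust::remove shifts the kept elements to the front and returns an
    // iterator to the new logical end; elements past it are unspecified.
    auto new_end = thrust::remove(thrust::device, d.begin(), d.end(), 0);
    int kept = static_cast<int>(new_end - d.begin());

    printf("kept %d values:", kept);
    for (int i = 0; i < kept; i++)
        printf(" %d", (int)d[i]);
    printf("\n");                       // expected: 3 7 5 9
    return 0;
}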
2900f5837e67811d64d43387fd8f55113f583dbb.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <curand.h> #include <math.h> #include <curand_kernel.h> #include <time.h> #include <unistd.h> #include <thrust/scan.h> #include <thrust/remove.h> #include <thrust/execution_policy.h> #include <iostream> #include "custom_temporary_allocation.cuh" #include "parameter.cuh" using namespace std; typedef curandStatePhilox4_32_10_t myCurandState_t; //#define DEBUG #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \ exit(0); \ } \ } #define FACTOR 5 #define ITERATIONS 100 #define TOTAL (SIZE * SIZE) #define GRIDSIZE (SIZE+2) #define GRIDTOTAL (SIZE+2)*(SIZE+2) #define SRAND_VALUE 200 #define PENUMBER (TOTAL/PESIZE) #define SAM_NUM_VALUES ((SIZE+2)*(SIZE+2)) #define SAM_PENUMBER (SAM_NUM_VALUES / SAM_PESIZE) const int agentTypeOneNumber = agentNumber / 2; const int agentTypeTwoNumber = agentNumber - agentTypeOneNumber; const int happinessThreshold = 5; void printOutput(int [SIZE+2][SIZE+2]); void initPos(int grid [SIZE+2][SIZE+2]); int random_location(); __device__ static const int FAK_LEN = 1024; // length of factorial table __device__ int hyp_n_last[SAM_PENUMBER], hyp_m_last[SAM_PENUMBER], hyp_N_last[SAM_PENUMBER]; // Last values of parameters __device__ int hyp_mode[SAM_PENUMBER], hyp_mp[SAM_PENUMBER]; // Mode, mode+1 __device__ int hyp_bound[SAM_PENUMBER]; // Safety upper bound __device__ double hyp_a[SAM_PENUMBER]; // hat center __device__ double hyp_h[SAM_PENUMBER]; // hat width __device__ double hyp_fm[SAM_PENUMBER]; // Value at mode __device__ int device_pe_inuse; __device__ int device_num_inuse; __device__ int device_removed_move_list_end; __device__ int device_removed_space_list_end; __device__ int device_penumber_inuse; __device__ int device_reduced_pe_position; __device__ float getnextrand(myCurandState_t *state){ return (curand_uniform(state)); } __global__ void initSamCurand(myCurandState_t state[SAM_PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < SAM_PENUMBER){ curand_init(idx, 0 , 0, &state[idx]); } } __device__ const double C0 = 0.918938533204672722, // ln(sqrt(2*pi)) C1 = 1./12., C3 = -1./360.; __device__ double fac_table[FAK_LEN]; __device__ int initialized = 0; __device__ double LnFac(int n) { if (n < FAK_LEN) { if (n <= 1) { if (n < 0) printf("Parameter negative in LnFac function\n"); return 0; } if (!initialized) { // first time. Must initialize table // make table of ln(n!) double sum = fac_table[0] = 0.; for (int i=1; i<FAK_LEN; i++) { sum += log(double(i)); fac_table[i] = sum; } initialized = 1; } return fac_table[n]; } // not found in table. use Stirling approximation double n1, r; n1 = n; r = 1. / n1; return (n1 + 0.5)*log(n1) - n1 + C0 + r*(C1 + r*r*C3); //return logf(n); } __device__ double fc_lnpk(int k, int L, int m, int n) { // subfunction used by hypergeometric and Fisher's noncentral hypergeometric distribution return(LnFac(k) + LnFac(m - k) + LnFac(n - k) + LnFac(L + k)); } __device__ int HypInversionMod (myCurandState_t stateHyper[SAM_PENUMBER],int n, int m, int N, int idx) { /* Subfunction for Hypergeometric distribution. Assumes 0 <= n <= m <= N/2. Overflow protection is needed when N > 680 or n > 75. Hypergeometric distribution by inversion method, using down-up search starting at the mode using the chop-down technique. This method is faster than the rejection method when the variance is low. 
*/ //int idx = threadIdx.x + blockIdx.x * blockDim.x; // Sampling int I; // Loop counter int L = N - m - n; // Parameter double modef; // mode, float double Mp, np; // m + 1, n + 1 double p; // temporary double U; // uniform random double c, d; // factors in iteration double divisor; // divisor, eliminated by scaling double k1, k2; // float version of loop counter double L1 = L; // float version of L Mp = (double)(m + 1); np = (double)(n + 1); if (N != hyp_N_last[idx] || m != hyp_m_last[idx] || n != hyp_n_last[idx]) { // set-up when parameters have changed hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n; p = Mp / (N + 2.); modef = np * p; // mode, real hyp_mode[idx] = (int)modef; // mode, integer if (hyp_mode[idx] == modef && p == 0.5) { hyp_mp[idx] = hyp_mode[idx]--; } else { hyp_mp[idx] = hyp_mode[idx] + 1; } // mode probability, using log factorial function // (may read directly from fac_table if N < FAK_LEN) hyp_fm[idx] = exp(LnFac(N-m) - LnFac(L+hyp_mode[idx]) - LnFac(n-hyp_mode[idx]) + LnFac(m) - LnFac(m-hyp_mode[idx]) - LnFac(hyp_mode[idx]) - LnFac(N) + LnFac(N-n) + LnFac(n) ); // safety bound - guarantees at least 17 significant decimal digits // bound = min(n, (int)(modef + k*c')) hyp_bound[idx] = (int)(modef + 11. * sqrt(modef * (1.-p) * (1.-n/(double)N)+1.)); if (hyp_bound[idx] > n) hyp_bound[idx] = n; } // loop until accepted //int max_iterations = 1000; while(1) { // if(!(max_iterations--)) // break; U = getnextrand(&stateHyper[idx]); // uniform random number to be converted //printf(" U is %lf\n",U); // start chop-down search at mode if ((U -= hyp_fm[idx]) <= 0.) return(hyp_mode[idx]); c = d = hyp_fm[idx]; // alternating down- and upward search from the mode k1 = hyp_mp[idx] - 1; k2 = hyp_mode[idx] + 1; for (I = 1; I <= hyp_mode[idx]; I++, k1--, k2++) { // if(!(max_iterations--)) // break; // Downward search from k1 = hyp_mp - 1 divisor = (np - k1)*(Mp - k1); // Instead of dividing c with divisor, we multiply U and d because // multiplication is faster. This will give overflow if N > 800 U *= divisor; d *= divisor; c *= k1 * (L1 + k1); if ((U -= c) <= 0.) return(hyp_mp[idx] - I - 1); // = k1 - 1 //printf("Line 228 I %d \n",I); // Upward search from k2 = hyp_mode + 1 divisor = k2 * (L1 + k2); // re-scale parameters to avoid time-consuming division U *= divisor; c *= divisor; d *= (np - k2) * (Mp - k2); if ((U -= d) <= 0.) return(hyp_mode[idx] + I); // = k2 // Values of n > 75 or N > 680 may give overflow if you leave out this.. // overflow protection // if (U > 1.E100) {U *= 1.E-100; c *= 1.E-100; d *= 1.E-100;} } // Upward search from k2 = 2*mode + 1 to bound for (k2 = I = hyp_mp[idx] + hyp_mode[idx]; I <= hyp_bound[idx]; I++, k2++) { //if(!(max_iterations--)) // break; divisor = k2 * (L1 + k2); U *= divisor; d *= (np - k2) * (Mp - k2); if ((U -= d) <= 0.) return(I); // more overflow protection // if (U > 1.E100) {U *= 1.E-100; d *= 1.E-100;} } } } __device__ int HypRatioOfUnifoms (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) { /* Subfunction for Hypergeometric distribution using the ratio-of-uniforms rejection method. This code is valid for 0 < n <= m <= N/2. The computation time hardly depends on the parameters, except that it matters a lot whether parameters are within the range where the LnFac function is tabulated. Reference: E. Stadlober: "The ratio of uniforms approach for generating discrete random variates". Journal of Computational and Applied Mathematics, vol. 31, no. 1, 1990, pp. 181-189. 
*/ //int idx = threadIdx.x + blockIdx.x * blockDim.x; const double SHAT1 = 2.943035529371538573; // 8/e const double SHAT2 = 0.8989161620588987408; // 3-sqrt(12/e) int L; // N-m-n int mode; // mode int k; // integer sample double x; // real sample double rNN; // 1/(N*(N+2)) double my; // mean double var; // variance double u; // uniform random double lf; // ln(f(x)) L = N - m - n; if (hyp_N_last[idx] != N || hyp_m_last[idx] != m || hyp_n_last[idx] != n) { hyp_N_last[idx] = N; hyp_m_last[idx] = m; hyp_n_last[idx] = n; // Set-up rNN = 1. / ((double)N*(N+2)); // make two divisions in one my = (double)n * m * rNN * (N+2); // mean = n*m/N mode = (int)(double(n+1) * double(m+1) * rNN * N); // mode = floor((n+1)*(m+1)/(N+2)) var = (double)n * m * (N-m) * (N-n) / ((double)N*N*(N-1));// variance hyp_h[idx] = sqrt(SHAT1 * (var+0.5)) + SHAT2; // hat width hyp_a[idx] = my + 0.5; // hat center hyp_fm[idx] = fc_lnpk(mode, L, m, n); // maximum hyp_bound[idx] = (int)(hyp_a[idx] + 4.0 * hyp_h[idx]); // safety-bound if (hyp_bound[idx] > n) hyp_bound[idx] = n; } while(1) { u = getnextrand(&stateHyper[idx]); // uniform random number if (u == 0) continue; // avoid division by 0 x = hyp_a[idx] + hyp_h[idx] * (getnextrand(&stateHyper[idx])-0.5) / u; // generate hat distribution if (x < 0. || x > 2E9) continue; // reject, avoid overflow k = (int)x; if (k > hyp_bound[idx]) continue; // reject if outside range lf = hyp_fm[idx] - fc_lnpk(k,L,m,n); // ln(f(k)) if (u * (4.0 - u) - 3.0 <= lf) break; // lower squeeze accept if (u * (u-lf) > 1.0) continue; // upper squeeze reject if (2.0 * log(u) <= lf) break; // final acceptance } return k; } __device__ int Hypergeometric (myCurandState_t stateHyper[SAM_PENUMBER], int n, int m, int N, int idx) { /* This function generates a random variate with the hypergeometric distribution. This is the distribution you get when drawing balls without replacement from an urn with two colors. n is the number of balls you take, m is the number of red balls in the urn, N is the total number of balls in the urn, and the return value is the number of red balls you get. This function uses inversion by chop-down search from the mode when parameters are small, and the ratio-of-uniforms method when the former method would be too slow or would give overflow. 
*/ int fak, addd; // used for undoing transformations int x; // result hyp_n_last[idx] = hyp_m_last[idx] = hyp_N_last[idx] = -1; // Last values of hypergeometric parameters // check if parameters are valid if (n > N || m > N || n < 0 || m < 0) { printf("Parameter out of range in hypergeometric function n %ld m %ld N %ld idx %d\n",n,m,N,idx); printf("Parameter out of range in hypergeometric function %d,%d,%d,%d\n", n > N, m > N, n < 0, m < 0); return 0; } // symmetry transformations fak = 1; addd = 0; if (m > N/2) { // invert m m = N - m; fak = -1; addd = n; } if (n > N/2) { // invert n n = N - n; addd += fak * m; fak = - fak; } if (n > m) { // swap n and m x = n; n = m; m = x; } // cases with only one possible result end here if (n == 0) return addd; //------------------------------------------------------------------ // choose method //------------------------------------------------------------------ if (N > 680 || n > 70) { // use ratio-of-uniforms method x = HypRatioOfUnifoms (stateHyper, n, m, N,idx); } else { // inversion method, using chop-down search from mode x = HypInversionMod (stateHyper, n, m, N,idx); } // undo symmetry transformations return x * fak + addd; } __global__ void clearSamples(int samples[SAM_NUM_VALUES]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < (SAM_NUM_VALUES)){ samples[idx] = 0; } } __device__ void methodA(myCurandState_t state[SAM_PENUMBER],int N, int n, int num_sample, int initialTocurrent,int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES]) { //ASSERT_LEQ(n, N); int idx = threadIdx.x + blockIdx.x * blockDim.x; // Initialization int sample = 0; double Nreal = (double) N; double top = Nreal - n; // Main loop while (n >= 2) { int S = 0; double V = getnextrand(&state[idx]); double quot = top / Nreal; while (quot > V) { S++; top -= 1.0; Nreal -= 1.0; quot = (quot * top) / Nreal; } // Skip over next S records and select the following one sample += S + 1; //samples[idx][num_sample++] = sample + initialTocurrent; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1]; //callback(sample); Nreal -= 1.0; n--; } if (n == 1) { int S = round(Nreal) * getnextrand(&state[idx]); sample += S + 1; //samples[idx][num_sample++] = sample + initialTocurrent; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE + sample + initialTocurrent-1]; //callback(sample); } } // Sampling method D from Vitter et al. // // \param N Size of population. // \param n Number of samples. // \param gen Uniform random variate generator. // \param samples Function to process sample. // __device__ void sample(myCurandState_t state[SAM_PENUMBER], int N, int n, int device_list[SAM_NUM_VALUES], int samples[SAM_NUM_VALUES]) { //ASSERT_LEQ(n, N); int idx = threadIdx.x + blockIdx.x * blockDim.x; int initialN = N; // Initialization int sample = 0; int num_sample = 0; double nreal = (double) n; double ninv = 1.0 / nreal; double Nreal = (double) N; double Vprime = exp(log(getnextrand(&state[idx])) * ninv); int qu1 = N + 1 - n; double qu1real = Nreal + 1.0 - nreal; int negalphainv = -13; int threshold = n * (-negalphainv); int S = 0; // Main loop while (n > 1 && threshold < N) { double nmin1inv = 1.0 / (nreal - 1.0); double negSreal = 0.0; while (true) { // Step D2: Generate U and X double X; while (true) { X = Nreal * (1.0 - Vprime); S = X; if (S < qu1) break; Vprime = exp(log(getnextrand(&state[idx])) * ninv); } double U = getnextrand(&state[idx]); negSreal = -(double)S; // Step D3: Accept? 
double y1 = exp(log(U * Nreal / qu1real) * nmin1inv); Vprime = y1 * (-X / Nreal + 1.0) * (qu1real / (negSreal + qu1real)); if (Vprime <= 1.0) break; // Accept! // Step D4: Accept? double y2 = 1.0; double top = Nreal - 1.0; double bottom; double limit; if (n - 1 > S) { bottom = Nreal - nreal; limit = N - S; } else { bottom = negSreal + Nreal - 1.0; limit = qu1; } for (int t = N; t > limit; t--) { y2 = (y2 * top) / bottom; top -= 1.0; bottom -= 1.0; } if (Nreal / (Nreal - X) >= y1 * exp(log(y2) * nmin1inv)) { // Accept! Vprime = exp(log(getnextrand(&state[idx])) * nmin1inv); break; } Vprime = exp(log(getnextrand(&state[idx])) * ninv); } // Skip over next S records and select the following one sample += S + 1; //samples[idx][num_sample++] = sample; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1]; //callback(sample); N = (N - 1) - S; Nreal = (Nreal - 1.0) + negSreal; n--; nreal -= 1.0; ninv = nmin1inv; qu1 -= S; qu1real += negSreal; threshold += negalphainv; } if (n > 1) { int currentN = N; methodA(state, N, n, num_sample, initialN - currentN, device_list,samples); //samples[num_sample++] = sample + initialN - currentN; //methodA(N, n, [&](int sample) { // callback(sample + initialN - currentN); //}); } else if (n == 1) { S = N * Vprime; // Skip over next S records and select the following one sample += S + 1; //samples[idx][num_sample++] = sample; samples[idx*SAM_PESIZE + num_sample++] = device_list[idx*SAM_PESIZE +sample-1]; //callback(sample); } } __global__ void sampleP(myCurandState_t state[SAM_PENUMBER], myCurandState_t stateHyper[SAM_PENUMBER], int device_list[SAM_NUM_VALUES],int samples[SAM_NUM_VALUES], int n, int j, int k) { int idx = threadIdx.x + blockIdx.x * blockDim.x; //idx += 1; if(idx < device_pe_inuse){ int seed = 1; //int counter = 0; int m,x; while(j - k != 0) { curand_init(seed, 0 , 0, &stateHyper[idx]); m = floor( (j+k)/2.0 ); //printf("sampleP1 n %d idx %d m %d\n",n,idx,m); //__device__ int Hypergeometric (curandState stateHyper[PENUMBER], //int n, int m, int N, int idx) { /* This function generates a random variate with the hypergeometric distribution. This is the distribution you get when drawing balls without replacement from an urn with two colors. n is the number of balls you take, m is the number of red balls in the urn, N is the total number of balls in the urn, and the return value is the number of red balls you get. 
*/ //printf("would call Hypergeometric(stateHyper, %d, %d, %d, %d)\n", n, (m-j)*PESIZE + 1, (k-j)*PESIZE + 1, idx); //printf("j is now %d, k is %d, m is %d, sums are %d and %d\n", j, k, m, k - (j - 1), m - (j - 1)); if(k != device_pe_inuse - 1){ x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, (k-(j-1))*SAM_PESIZE, idx); } else{ x = Hypergeometric(stateHyper, n, (m-(j-1))*SAM_PESIZE, ((k-1)-(j-1))*SAM_PESIZE + device_num_inuse % SAM_PESIZE, idx); } //printf("sampleP2 n %d idx %d x %d\n",n,idx,x); //int x = m; if(idx <= m) { n = x; k = m; seed = seed * 2; } else { n = n-x; j = m + 1; seed = seed * 2 + 1; } } //printf("sample n %d \n",n); if(idx != device_pe_inuse - 1 ) { //printf("idx %d sampling %d values\n", idx, n); sample(state, SAM_PESIZE, n, device_list, samples); } else { //printf("n > PESIZE %d \n",n); sample(state, device_num_inuse % SAM_PESIZE, n, device_list, samples); } /*if(n <= PESIZE ) { //printf("idx %d sampling %d values\n", idx, n); sample(state, PESIZE, n, device_list, samples); } else { printf("n > PESIZE %d \n",n); }*/ } } //__global__ void print_device_reduced_pe_position(){ //printf("reduced_pe_position %d \n",( int( 0.5 + ceil((float)device_reduced_pe_position / (PESIZE) )) ) ); //printf("device_reduced_pe_position %d \n",(device_reduced_pe_position ) ); //} __global__ void initCurand(myCurandState_t state[][PESIZE]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy=blockIdx.y*blockDim.y+threadIdx.y; if(idx < PENUMBER && idy<PESIZE){ curand_init(idx*(PESIZE)+idy,0 , 0, &state[idx][idy]); } } __global__ void compute(int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list, int iteration){ int idx=blockIdx.x*blockDim.x+threadIdx.x; int idy=blockIdx.y*blockDim.y+threadIdx.y; int sameTypeCount=0; int current_id = idx*(SIZE+2)+idy; if(grid[idx][idy] != 0){ int currentType = grid[idx][idy]; if(grid[idx-1][idy-1] == currentType){ sameTypeCount += 1; } if(grid[idx-1][idy] == currentType){ sameTypeCount += 1; } if(grid[idx-1][idy+1] == currentType){ sameTypeCount += 1; } if(grid[idx][idy-1] == currentType){ sameTypeCount += 1; } if(grid[idx][idy+1] == currentType){ sameTypeCount += 1; } if(grid[idx+1][idy-1] == currentType){ sameTypeCount += 1; } if(grid[idx+1][idy] == currentType){ sameTypeCount += 1; } if(grid[idx+1][idy+1] == currentType){ sameTypeCount += 1; } if(sameTypeCount < happinessThreshold){ move_list[current_id] = current_id; space_list[current_id] = current_id; } } else if(idx != 0 && idy !=0 && idx != (SIZE+1) && idy != (SIZE+1) ){ space_list[current_id] = current_id; } } __global__ void update (int grid[][SIZE+2], int new_grid[][SIZE+2], int * move_list, int * space_list){ int idx=blockIdx.x*blockDim.x+threadIdx.x; int idy=blockIdx.y*blockDim.y+threadIdx.y; grid[idy][idx] = new_grid[idy][idx]; move_list[idx*(SIZE+2)+idy] = 0; space_list[idx*(SIZE+2)+idy] = 0; } __global__ void sendToRandomPerpe(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < device_penumber_inuse -1 ){ for(int i=0; i < PESIZE; i++ ){ float r = getnextrand(&state[idx][0]); int random_position = r * (device_penumber_inuse-1); int acquired_position = atomicAdd(&random_list_counter[random_position],1); temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i]; } } else if(idx == device_penumber_inuse - 1 ){ for(int i=0; i < device_removed_move_list_end % PESIZE; i++ ){ float r = 
getnextrand(&state[idx][0]); int random_position = r * (device_penumber_inuse-1); int acquired_position = atomicAdd(&random_list_counter[random_position],1); temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+i]; } } } __global__ void sendToRandom(myCurandState_t state[][PESIZE],int device_list[SAM_NUM_VALUES], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx*PESIZE +idy < device_removed_move_list_end ){ float r = getnextrand(&state[idx][idy]); int random_position = r * (device_penumber_inuse-1); int acquired_position = atomicAdd(&random_list_counter[random_position],1); temp_device_list[random_position][acquired_position] = device_list[idx*PESIZE+idy]; } } __global__ void clearCounter(int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < device_penumber_inuse){ random_list_counter[idx] = 0; } } __global__ void generateList(int device_list[][PESIZE]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; if(idx*PESIZE +idy < device_removed_space_list_end ){ device_list[idx][idy] = idx*PESIZE +idy; } } static __device__ void swap(int *data, int x, int y) { int temp = data[x]; data[x] = data[y]; data[y] = temp; } static __device__ int partition(int *data, int left, int right) { const int mid = left + (right - left) / 2; const int pivot = data[(mid)]; swap(data, (mid), (left)); int i = left + 1; int j = right; while (i <= j) { while (i <= j && data[(i)] <= pivot) { i++; } while (i <= j && data[(j)] > pivot) { j--; } if (i < j) { swap(data, (i), (j)); } } swap(data, (i - 1), (left)); return i - 1; } typedef struct sort_data { int left; int right; } sort_data; __device__ void quicksort_seq(int *data, int right) { int left = 0; if(left == right) return; if (left > right) { right = 1 + right; } int stack_size = 0; sort_data stack[PESIZE*FACTOR]; stack[stack_size++] = { left, right }; while (stack_size > 0) { int curr_left = stack[stack_size - 1].left; int curr_right = stack[stack_size - 1].right; stack_size--; if (curr_left < curr_right) { int part = partition(data, curr_left, curr_right); stack[stack_size++] = {curr_left, part - 1}; stack[stack_size++] = {part + 1, curr_right}; } } } __global__ void sortList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < device_penumber_inuse){ int number = random_list_counter[idx]; if(number != 0){ quicksort_seq(temp_device_list[idx], number - 1 ); } } } __global__ void randomPermute(myCurandState_t state[][PESIZE], int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int reduced_pe = device_penumber_inuse; if(idx < reduced_pe){ for (int i = 0; i < random_list_counter[idx]; i++){ float r = getnextrand(&state[idx][0]); int j = r * (random_list_counter[idx]-1); int temp = temp_device_list[idx][i] ; temp_device_list[idx][i] = temp_device_list[idx][j] ; temp_device_list[idx][j] = temp; } } } __global__ void recoverSize(int device_list[][PESIZE], int temp_device_list[][PESIZE*FACTOR],int random_list_counter[PENUMBER], int scanned_random_list_counter[PENUMBER]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; int reduced_pe = device_penumber_inuse; if(idx < reduced_pe){ int delta = scanned_random_list_counter[idx]; for(int i=0; i<random_list_counter[idx]; i++){ int addValue = 
delta + i; int interResult = device_penumber_inuse*addValue/(PESIZE*device_penumber_inuse); device_list[interResult][(delta- (PESIZE*device_penumber_inuse/device_penumber_inuse)*interResult + i)] = temp_device_list[idx][i]; } } } struct smaller_than { __device__ bool operator()(const int x) { return (x < device_removed_space_list_end) == 0; } }; struct greater_than { __device__ bool operator()(int x) { return x > device_removed_move_list_end; } }; __global__ void printTempList(int temp_device_list[][PESIZE*FACTOR], int random_list_counter[PENUMBER]){ for(int i =0; i<device_penumber_inuse; i++){ for(int j=0; j<random_list_counter[i];j++){ printf("%d ",temp_device_list[i][j]); } printf("\n"); } } __global__ void printList(int * list,int *removed_list_end){ printf( "SIZE %d \n",removed_list_end - list) ; for(int i=0; i<removed_list_end - list; i++){ printf("%d ",list[i]); } printf("\n"); } __global__ void printListPre(int * list){ printf( "SIZE %d \n",device_removed_space_list_end) ; for(int i=0; i<device_removed_space_list_end; i++){ printf("%d ",list[i]); } printf("\n"); } __global__ void prepareNewGrid (int new_grid[][SIZE+2], int * move_list, int permutation[][PESIZE]){ int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx<device_removed_move_list_end){ int idxTox = idx / PESIZE; int idxToy = idx % PESIZE; int agent_position = permutation[idxTox][idxToy]; new_grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)] = 0; } } __global__ void assign (int grid[][SIZE+2], int new_grid[][SIZE+2], int permutation[][PESIZE], int * move_list, int * space_list, int samples[SAM_NUM_VALUES]){ int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx < (device_removed_move_list_end) ){ int idxTox = idx / PESIZE; int idxToy = idx % PESIZE; int space_position = space_list[samples[idx]-1]; int agent_position = permutation[idxTox][idxToy]; new_grid[space_position/(SIZE+2)][space_position%(SIZE+2)] = grid[agent_position/(SIZE+2)][agent_position%(SIZE+2)]; } } __global__ void checkNumberDevice(int new_grid[][SIZE+2]){ int agentTypeOne = 0; int agentTypeTwo = 0; for(int i=0; i<SIZE+2; i++){ for(int j=0; j<SIZE+2; j++){ if(new_grid[i][j] == 1){ agentTypeOne +=1; } else if(new_grid[i][j] == 2){ agentTypeTwo += 1; } } } printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo); } void checkNumber(int grid [SIZE+2][SIZE+2]){ int agentTypeOne = 0; int agentTypeTwo = 0; for(int i=0; i<SIZE+2; i++){ for(int j=0; j<SIZE+2; j++){ if(grid[i][j] == 1){ agentTypeOne +=1; } else if(grid[i][j] == 2){ agentTypeTwo += 1; } } } printf("Type One %d, Type Two %d\n",agentTypeOne, agentTypeTwo); } __global__ void devicePrintOutput(int device_list[][PESIZE]){ for(int i =0; i<device_penumber_inuse; i++){ //for(int j=0; j<random_list_counter[i];j++){ // printf("%d \n",i); for(int j=0; j<PESIZE;j++){ //printf("PE %d, index %d, value %d\n", i, j, device_list[i][j]); printf("%d ",device_list[i][j]); } printf("\n"); } } __global__ void initSamValue(int device_list[SAM_NUM_VALUES]){ int idx = threadIdx.x + blockIdx.x * blockDim.x; device_list[idx] = idx+1; } __global__ void printSamples(int samples[SAM_NUM_VALUES]){ for(int i=0; i<(device_removed_move_list_end); i++){ printf("%d %d \n",i,samples[i]); } } __global__ void printSamValue(int device_sam_list[SAM_NUM_VALUES]){ for(int i=0; i<(device_pe_inuse*SAM_PESIZE); i++){ printf("%d ",device_sam_list[i]); } } int host_grid[SIZE+2][SIZE+2]; int main(int argc, char* argv[]) { //Initialization struct timespec start, stop; double accum; int (*device_grid)[SIZE + 2]; int (*device_newGrid)[SIZE + 2]; 
int (*device_permutation_list)[PESIZE]; int (*device_temp_permutation_list)[PESIZE*FACTOR]; int (*random_list_counter); int (*scanned_random_list_counter); int (*move_list); int (*removed_move_list_end); int (*space_list); int (*removed_space_list_end); int (*samples); int (*device_sam_list); srand(SRAND_VALUE); size_t bytes = sizeof(int)*(SIZE + 2)*(SIZE + 2); myCurandState_t (*devState)[PESIZE]; myCurandState_t (*devStateHyper); myCurandState_t (*devStateSam); cudaMalloc((void**)&devState, TOTAL * sizeof(myCurandState_t)); cudaMalloc(&random_list_counter, sizeof(int)*(PENUMBER)); cudaMalloc(&scanned_random_list_counter, sizeof(int)*(PENUMBER)); cudaMalloc(&device_sam_list, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER)); cudaMalloc((void**)&device_grid, bytes); cudaMalloc((void**)&device_newGrid, bytes); cudaMalloc((void**)&device_permutation_list, sizeof(int)*(TOTAL)); cudaMalloc((void**)&device_temp_permutation_list, sizeof(int)*(agentNumber)*FACTOR); cudaMalloc(&move_list, sizeof(int)*(SIZE + 2)*(SIZE + 2)); cudaMalloc(&space_list, sizeof(int)*(SIZE + 2)*(SIZE + 2)); cudaMalloc(&samples, sizeof(int)*(SAM_PESIZE)*(SAM_PENUMBER)); cudaMalloc(&devStateHyper, SAM_PENUMBER * sizeof(myCurandState_t)); cudaMalloc(&devStateSam, SAM_PENUMBER * sizeof(myCurandState_t)); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif int blockSizeVerPermu = numThreadsPerBlock / PESIZE; dim3 blockSizePermu(blockSizeVerPermu, PESIZE, 1); initCurand<<<(ceil(TOTAL/double(numThreadsPerBlock))),blockSizePermu>>>(devState); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif for (int i=0; i<(SIZE+2); i++){ for (int j=0; j<SIZE+2; j++){ host_grid[i][j] = 0; } } int blockSizePerDim = sqrt(numThreadsPerBlock); int gridSizePerDim = (SIZE + 2) / blockSizePerDim; dim3 blockSize(blockSizePerDim, blockSizePerDim, 1); dim3 gridSize(gridSizePerDim, gridSizePerDim, 1); initPos(host_grid); //printOutput(host_grid); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif cudaMemcpy(device_grid,host_grid,bytes,cudaMemcpyHostToDevice); cudaMemcpy(device_newGrid,host_grid,bytes,cudaMemcpyHostToDevice); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif initSamCurand<<<((double)SAM_PENUMBER / SAM_numThreadsPerBlock),SAM_numThreadsPerBlock>>>(devStateSam); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif update << <gridSize, blockSize >> >(device_grid, device_newGrid,move_list,space_list); if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } cached_allocator alloc; int removed_list_number = 0; int space_list_number = 0; for(int i=0; i<ITERATIONS; i++){ //Simulation cycles #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif //Compute Happiness compute << <gridSize, blockSize >> >(device_grid, device_newGrid, move_list, space_list, i); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif //Remove 0 to form a list of moveable agents removed_move_list_end = thrust::remove(thrust::cuda::par(alloc), move_list, move_list + ((SIZE+2)*(SIZE+2)), 0); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif removed_list_number = removed_move_list_end - move_list; cudaMemcpyToSymbol(device_removed_move_list_end, &removed_list_number, sizeof(int)); int TwoDimGridSize = ceil(removed_list_number/double(numThreadsPerBlock)); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif //Remove 0 to form a list of available cells removed_space_list_end = thrust::remove(thrust::cuda::par(alloc), space_list, space_list + 
((SIZE+2)*(SIZE+2)), 0); space_list_number = removed_space_list_end - space_list; #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif cudaMemcpyToSymbol(device_removed_space_list_end, &space_list_number, sizeof(int)); int penumberinuse = ceil(removed_list_number/ double(PESIZE)); cudaMemcpyToSymbol(device_penumber_inuse, &penumberinuse, sizeof(int)); generateList<<<ceil(space_list_number/double(numThreadsPerBlock)),blockSizePermu>>>(device_permutation_list); int sam_num_inuse = space_list_number; int sam_pe_inuse = ceil(double(sam_num_inuse) / SAM_PESIZE); cudaMemcpyToSymbol(device_pe_inuse, &sam_pe_inuse, sizeof(int)); cudaMemcpyToSymbol(device_num_inuse, &sam_num_inuse, sizeof(int)); clearSamples<<<ceil(sam_pe_inuse*SAM_PESIZE / (double)SAM_numThreadsPerBlock), SAM_numThreadsPerBlock>>>(samples); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif int sam_gridSize = ceil((double)sam_pe_inuse / SAM_numThreadsPerBlock); initSamValue<<<ceil(double(sam_num_inuse) / SAM_numThreadsPerBlock), SAM_numThreadsPerBlock>>>(device_sam_list); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif //Perfrom sampling sampleP<<<sam_gridSize, SAM_numThreadsPerBlock>>>( devStateSam, devStateHyper, device_sam_list, samples, removed_list_number, 0, sam_pe_inuse-1); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif int OneDimGridSize = ceil(penumberinuse / double(numThreadsPerBlock)); clearCounter<<<OneDimGridSize,(numThreadsPerBlock)>>>(random_list_counter); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif //Perfrom permutation sendToRandom<<<ceil(removed_list_number/double(numThreadsPerBlock)),blockSizePermu >>>(devState,move_list,device_temp_permutation_list,random_list_counter); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif sortList<<<OneDimGridSize,(numThreadsPerBlock)>>>(device_temp_permutation_list,random_list_counter); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif thrust::exclusive_scan(thrust::cuda::par(alloc), random_list_counter, random_list_counter + penumberinuse, scanned_random_list_counter); randomPermute<<<OneDimGridSize,(numThreadsPerBlock)>>>(devState,device_temp_permutation_list,random_list_counter); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif recoverSize<<<OneDimGridSize,(numThreadsPerBlock)>>>(device_permutation_list, device_temp_permutation_list,random_list_counter,scanned_random_list_counter); thrust::remove(thrust::device, samples, samples + sam_pe_inuse*SAM_PESIZE , 0); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif prepareNewGrid <<<TwoDimGridSize, numThreadsPerBlock >>> (device_newGrid, move_list,device_permutation_list); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif //Mapping assign <<<TwoDimGridSize, numThreadsPerBlock>>> (device_grid, device_newGrid, device_permutation_list, move_list, space_list,samples); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif update << <gridSize, blockSize >> >(device_grid, device_newGrid,move_list,space_list); #ifdef DEBUG cudaDeviceSynchronize(); cudaCheckError(); #endif } //End Timing if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } accum = ( stop.tv_sec - start.tv_sec ) * 1e6 + ( stop.tv_nsec - start.tv_nsec ) / 1e3; printf( "%.1f Time is %.5f s \n",float(OCCUPANCY), accum / 1e6); cudaMemcpy(host_grid, device_newGrid, bytes, cudaMemcpyDeviceToHost); //printOutput(host_grid); 
//checkNumber(host_grid); cudaFree(device_grid); cudaFree(device_newGrid); cudaFree(device_permutation_list); cudaFree(device_temp_permutation_list); cudaFree(move_list); cudaFree(random_list_counter); cudaFree(scanned_random_list_counter); cudaFree(space_list); cudaFree(devState); cudaFree(samples); cudaFree(devStateSam); cudaFree(devStateHyper); cudaFree(device_sam_list); return 0; } void printOutput(int grid [SIZE+2][SIZE+2] ){ //output grid from 1 t o SIZE+1 for (int i=1; i<SIZE+1; i++){ for (int j=1; j<SIZE+1; j++){ printf("%d ",grid[i][j]); //if(i%SIZE) } printf("\n"); } printf("\n"); } void initPos(int grid [SIZE+2][SIZE+2]){ // type 1 and 2 to grid randomly int row; int column; for(int i=0; i<agentTypeOneNumber; i++){ do{ row = random_location(); column = random_location(); }while(grid[row][column] != 0); grid[row][column] = 1; } for(int i=0; i<agentTypeTwoNumber; i++){ do{ row = random_location(); column = random_location(); }while(grid[row][column] != 0); grid[row][column] = 2; } } int random_location() { //generate a random number from 1 to SIZE+1 int r; r = rand(); return (r % (SIZE) +1 ); }
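The device function methodA above follows Vitter's sequential sampling Method A: before each selection it draws a skip length S so that the n chosen positions come out in increasing order without replacement. The host-side sketch below restates just that loop for readability; unif01 is a hypothetical stand-in for the curand-based getnextrand, and the recorded positions are 1-based, matching the sample-1 indexing used when the kernel reads device_list.

#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <vector>

static double unif01() {                      // stand-in RNG in (0, 1)
    return (std::rand() + 1.0) / ((double)RAND_MAX + 2.0);
}

static std::vector<int> method_a(int N, int n) {
    std::vector<int> picks;                   // 1-based positions, as in methodA()
    double Nreal = (double)N;
    double top   = Nreal - n;
    int sample = 0;
    while (n >= 2) {
        int S = 0;
        double V = unif01();
        double quot = top / Nreal;
        while (quot > V) {                    // skip S unselected records
            ++S;
            top   -= 1.0;
            Nreal -= 1.0;
            quot   = quot * top / Nreal;
        }
        sample += S + 1;                      // position of the next selected record
        picks.push_back(sample);
        Nreal -= 1.0;
        --n;
    }
    if (n == 1) {                             // final pick is uniform over the remainder
        int S = (int)(std::round(Nreal) * unif01());
        picks.push_back(sample + S + 1);
    }
    return picks;
}

int main() {
    std::vector<int> picks = method_a(20, 5); // e.g. 5 of 20 positions, ascending
    for (size_t i = 0; i < picks.size(); ++i) std::printf("%d ", picks[i]);
    std::printf("\n");
    return 0;
}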
244147899f41e188dcaa04766556009a5646cdc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "cuda_wrapper.h" #include <float.h> __constant__ double dijagonala[8192]; __device__ double result1 = 0.0; __device__ double result2 = 0.0; __device__ double atomicMax(double *address, double val) { unsigned long long int *address_to_ull = (unsigned long long *) address; unsigned long long int old; unsigned long long int compare; if (val == 0.0) return *address; do { compare = *address_to_ull; old = atomicCAS(address_to_ull, compare, __double_as_longlong(fmax(val , __longlong_as_double(compare)))); } while (old != compare); return __longlong_as_double(old); } template <unsigned int block_size> __global__ void reduction_8_blokova(double *in_data, int N) { double *data = in_data + 8 * blockIdx.x * blockDim.x; __shared__ double smem[block_size]; smem[threadIdx.x] = fmax(data[threadIdx.x + 0 * blockDim.x], data[threadIdx.x + 1 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 2 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 3 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 4 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 5 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 6 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 7 * blockDim.x]); __syncthreads(); if (block_size >= 1024 && threadIdx.x < 512) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 512]); __syncthreads(); if (block_size >= 512 && threadIdx.x < 256) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 256]); __syncthreads(); if (block_size >= 256 && threadIdx.x < 128) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 128]); __syncthreads(); if (block_size >= 128 && threadIdx.x < 64) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 64]); __syncthreads(); if (threadIdx.x < 32) { volatile double *tmp = smem; tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 32]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 16]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 8]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 4]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 2]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 1]); } if (threadIdx.x == 0) atomicAdd(&result1, smem[0]); } template <unsigned int block_size> __global__ void reduction_jedan_blok(double *in_data, int N) { __shared__ double smem2[block_size]; int broj_bloka = blockIdx.x + gridDim.x * blockIdx.y; smem2[threadIdx.x] = in_data[threadIdx.x + broj_bloka*block_size]; __syncthreads(); if (block_size >= 1024 && threadIdx.x < 512) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 512]); __syncthreads(); if (block_size >= 512 && threadIdx.x < 256) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 256]); __syncthreads(); if (block_size >= 256 && threadIdx.x < 128) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 128]); __syncthreads(); if (block_size >= 128 && threadIdx.x < 64) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 64]); __syncthreads(); if (threadIdx.x < 32) { volatile double *tmp = smem2; tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 32]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 16]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 8]); 
tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 4]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 2]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 1]); } if (threadIdx.x == 0) atomicMax(&result2, smem2[0]); } void ispis_matrice (double *a, int x, int y) { int i, j; for(i=0; i<x; ++i) { for(j=0; j<y; ++j) printf("%lg ", a[j*x+i]); printf("\n"); } printf("\n"); } void izracunaj_normalno(double *a, int n) { double max=-1; for(int i=0; i<n; i++) for(int j=0; j<n; j++) if(i!=j && a[i+n*j] + a[i*n+i] > max) max = a[i+n*j] + a[i*n+i]; printf("Normalno racunanje daje rezultat: %lg.\n", max); } int jednake(double *a, double *b, int n) { for(int i=0; i<n; i++) if(a[i] != b[i]) { printf("Mjesto greske je %d, a brojevi su: %lg i %lg\n", i, a[i], b[i]); return 0; } return 1; } __global__ void zbrajanje (double *a, int N) { //__shared__ double dijagonala[N]; //gridDim.x je broj blokova u redku //gridDim.y je broj blokova u stupcu int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * blockDim.x + threadIdx.x; int broj_redka = threadIdx.x+ blockIdx.x * blockDim.x; ///Inicijalizacija shared memorije s elementima s dijagonale /*if(index < N) dijagonala[N] = a[index*N + index]; __syncthreads();*/ //gledamo samo elemente matrice do N (jer je matrica prosirena s nulama) i razlicite od dijagonale //te elemente uvecamo za vrijednost na dijagonali if(broj_redka < N && blockIdx.y != broj_redka) { //printf("Indeks je %d, indeks dijagonale %d, a=%lg, threadIdx.x=%d, dijagonala=%lg\n", index, blockIdx.y, a[index], threadIdx.x, dijagonala[blockIdx.y]); a[index] = fabs(a[index]) + fabs(dijagonala[blockIdx.y]); } __syncthreads(); } int nadi_odgovarajuci_broj_dretvi(int N) { // if(N <= 8) //return 8; if(N <= 64) return 64; else if(N <= 128) return 128; else if(N <= 256) return 256; else if(N <= 512) return 512; else if(N <= 768) return 256; else if(N <= 1024) return 1024; else return 256; } void prosiri_matricu(double *a, double *b, int m, int n) { for(int i=0; i<m; i++) for(int j=0; j<n; j++) { if(j<m) b[i*n+j] = a[i*m+j]; else b[i*n+j] = 0; } } int main(int argc, char **argv) { dim3 gridDim, blockDim; int N, size, broj_blokova_po_redku, broj_dretvi_po_bloku, prosireni_size; double *hst_A, *hst_A_prosirena, *hst_dijagonala; double *dev_A, norma, norma2; FILE *fA; double gpu_prebacivanje_podataka = 0.0, gpu_zbrajanje = 0.0, gpu_redukcija_jedan_blok = 0.0, gpu_redukcija_8_blokova = 0.0, gpu_redukcija_jedan_stupac = 0.0, cpu = 0.0; if(argc != 3) { fprintf(stderr, "Za pokretanje %s program ocekuje 2 inta za dimenzije matrica, te 3 stringa koji oznacavaju imena datoteka iz kojih citamo, pisemo\n", argv[0]); return 0; } N = atoi(argv[1]); size = N*N; if(N > 8192) { fprintf(stderr, "Imamo vise od 8192 redka u matrici, pa nije moguce sve te podatke spremiti u konstantnu memoriju (konst mem 64kB).\n"); return 0; } ///Cilj je sto bolje optimizirati broj dretvi u odnosu na red matrice, tj. iskoristiti odgovarajuc broj blokova za redak matrice tako da /// imamo sta manji visak dretvi koje "ne rade nista". 
broj_dretvi_po_bloku = nadi_odgovarajuci_broj_dretvi(N); broj_blokova_po_redku = 1; while(broj_blokova_po_redku * broj_dretvi_po_bloku < N) broj_blokova_po_redku++; prosireni_size = broj_dretvi_po_bloku*broj_blokova_po_redku*N; printf("Broj dretvi po bloku je %d\n", broj_dretvi_po_bloku); printf("Broj blokova po redku je %d\n", broj_blokova_po_redku); gridDim = dim3(broj_blokova_po_redku, N, 1); blockDim = dim3(broj_dretvi_po_bloku, 1, 1); open_file(fA, argv[2], "r"); // alokacija memorije na hostu host_alloc(hst_A, double, size); host_alloc(hst_A_prosirena, double, prosireni_size); host_alloc(hst_dijagonala, double, N); // alokacija memorije na deviceu cuda_exec(hipMalloc(&dev_A, prosireni_size * sizeof(double))); // citanje podataka iz binarne datoteke read_file(hst_A, sizeof(double), size, fA); cpu -= timer(); izracunaj_normalno(hst_A, N); cpu += timer(); for(int i=0; i<N; i++) { hst_dijagonala[i] = (hst_A[i*N+i]); //printf("i=%d, a[i]=%lg dijag[i]=%lg\n", i, hst_A[i*N+i], hst_dijagonala[i]); } prosiri_matricu(hst_A, hst_A_prosirena, N, broj_dretvi_po_bloku*broj_blokova_po_redku); gpu_prebacivanje_podataka -= timer(); // kopiranje podataka na device cuda_exec(hipMemcpy(dev_A, hst_A_prosirena, prosireni_size * sizeof(double), hipMemcpyHostToDevice)); cuda_exec(hipMemcpyToSymbol(dijagonala, hst_dijagonala, N*sizeof(double))); gpu_prebacivanje_podataka += timer(); gpu_zbrajanje -= timer(); hipLaunchKernelGGL(( zbrajanje), dim3(gridDim), dim3(blockDim), 0, 0, dev_A, N); cuda_exec(hipDeviceSynchronize()); gpu_zbrajanje -= timer(); gpu_redukcija_jedan_blok -= timer(); switch (broj_dretvi_po_bloku) { case 1024: hipLaunchKernelGGL(( reduction_jedan_blok<1024>), dim3(gridDim), dim3(blockDim), 0, 0, dev_A, prosireni_size); break; case 512: hipLaunchKernelGGL(( reduction_jedan_blok< 512>), dim3(gridDim), dim3(blockDim), 0, 0, dev_A, prosireni_size); break; case 256: hipLaunchKernelGGL(( reduction_jedan_blok< 256>), dim3(gridDim), dim3(blockDim), 0, 0, dev_A, prosireni_size); break; case 128: hipLaunchKernelGGL(( reduction_jedan_blok< 128>), dim3(gridDim), dim3(blockDim), 0, 0, dev_A, prosireni_size); break; case 64: hipLaunchKernelGGL(( reduction_jedan_blok< 64>), dim3(gridDim), dim3(blockDim), 0, 0, dev_A, prosireni_size); break; case 32: hipLaunchKernelGGL(( reduction_jedan_blok< 32>), dim3(gridDim), dim3(blockDim), 0, 0, dev_A, prosireni_size); break; } gpu_redukcija_jedan_blok += timer(); /*gpu_redukcija_8_blokova -= timer(); switch (broj_dretvi_po_bloku) { case 1024: reduction_8_blokova<1024><<<, blockDim>>>(dev_A, prosireni_size); break; case 512: reduction_8_blokova< 512><<<, blockDim>>>(dev_A, prosireni_size); break; case 256: reduction_8_blokova< 256><<<, blockDim>>>(dev_A, prosireni_size); break; case 128: reduction_8_blokova< 128><<<, blockDim>>>(dev_A, prosireni_size); break; case 64: reduction_8_blokova< 64><<<, blockDim>>>(dev_A, prosireni_size); break; case 32: reduction_8_blokova< 32><<<, blockDim>>>(dev_A, prosireni_size); break; } gpu_redukcija_8_blokova += timer();*/ /*gpu_redukcija_jedan_stupac -= timer(); switch (broj_dretvi_po_bloku) { case 1024: redukcija_jedan_stupac<1024><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 512: redukcija_jedan_stupac< 512><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 256: redukcija_jedan_stupac< 256><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 128: redukcija_jedan_stupac< 128><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 64: redukcija_jedan_stupac< 64><<<gridDim.y, 
blockDim>>>(dev_A, prosireni_size); break; case 32: redukcija_jedan_stupac< 32><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; } gpu_redukcija_jedan_stupac += timer();*/ gpu_prebacivanje_podataka -= timer(); //kopiranje podatak na host cuda_exec(hipMemcpy(hst_A_prosirena, dev_A, prosireni_size * sizeof(double), hipMemcpyDeviceToHost)); cuda_exec(hipMemcpyFromSymbol(&norma2, result2, sizeof(double))); gpu_prebacivanje_podataka += timer(); //to treba izbrisati //ispis_matrice(hst_A_prosirena, broj_dretvi_po_bloku*broj_blokova_po_redku, N); //ispis_matrice(hst_A, N, N); printf("GPU zbrajanje time: %#.3lgs\n", gpu_zbrajanje); printf("GPU redukcija jedan blok: %#.3lgs\n", gpu_redukcija_jedan_blok); //printf("GPU redukcija 8 blokova: %#.3lgs\n", gpu_redukcija_8_blokova); //printf("GPU redukcija jedan stupac: %#.3lgs\n", gpu_redukcija_jedan_stupac); printf("CPU execution time: %#.3lgs\n", cpu); printf("Norma pomocu GPU je: %lg\n", norma2); close_file(fA);printf("Nesto\n"); host_free(hst_A);printf("Nesto\n"); host_free(hst_A_prosirena); cuda_exec(hipFree(dev_A));printf("Nesto\n"); return 0; }
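The reduction_jedan_blok kernel in the file above is a shared-memory tree reduction with fmax and an unrolled, warp-synchronous tail. The stand-alone HIP sketch below isolates that pattern under the same warp-synchronous assumption the original makes; block_max is an illustrative name, the block size is fixed at 256, and the per-block maxima are combined in a second pass on the host rather than with an atomic. On architectures without implicit warp synchrony the tail would need __syncwarp, but the sketch keeps the original's volatile idiom.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <cfloat>

template <unsigned int BLOCK>
__global__ void block_max(const double *in, double *out, int n) {
    __shared__ double smem[BLOCK];
    int i = blockIdx.x * BLOCK + threadIdx.x;
    smem[threadIdx.x] = (i < n) ? in[i] : -DBL_MAX;      // pad with a neutral value
    __syncthreads();
    for (unsigned int s = BLOCK / 2; s > 32; s >>= 1) {  // tree reduction down to 64 values
        if (threadIdx.x < s)
            smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + s]);
        __syncthreads();
    }
    if (threadIdx.x < 32) {                              // warp-synchronous tail
        volatile double *v = smem;
        for (unsigned int s = 32; s > 0; s >>= 1)
            v[threadIdx.x] = fmax(v[threadIdx.x], v[threadIdx.x + s]);
    }
    if (threadIdx.x == 0) out[blockIdx.x] = smem[0];     // one maximum per block
}

int main() {
    const int n = 1 << 16, block = 256, grid = (n + block - 1) / block;
    double *h_in  = (double*)malloc(n * sizeof(double));
    double *h_out = (double*)malloc(grid * sizeof(double));
    for (int i = 0; i < n; ++i) h_in[i] = 0.001 * i;
    double *d_in, *d_out;
    hipMalloc((void**)&d_in, n * sizeof(double));
    hipMalloc((void**)&d_out, grid * sizeof(double));
    hipMemcpy(d_in, h_in, n * sizeof(double), hipMemcpyHostToDevice);
    hipLaunchKernelGGL((block_max<256>), dim3(grid), dim3(block), 0, 0, d_in, d_out, n);
    hipMemcpy(h_out, d_out, grid * sizeof(double), hipMemcpyDeviceToHost);
    double m = -DBL_MAX;
    for (int b = 0; b < grid; ++b)                       // second pass on the host
        if (h_out[b] > m) m = h_out[b];
    printf("max = %g (expected %g)\n", m, 0.001 * (n - 1));
    free(h_in); free(h_out); hipFree(d_in); hipFree(d_out);
    return 0;
}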
244147899f41e188dcaa04766556009a5646cdc2.cu
#include <stdio.h> #include "cuda_wrapper.h" #include <float.h> __constant__ double dijagonala[8192]; __device__ double result1 = 0.0; __device__ double result2 = 0.0; __device__ double atomicMax(double *address, double val) { unsigned long long int *address_to_ull = (unsigned long long *) address; unsigned long long int old; unsigned long long int compare; if (val == 0.0) return *address; do { compare = *address_to_ull; old = atomicCAS(address_to_ull, compare, __double_as_longlong(fmax(val , __longlong_as_double(compare)))); } while (old != compare); return __longlong_as_double(old); } template <unsigned int block_size> __global__ void reduction_8_blokova(double *in_data, int N) { double *data = in_data + 8 * blockIdx.x * blockDim.x; __shared__ double smem[block_size]; smem[threadIdx.x] = fmax(data[threadIdx.x + 0 * blockDim.x], data[threadIdx.x + 1 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 2 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 3 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 4 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 5 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 6 * blockDim.x]); smem[threadIdx.x] = fmax(smem[threadIdx.x], data[threadIdx.x + 7 * blockDim.x]); __syncthreads(); if (block_size >= 1024 && threadIdx.x < 512) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 512]); __syncthreads(); if (block_size >= 512 && threadIdx.x < 256) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 256]); __syncthreads(); if (block_size >= 256 && threadIdx.x < 128) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 128]); __syncthreads(); if (block_size >= 128 && threadIdx.x < 64) smem[threadIdx.x] = fmax(smem[threadIdx.x], smem[threadIdx.x + 64]); __syncthreads(); if (threadIdx.x < 32) { volatile double *tmp = smem; tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 32]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 16]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 8]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 4]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 2]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 1]); } if (threadIdx.x == 0) atomicAdd(&result1, smem[0]); } template <unsigned int block_size> __global__ void reduction_jedan_blok(double *in_data, int N) { __shared__ double smem2[block_size]; int broj_bloka = blockIdx.x + gridDim.x * blockIdx.y; smem2[threadIdx.x] = in_data[threadIdx.x + broj_bloka*block_size]; __syncthreads(); if (block_size >= 1024 && threadIdx.x < 512) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 512]); __syncthreads(); if (block_size >= 512 && threadIdx.x < 256) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 256]); __syncthreads(); if (block_size >= 256 && threadIdx.x < 128) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 128]); __syncthreads(); if (block_size >= 128 && threadIdx.x < 64) smem2[threadIdx.x] = fmax(smem2[threadIdx.x], smem2[threadIdx.x + 64]); __syncthreads(); if (threadIdx.x < 32) { volatile double *tmp = smem2; tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 32]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 16]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 8]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 4]); tmp[threadIdx.x] = 
fmax(tmp[threadIdx.x], tmp[threadIdx.x + 2]); tmp[threadIdx.x] = fmax(tmp[threadIdx.x], tmp[threadIdx.x + 1]); } if (threadIdx.x == 0) atomicMax(&result2, smem2[0]); } void ispis_matrice (double *a, int x, int y) { int i, j; for(i=0; i<x; ++i) { for(j=0; j<y; ++j) printf("%lg ", a[j*x+i]); printf("\n"); } printf("\n"); } void izracunaj_normalno(double *a, int n) { double max=-1; for(int i=0; i<n; i++) for(int j=0; j<n; j++) if(i!=j && a[i+n*j] + a[i*n+i] > max) max = a[i+n*j] + a[i*n+i]; printf("Normalno racunanje daje rezultat: %lg.\n", max); } int jednake(double *a, double *b, int n) { for(int i=0; i<n; i++) if(a[i] != b[i]) { printf("Mjesto greske je %d, a brojevi su: %lg i %lg\n", i, a[i], b[i]); return 0; } return 1; } __global__ void zbrajanje (double *a, int N) { //__shared__ double dijagonala[N]; //gridDim.x je broj blokova u redku //gridDim.y je broj blokova u stupcu int blockId = blockIdx.x + blockIdx.y * gridDim.x; int index = blockId * blockDim.x + threadIdx.x; int broj_redka = threadIdx.x+ blockIdx.x * blockDim.x; ///Inicijalizacija shared memorije s elementima s dijagonale /*if(index < N) dijagonala[N] = a[index*N + index]; __syncthreads();*/ //gledamo samo elemente matrice do N (jer je matrica prosirena s nulama) i razlicite od dijagonale //te elemente uvecamo za vrijednost na dijagonali if(broj_redka < N && blockIdx.y != broj_redka) { //printf("Indeks je %d, indeks dijagonale %d, a=%lg, threadIdx.x=%d, dijagonala=%lg\n", index, blockIdx.y, a[index], threadIdx.x, dijagonala[blockIdx.y]); a[index] = fabs(a[index]) + fabs(dijagonala[blockIdx.y]); } __syncthreads(); } int nadi_odgovarajuci_broj_dretvi(int N) { // if(N <= 8) //return 8; if(N <= 64) return 64; else if(N <= 128) return 128; else if(N <= 256) return 256; else if(N <= 512) return 512; else if(N <= 768) return 256; else if(N <= 1024) return 1024; else return 256; } void prosiri_matricu(double *a, double *b, int m, int n) { for(int i=0; i<m; i++) for(int j=0; j<n; j++) { if(j<m) b[i*n+j] = a[i*m+j]; else b[i*n+j] = 0; } } int main(int argc, char **argv) { dim3 gridDim, blockDim; int N, size, broj_blokova_po_redku, broj_dretvi_po_bloku, prosireni_size; double *hst_A, *hst_A_prosirena, *hst_dijagonala; double *dev_A, norma, norma2; FILE *fA; double gpu_prebacivanje_podataka = 0.0, gpu_zbrajanje = 0.0, gpu_redukcija_jedan_blok = 0.0, gpu_redukcija_8_blokova = 0.0, gpu_redukcija_jedan_stupac = 0.0, cpu = 0.0; if(argc != 3) { fprintf(stderr, "Za pokretanje %s program ocekuje 2 inta za dimenzije matrica, te 3 stringa koji oznacavaju imena datoteka iz kojih citamo, pisemo\n", argv[0]); return 0; } N = atoi(argv[1]); size = N*N; if(N > 8192) { fprintf(stderr, "Imamo vise od 8192 redka u matrici, pa nije moguce sve te podatke spremiti u konstantnu memoriju (konst mem 64kB).\n"); return 0; } ///Cilj je sto bolje optimizirati broj dretvi u odnosu na red matrice, tj. iskoristiti odgovarajuc broj blokova za redak matrice tako da /// imamo sta manji visak dretvi koje "ne rade nista". 
broj_dretvi_po_bloku = nadi_odgovarajuci_broj_dretvi(N); broj_blokova_po_redku = 1; while(broj_blokova_po_redku * broj_dretvi_po_bloku < N) broj_blokova_po_redku++; prosireni_size = broj_dretvi_po_bloku*broj_blokova_po_redku*N; printf("Broj dretvi po bloku je %d\n", broj_dretvi_po_bloku); printf("Broj blokova po redku je %d\n", broj_blokova_po_redku); gridDim = dim3(broj_blokova_po_redku, N, 1); blockDim = dim3(broj_dretvi_po_bloku, 1, 1); open_file(fA, argv[2], "r"); // alokacija memorije na hostu host_alloc(hst_A, double, size); host_alloc(hst_A_prosirena, double, prosireni_size); host_alloc(hst_dijagonala, double, N); // alokacija memorije na deviceu cuda_exec(cudaMalloc(&dev_A, prosireni_size * sizeof(double))); // citanje podataka iz binarne datoteke read_file(hst_A, sizeof(double), size, fA); cpu -= timer(); izracunaj_normalno(hst_A, N); cpu += timer(); for(int i=0; i<N; i++) { hst_dijagonala[i] = (hst_A[i*N+i]); //printf("i=%d, a[i]=%lg dijag[i]=%lg\n", i, hst_A[i*N+i], hst_dijagonala[i]); } prosiri_matricu(hst_A, hst_A_prosirena, N, broj_dretvi_po_bloku*broj_blokova_po_redku); gpu_prebacivanje_podataka -= timer(); // kopiranje podataka na device cuda_exec(cudaMemcpy(dev_A, hst_A_prosirena, prosireni_size * sizeof(double), cudaMemcpyHostToDevice)); cuda_exec(cudaMemcpyToSymbol(dijagonala, hst_dijagonala, N*sizeof(double))); gpu_prebacivanje_podataka += timer(); gpu_zbrajanje -= timer(); zbrajanje<<<gridDim, blockDim>>>(dev_A, N); cuda_exec(cudaDeviceSynchronize()); gpu_zbrajanje -= timer(); gpu_redukcija_jedan_blok -= timer(); switch (broj_dretvi_po_bloku) { case 1024: reduction_jedan_blok<1024><<<gridDim, blockDim>>>(dev_A, prosireni_size); break; case 512: reduction_jedan_blok< 512><<<gridDim, blockDim>>>(dev_A, prosireni_size); break; case 256: reduction_jedan_blok< 256><<<gridDim, blockDim>>>(dev_A, prosireni_size); break; case 128: reduction_jedan_blok< 128><<<gridDim, blockDim>>>(dev_A, prosireni_size); break; case 64: reduction_jedan_blok< 64><<<gridDim, blockDim>>>(dev_A, prosireni_size); break; case 32: reduction_jedan_blok< 32><<<gridDim, blockDim>>>(dev_A, prosireni_size); break; } gpu_redukcija_jedan_blok += timer(); /*gpu_redukcija_8_blokova -= timer(); switch (broj_dretvi_po_bloku) { case 1024: reduction_8_blokova<1024><<<, blockDim>>>(dev_A, prosireni_size); break; case 512: reduction_8_blokova< 512><<<, blockDim>>>(dev_A, prosireni_size); break; case 256: reduction_8_blokova< 256><<<, blockDim>>>(dev_A, prosireni_size); break; case 128: reduction_8_blokova< 128><<<, blockDim>>>(dev_A, prosireni_size); break; case 64: reduction_8_blokova< 64><<<, blockDim>>>(dev_A, prosireni_size); break; case 32: reduction_8_blokova< 32><<<, blockDim>>>(dev_A, prosireni_size); break; } gpu_redukcija_8_blokova += timer();*/ /*gpu_redukcija_jedan_stupac -= timer(); switch (broj_dretvi_po_bloku) { case 1024: redukcija_jedan_stupac<1024><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 512: redukcija_jedan_stupac< 512><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 256: redukcija_jedan_stupac< 256><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 128: redukcija_jedan_stupac< 128><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 64: redukcija_jedan_stupac< 64><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; case 32: redukcija_jedan_stupac< 32><<<gridDim.y, blockDim>>>(dev_A, prosireni_size); break; } gpu_redukcija_jedan_stupac += timer();*/ gpu_prebacivanje_podataka -= timer(); //kopiranje podatak na host 
cuda_exec(cudaMemcpy(hst_A_prosirena, dev_A, prosireni_size * sizeof(double), cudaMemcpyDeviceToHost)); cuda_exec(cudaMemcpyFromSymbol(&norma2, result2, sizeof(double))); gpu_prebacivanje_podataka += timer(); //to treba izbrisati //ispis_matrice(hst_A_prosirena, broj_dretvi_po_bloku*broj_blokova_po_redku, N); //ispis_matrice(hst_A, N, N); printf("GPU zbrajanje time: %#.3lgs\n", gpu_zbrajanje); printf("GPU redukcija jedan blok: %#.3lgs\n", gpu_redukcija_jedan_blok); //printf("GPU redukcija 8 blokova: %#.3lgs\n", gpu_redukcija_8_blokova); //printf("GPU redukcija jedan stupac: %#.3lgs\n", gpu_redukcija_jedan_stupac); printf("CPU execution time: %#.3lgs\n", cpu); printf("Norma pomocu GPU je: %lg\n", norma2); close_file(fA);printf("Nesto\n"); host_free(hst_A);printf("Nesto\n"); host_free(hst_A_prosirena); cuda_exec(cudaFree(dev_A));printf("Nesto\n"); return 0; }
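Both versions of this file build a double-precision atomicMax out of atomicCAS on the value reinterpreted as a 64-bit integer. The minimal CUDA program below restates that pattern in isolation so it can be compiled and checked on its own; atomic_max_double and max_kernel are illustrative names, the loop reuses the value returned by atomicCAS as the next compare value, and the val == 0.0 early return of the original is omitted.

#include <cstdio>
#include <cstdlib>
#include <cfloat>
#include <cuda_runtime.h>

__device__ double atomic_max_double(double *address, double val) {
    unsigned long long int *addr = (unsigned long long int *)address;
    unsigned long long int old = *addr, assumed;
    do {                                           // retry until our CAS is the one that lands
        assumed = old;
        old = atomicCAS(addr, assumed,
                        __double_as_longlong(fmax(val, __longlong_as_double(assumed))));
    } while (old != assumed);
    return __longlong_as_double(old);
}

__global__ void max_kernel(const double *in, int n, double *result) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomic_max_double(result, in[i]);
}

int main() {
    const int n = 1 << 20;
    double *h_in = (double*)malloc(n * sizeof(double));
    for (int i = 0; i < n; ++i) h_in[i] = 0.5 * i;
    double *d_in, *d_res;
    double init = -DBL_MAX, res = 0.0;
    cudaMalloc((void**)&d_in, n * sizeof(double));
    cudaMalloc((void**)&d_res, sizeof(double));
    cudaMemcpy(d_in, h_in, n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_res, &init, sizeof(double), cudaMemcpyHostToDevice);
    max_kernel<<<(n + 255) / 256, 256>>>(d_in, n, d_res);
    cudaMemcpy(&res, d_res, sizeof(double), cudaMemcpyDeviceToHost);
    printf("max = %g (expected %g)\n", res, 0.5 * (n - 1));
    free(h_in); cudaFree(d_in); cudaFree(d_res);
    return 0;
}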
abacdf1932d1cbde5f627e03f87e5b34728f5950.hip
// !!! This is a file automatically generated by hipify!!! /***stru************************************************************************ * * Copyright (C) 2012 by Ben Barsdell and Andrew Jameson * Licensed under the Academic Free License version 2.1 * ***************************************************************************/ #include <vector> #include <memory> #include <iostream> using std::cout; using std::cerr; using std::endl; #include <sstream> #include <iomanip> #include <string> #include <fstream> #include <sys/stat.h> //#include <utils/cmdline.hpp> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/gather.h> using thrust::host_vector; using thrust::device_vector; #include <thrust/version.h> #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/iterator/constant_iterator.h> #include <pipeline/pipeline.hpp> #include <pipeline/maths.hpp> #include <pipeline/clean_filterbank_rfi.hpp> #include <pipeline/remove_baseline.hpp> #include <pipeline/matched_filter.hpp> #include <pipeline/get_rms.hpp> #include <pipeline/find_giants.hpp> #include <pipeline/label_candidate_clusters.hpp> #include <pipeline/merge_candidates.hpp> #include <data_types/data_source.hpp> #include <network/client_socket.hpp> #include <network/socket_exception.hpp> #include <utils/stopwatch.hpp> // For benchmarking #include <utils/exceptions.hpp> //#include "write_time_series.h" // For debugging #include <dedisp.h> #define HD_BENCHMARK #ifdef HD_BENCHMARK void start_timer(Stopwatch& timer) { timer.start(); } void stop_timer(Stopwatch& timer) { hipDeviceSynchronize(); timer.stop(); } #else void start_timer(Stopwatch& timer) { } void stop_timer(Stopwatch& timer) { } #endif // HD_BENCHMARK #include <utility> // For std::pair template<typename T, typename U> std::pair<T&,U&> tie(T& a, U& b) { return std::pair<T&,U&>(a,b); } struct hd_pipeline_t { hd_params params; dedisp_plan dedispersion_plan; //MPI_Comm communicator; // Memory buffers used during pipeline execution std::vector<hd_byte> h_clean_filterbank; host_vector<hd_byte> h_dm_series; device_vector<hd_float> d_time_series; device_vector<hd_float> d_filtered_series; // void set_dedispersion_plan(dedisp_plan &original_plan) { dedispersion_plan = original_plan }; }; hd_error allocate_gpu(const hd_pipeline pl) { // TODO: This is just a simple proc-->GPU heuristic to get us started int gpu_count; hipGetDeviceCount(&gpu_count); //int proc_idx; //MPI_Comm comm = pl->communicator; //MPI_Comm_rank(comm, &proc_idx); int proc_idx = pl->params.beam; int gpu_idx = pl->params.gpu_id; cout << "Selected GPU ID: " << gpu_idx << endl; cout << "Proc idx: " << proc_idx; hipError_t cerror = hipSetDevice(gpu_idx); if( cerror != hipSuccess ) { cerr << "Could not setCudaDevice to " << gpu_idx << ": " << hipGetErrorString(cerror) << endl; return throw_cuda_error(cerror); } if( pl->params.verbosity >= 1 ) { cout << "Process " << proc_idx << " using GPU " << gpu_idx << endl; } if( !pl->params.yield_cpu ) { if( pl->params.verbosity >= 2 ) { cout << "\tProcess " << proc_idx << " setting CPU to spin" << endl; } cout << "Setting device flags..." << endl; cerror = hipSetDeviceFlags(hipDeviceScheduleSpin); if( cerror != hipSuccess ) { cout << "Such fail" << endl; cout << "The error: " << hipGetErrorString(cerror); return throw_cuda_error(cerror); } } else { if( pl->params.verbosity >= 2 ) { cout << "\tProcess " << proc_idx << " setting CPU to yield" << endl; } // Note: This Yield flag doesn't seem to work properly. 
// The BlockingSync flag does the job, although it may interfere // with GPU/CPU overlapping (not currently used). //cerror = hipSetDeviceFlags(hipDeviceScheduleYield); cout << "Setting another device flag" << endl; cerror = hipSetDeviceFlags(hipDeviceScheduleBlockingSync); if( cerror != hipSuccess ) { return throw_cuda_error(cerror); } } cout << "Did everything OK" << endl; return HD_NO_ERROR; } unsigned int get_filter_index(unsigned int filter_width) { // This function finds log2 of the 32-bit power-of-two number v unsigned int v = filter_width; static const unsigned int b[] = {0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF01, 0xFFFF0000}; register unsigned int r = (v & b[0]) != 0; for( int i=4; i>0; --i) { r |= ((v & b[i]) != 0) << i; } return r; } hd_error hd_create_pipeline(hd_pipeline* pipeline_, dedisp_plan original_plan, hd_params params) //CmdLineOptions& args, Filterbank& filterbank_obj) { *pipeline_ = 0; // Note: We use a smart pointer here to automatically clean up after errors typedef std::auto_ptr<hd_pipeline_t> smart_pipeline_ptr; smart_pipeline_ptr pipeline = smart_pipeline_ptr(new hd_pipeline_t()); if( !pipeline.get() ) { return throw_error_heimdall(HD_MEM_ALLOC_FAILED); } // pipeline->params = params; pipeline->params = params; cout << "Verbosity level: " << pipeline->params.verbosity; if( params.verbosity >= 2 ) { cout << "\tAllocating GPU..." << endl; } hd_error error = allocate_gpu(pipeline.get()); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } if( params.verbosity >= 3 ) { cout << "nchans = " << params.nchans << endl; cout << "dt = " << params.dt << endl; cout << "f0 = " << params.f0 << endl; cout << "df = " << params.df << endl; } if( params.verbosity >= 2 ) { cout << "\tSetting the dedispersion plan..." << endl; pipeline->dedispersion_plan = original_plan; } dedisp_error derror; if( pipeline->params.use_scrunching ) { derror = dedisp_enable_adaptive_dt(pipeline->dedispersion_plan, pipeline->params.dm_pulse_width, pipeline->params.scrunch_tol); if( derror != DEDISP_NO_ERROR ) { ErrorChecker::check_dedisp_error(derror,"enable_adaptive_dt"); //return throw_dedisp_error(derror); } } *pipeline_ = pipeline.release(); if( params.verbosity >= 2 ) { cout << "\tInitialisation complete." << endl; } if( params.verbosity >= 1 ) { cout << "Using Thrust v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << "." 
<< THRUST_SUBMINOR_VERSION << endl; } return HD_NO_ERROR; } hd_error hd_execute(hd_pipeline pl, hd_size nsamps, hd_size nbits, hd_size first_idx, hd_size* nsamps_processed, unsigned char *timeseries_data, size_t original_nsamps, bool both_search) { // nbits is the number of bits per sample in the original data - 8 in case of GMRT hd_error error = HD_NO_ERROR; Stopwatch total_timer; Stopwatch memory_timer; Stopwatch communicate_timer; Stopwatch copy_timer; Stopwatch baseline_timer; Stopwatch normalise_timer; Stopwatch filter_timer; Stopwatch coinc_timer; Stopwatch giants_timer; Stopwatch candidates_timer; start_timer(total_timer); // Note: Filterbank cleaning must be done out-of-place hd_size nbytes = nsamps * pl->params.nchans * nbits / 8; start_timer(memory_timer); pl->h_clean_filterbank.resize(nbytes); std::vector<int> h_killmask(pl->params.nchans, 1); stop_timer(memory_timer); hd_size dm_count = dedisp_get_dm_count(pl->dedispersion_plan); const float* dm_list = dedisp_get_dm_list(pl->dedispersion_plan); const dedisp_size* scrunch_factors = dedisp_get_dt_factors(pl->dedispersion_plan); // dedisp_set_killmask(pl->dedispersion_plan, &h_killmask[0]); // Set channel killmask for dedispersion // remove as data has already been dedispersed //dedisp_set_killmask(pl->dedispersion_plan, &h_killmask[0]); hd_size nsamps_computed = nsamps; // - dedisp_get_max_delay(pl->dedispersion_plan); !! not dedispersing data anymore hd_size series_stride = nsamps_computed; // Report the number of samples that will be properly processed *nsamps_processed = nsamps_computed - pl->params.boxcar_max; hd_size beam = pl->params.beam; if( pl->params.verbosity >= 2 ) cout << "\tAllocating memory for pipeline computations..." << endl; start_timer(memory_timer); // dm_nbits is the number of bits per output dedispersed sample pl->h_dm_series.resize(series_stride * pl->params.dm_nbits/8 * dm_count); pl->d_time_series.resize(series_stride); pl->d_filtered_series.resize(series_stride, 0); stop_timer(memory_timer); if ( pl->params.verbosity >=2 ) cout << "\tMemory allocated successfully" << endl; RemoveBaselinePlan baseline_remover; GetRMSPlan rms_getter; MatchedFilterPlan<hd_float> matched_filter_plan; GiantFinder giant_finder; thrust::device_vector<hd_float> d_giant_peaks; thrust::device_vector<hd_size> d_giant_inds; thrust::device_vector<hd_size> d_giant_begins; thrust::device_vector<hd_size> d_giant_ends; thrust::device_vector<hd_size> d_giant_filter_inds; thrust::device_vector<hd_size> d_giant_dm_inds; thrust::device_vector<hd_size> d_giant_members; typedef thrust::device_ptr<hd_float> dev_float_ptr; typedef thrust::device_ptr<hd_size> dev_size_ptr; // TESTING hd_size write_dm = 0; bool too_many_giants = false; // For each DM for( hd_size dm_idx=0; dm_idx<dm_count; ++dm_idx ) { hd_size cur_dm_scrunch = scrunch_factors[dm_idx]; hd_size cur_nsamps = nsamps_computed / cur_dm_scrunch; hd_float cur_dt = pl->params.dt * cur_dm_scrunch; // Bail if the candidate rate is too high if( too_many_giants ) { break; } if( pl->params.verbosity >= 4 ) { cout << "dm_idx = " << dm_idx << endl; cout << "scrunch = " << scrunch_factors[dm_idx] << endl; cout << "cur_nsamps = " << cur_nsamps << endl; cout << "dt0 = " << pl->params.dt << endl; cout << "cur_dt = " << cur_dt << endl; cout << "\tBaselining and normalising each beam..." 
<< endl; } // Copy the time series to the device and convert to floats //hd_size offset = dm_idx * series_stride + first_idx; hd_size offset = dm_idx * original_nsamps + first_idx; start_timer(copy_timer); // for 8-bit dedispersed output thrust::device_vector<float> d_time_series((unsigned char*)timeseries_data + offset, (unsigned char*)timeseries_data + offset + cur_nsamps); // for 16-bit dedispersed output //thrust::device_vector<float> d_time_series((unsigned short*)timeseries_data // + offset, (unsigned short*)timeseries_data + offset + cur_nsamps); /* cout << (int)timeseries_data[0] << " " << (int)timeseries_data[1] << " " << (int)timeseries_data[2] << " " << (int)timeseries_data[3] << " " << d_time_series[0] << " " << d_time_series[1] << endl; cin.get(); */ hd_float *time_series = thrust::raw_pointer_cast(&d_time_series[0]); // PRINT OUT TIMESERIES DATA FOR DM OF INTEREST /* if (dm_idx == 1) { std::ofstream times_data ("16_bits_dedispersed_chunk.dat", std::ofstream::out | std::ofstream::trunc); for ( size_t sample = 0; sample < (series_stride / 2); sample ++) times_data << sample << " " << d_time_series[sample] << endl; cout << "Printed timeseries data" << endl; times_data.close(); std::cin.get(); } */ /* switch( pl->params.dm_nbits ) { case 8: thrust::copy((unsigned char*)timeseries_data + offset, (unsigned char*)timeseries_data + offset + cur_nsamps, pl->d_time_series.begin()); break; case 8: thrust::copy(h_dm_series_original.begin(), h_dm_series_original.end(), pl->d_time_series.begin()); break; case 8: thrust::copy((unsigned char*)&pl->h_dm_series[offset], (unsigned char*)&pl->h_dm_series[offset] + cur_nsamps, pl->d_time_series.begin()); break; case 16: thrust::copy((unsigned short*)&pl->h_dm_series[offset], (unsigned short*)&pl->h_dm_series[offset] + cur_nsamps, pl->d_time_series.begin()); break; case 32: // Note: 32-bit implies float, not unsigned int thrust::copy((float*)&pl->h_dm_series[offset], (float*)&pl->h_dm_series[offset] + cur_nsamps, pl->d_time_series.begin()); break; default: return HD_INVALID_NBITS; } */ stop_timer(copy_timer); // Remove the baseline // ------------------- // Note: Divided by 2 to form a smoothing radius hd_size nsamps_smooth = hd_size(pl->params.baseline_length / (2 * cur_dt)); // Crop the smoothing length in case not enough samples start_timer(baseline_timer); error = baseline_remover.exec(time_series, cur_nsamps, nsamps_smooth); stop_timer(baseline_timer); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } if( beam == 0 && dm_idx == write_dm && first_idx == 0 ) { // TESTING //write_device_time_series(time_series, cur_nsamps, // cur_dt, "baselined.tim"); } // ------------------- // Normalise // --------- start_timer(normalise_timer); hd_float rms = rms_getter.exec(time_series, cur_nsamps); //cout << "RMS = " << rms << endl; // devides the data by RMS thrust::transform(d_time_series.begin(), d_time_series.end(), thrust::make_constant_iterator(hd_float(1.0)/rms), d_time_series.begin(), thrust::multiplies<hd_float>()); /* thrust::transform(pl->d_time_series.begin(), pl->d_time_series.end(), thrust::make_constant_iterator(hd_float(1.0)/rms), pl->d_time_series.begin(), thrust::multiplies<hd_float>()); */ stop_timer(normalise_timer); if( beam == 0 && dm_idx == write_dm && first_idx == 0 ) { // TESTING //write_device_time_series(time_series, cur_nsamps, // cur_dt, "normalised.tim"); } // --------- // Prepare the boxcar filters // -------------------------- // We can't process the first and last max-filter-width/2 samples hd_size 
rel_boxcar_max = pl->params.boxcar_max/cur_dm_scrunch; hd_size max_nsamps_filtered = cur_nsamps + 1 - rel_boxcar_max; // This is the relative offset into the time series of the filtered data hd_size cur_filtered_offset = rel_boxcar_max / 2; // Create and prepare matched filtering operations start_timer(filter_timer); // Note: Filter width is relative to the current time resolution // this stage is longr than in the original heimdall as we // are nto using scrunching matched_filter_plan.prep(time_series, cur_nsamps, rel_boxcar_max); stop_timer(filter_timer); // -------------------------- hd_float* filtered_series = thrust::raw_pointer_cast(&pl->d_filtered_series[0]); // Note: Filtering is done using a combination of tscrunching and // 'proper' boxcar convolution. The parameter min_tscrunch_width // indicates how much of each to do. Raising min_tscrunch_width // increases sensitivity but decreases performance and vice // versa. // For each boxcar filter // Note: We cannot detect pulse widths < current time resolution for( hd_size filter_width=cur_dm_scrunch; filter_width<=pl->params.boxcar_max; filter_width*=2 ) { hd_size rel_filter_width = filter_width / cur_dm_scrunch; hd_size filter_idx = get_filter_index(filter_width); if( pl->params.verbosity >= 4 ) { cout << "Filtering each beam at width of " << filter_width << endl; } // Note: Filter width is relative to the current time resolution hd_size rel_min_tscrunch_width = ::max(pl->params.min_tscrunch_width / cur_dm_scrunch, hd_size(1)); hd_size rel_tscrunch_width = ::max(2 * rel_filter_width / rel_min_tscrunch_width, hd_size(1)); // Filter width relative to cur_dm_scrunch AND tscrunch hd_size rel_rel_filter_width = rel_filter_width / rel_tscrunch_width; start_timer(filter_timer); error = matched_filter_plan.exec(filtered_series, rel_filter_width, rel_tscrunch_width); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } // Divide and round up hd_size cur_nsamps_filtered = ((max_nsamps_filtered-1) / rel_tscrunch_width + 1); hd_size cur_scrunch = cur_dm_scrunch * rel_tscrunch_width; // Normalise the filtered time series (RMS ~ sqrt(time)) // TODO: Avoid/hide the ugly thrust code? 
// Consider making it a method of MatchedFilterPlan /* thrust::constant_iterator<hd_float> norm_val_iter(1.0 / sqrt((hd_float)rel_filter_width)); thrust::transform(thrust::device_ptr<hd_float>(filtered_series), thrust::device_ptr<hd_float>(filtered_series) + cur_nsamps_filtered, norm_val_iter, thrust::device_ptr<hd_float>(filtered_series), thrust::multiplies<hd_float>()); */ // TESTING Proper normalisation hd_float rms = rms_getter.exec(filtered_series, cur_nsamps_filtered); thrust::transform(thrust::device_ptr<hd_float>(filtered_series), thrust::device_ptr<hd_float>(filtered_series) + cur_nsamps_filtered, thrust::make_constant_iterator(hd_float(1.0)/rms), thrust::device_ptr<hd_float>(filtered_series), thrust::multiplies<hd_float>()); stop_timer(filter_timer); // WRITE OUT THE NORMALISED TIMESERIES /*if ( dm_idx == 1 && filter_width == 1 && first_idx > 6000000) { std::ofstream norm_data ("norm_data_dm_00197462_samp_6025216.dat", std::ofstream::out | std::ofstream::trunc); for (size_t sample = 0; sample < series_stride; sample++) norm_data << sample << " " << pl->d_filtered_series[sample] << endl; norm_data.close(); cout << "Normalised data saved...\n"; std::cin.get(); }*/ if( beam == 0 && dm_idx == write_dm && first_idx == 0 && filter_width == 8 ) { // TESTING //write_device_time_series(filtered_series, // cur_nsamps_filtered, // cur_dt, "filtered.tim"); } hd_size prev_giant_count = d_giant_peaks.size(); if( pl->params.verbosity >= 4 ) { cout << "Finding giants..." << endl; } start_timer(giants_timer); error = giant_finder.exec(filtered_series, cur_nsamps_filtered, pl->params.detect_thresh, //pl->params.cand_sep_time, // Note: This was MB's recommendation pl->params.cand_sep_time * rel_rel_filter_width, d_giant_peaks, d_giant_inds, d_giant_begins, d_giant_ends); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } hd_size rel_cur_filtered_offset = (cur_filtered_offset / rel_tscrunch_width); using namespace thrust::placeholders; thrust::transform(d_giant_inds.begin()+prev_giant_count, d_giant_inds.end(), d_giant_inds.begin()+prev_giant_count, /*first_idx +*/ (_1+rel_cur_filtered_offset)*cur_scrunch); thrust::transform(d_giant_begins.begin()+prev_giant_count, d_giant_begins.end(), d_giant_begins.begin()+prev_giant_count, /*first_idx +*/ (_1+rel_cur_filtered_offset)*cur_scrunch); thrust::transform(d_giant_ends.begin()+prev_giant_count, d_giant_ends.end(), d_giant_ends.begin()+prev_giant_count, /*first_idx +*/ (_1+rel_cur_filtered_offset)*cur_scrunch); d_giant_filter_inds.resize(d_giant_peaks.size(), filter_idx); d_giant_dm_inds.resize(d_giant_peaks.size(), dm_idx); // Note: This could be used to track total member samples if desired d_giant_members.resize(d_giant_peaks.size(), 1); stop_timer(giants_timer); // Bail if the candidate rate is too high hd_size total_giant_count = d_giant_peaks.size(); hd_float data_length_mins = nsamps * pl->params.dt / 60.0; if ( pl->params.max_giant_rate && ( total_giant_count / data_length_mins > pl->params.max_giant_rate ) ) { too_many_giants = true; float searched = ((float) dm_idx * 100) / (float) dm_count; cout << "WARNING: exceeded max giants/min, DM [" << dm_list[dm_idx] << "] space searched " << searched << "%" << endl; cout << "Will stop with processing this chunk " << endl; break; } } // End of filter width loop } // End of DM loop hd_size giant_count = d_giant_peaks.size(); if( pl->params.verbosity >= 2 ) { cout << "Giant count = " << giant_count << endl; // total number of giants detected over all DMs } start_timer(candidates_timer); 
thrust::host_vector<hd_float> h_group_peaks; thrust::host_vector<hd_size> h_group_inds; thrust::host_vector<hd_size> h_group_begins; thrust::host_vector<hd_size> h_group_ends; thrust::host_vector<hd_size> h_group_filter_inds; thrust::host_vector<hd_size> h_group_dm_inds; thrust::host_vector<hd_size> h_group_members; thrust::host_vector<hd_float> h_group_dms; if (!too_many_giants) { thrust::device_vector<hd_size> d_giant_labels(giant_count); hd_size* d_giant_labels_ptr = thrust::raw_pointer_cast(&d_giant_labels[0]); RawCandidates d_giants; d_giants.peaks = thrust::raw_pointer_cast(&d_giant_peaks[0]); d_giants.inds = thrust::raw_pointer_cast(&d_giant_inds[0]); d_giants.begins = thrust::raw_pointer_cast(&d_giant_begins[0]); d_giants.ends = thrust::raw_pointer_cast(&d_giant_ends[0]); d_giants.filter_inds = thrust::raw_pointer_cast(&d_giant_filter_inds[0]); d_giants.dm_inds = thrust::raw_pointer_cast(&d_giant_dm_inds[0]); d_giants.members = thrust::raw_pointer_cast(&d_giant_members[0]); hd_size filter_count = get_filter_index(pl->params.boxcar_max) + 1; if( pl->params.verbosity >= 2 ) { cout << "Grouping coincident candidates..." << endl; } hd_size label_count; error = label_candidate_clusters(giant_count, *(ConstRawCandidates*)&d_giants, pl->params.cand_sep_time, pl->params.cand_sep_filter, pl->params.cand_sep_dm, d_giant_labels_ptr, &label_count); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } hd_size group_count = label_count; if( pl->params.verbosity >= 2 ) { cout << "Candidate count = " << group_count << endl; } thrust::device_vector<hd_float> d_group_peaks(group_count); thrust::device_vector<hd_size> d_group_inds(group_count); thrust::device_vector<hd_size> d_group_begins(group_count); thrust::device_vector<hd_size> d_group_ends(group_count); thrust::device_vector<hd_size> d_group_filter_inds(group_count); thrust::device_vector<hd_size> d_group_dm_inds(group_count); thrust::device_vector<hd_size> d_group_members(group_count); thrust::device_vector<hd_float> d_group_dms(group_count); RawCandidates d_groups; d_groups.peaks = thrust::raw_pointer_cast(&d_group_peaks[0]); d_groups.inds = thrust::raw_pointer_cast(&d_group_inds[0]); d_groups.begins = thrust::raw_pointer_cast(&d_group_begins[0]); d_groups.ends = thrust::raw_pointer_cast(&d_group_ends[0]); d_groups.filter_inds = thrust::raw_pointer_cast(&d_group_filter_inds[0]); d_groups.dm_inds = thrust::raw_pointer_cast(&d_group_dm_inds[0]); d_groups.members = thrust::raw_pointer_cast(&d_group_members[0]); merge_candidates(giant_count, d_giant_labels_ptr, *(ConstRawCandidates*)&d_giants, d_groups); // Look up the actual DM of each group thrust::device_vector<hd_float> d_dm_list(dm_list, dm_list+dm_count); thrust::gather(d_group_dm_inds.begin(), d_group_dm_inds.end(), d_dm_list.begin(), d_group_dms.begin()); // Device to host transfer of candidates h_group_peaks = d_group_peaks; h_group_inds = d_group_inds; h_group_begins = d_group_begins; h_group_ends = d_group_ends; h_group_filter_inds = d_group_filter_inds; h_group_dm_inds = d_group_dm_inds; h_group_members = d_group_members; h_group_dms = d_group_dms; //h_group_flags = d_group_flags; } if( h_group_peaks.size() > 0 ) { if( pl->params.verbosity >= 2 ) { cout << "Writing output candidates, utc_start=" << pl->params.utc_start << endl; } char buffer[64]; time_t now = pl->params.utc_start + (time_t) (first_idx / pl->params.spectra_per_second); strftime (buffer, 64, HD_TIMESTR, (struct tm*) gmtime(&now)); std::stringstream ss; ss << std::setw(2) << std::setfill('0') << 
(pl->params.beam)%13+1; std::ostringstream oss; if ( pl->params.coincidencer_host != NULL && pl->params.coincidencer_port != -1 ) { try { ClientSocket client_socket ( pl->params.coincidencer_host, pl->params.coincidencer_port ); strftime (buffer, 64, HD_TIMESTR, (struct tm*) gmtime(&(pl->params.utc_start))); oss << buffer << " "; time_t now = pl->params.utc_start + (time_t) (first_idx / pl->params.spectra_per_second); strftime (buffer, 64, HD_TIMESTR, (struct tm*) gmtime(&now)); oss << buffer << " "; oss << first_idx << " "; oss << ss.str() << " "; oss << h_group_peaks.size() << endl; client_socket << oss.str(); oss.flush(); oss.str(""); for (hd_size i=0; i<h_group_peaks.size(); ++i ) { hd_size samp_idx = first_idx + h_group_inds[i]; oss << h_group_peaks[i] << "\t" << samp_idx << "\t" << samp_idx * pl->params.dt << "\t" << h_group_filter_inds[i] << "\t" << h_group_dm_inds[i] << "\t" << h_group_dms[i] << "\t" << h_group_members[i] << "\t" << first_idx + h_group_begins[i] << "\t" << first_idx + h_group_ends[i] << endl; client_socket << oss.str(); oss.flush(); oss.str(""); } // client_socket should close when it goes out of scope... } catch (SocketException& e ) { std::cerr << "SocketException was caught:" << e.description() << "\n"; } } if( pl->params.verbosity >= 2 ) cout << "Output timestamp: " << buffer << endl; if (!both_search) mkdir(pl->params.output_dir.c_str(), 0777); std::string filename = std::string(pl->params.output_dir) + "/" + std::string(buffer) + "_" + ss.str() + ".cand"; if( pl->params.verbosity >= 2 ) cout << "Output filename: " << filename << endl; std::ofstream cand_file(filename.c_str(), std::ios::out); if( pl->params.verbosity >= 2 ) cout << "Dumping " << h_group_peaks.size() << " candidates to " << filename << endl; if (cand_file.good()) { /*cand_file << "S/N\t" << "peak sample\t" << "peak time\t" << "filter idx\t" << "DM idx\t" << "DM\t" << "members no.\t" << "begin sample\t" << "end sample\n"; */ for( hd_size i=0; i<h_group_peaks.size(); ++i ) { hd_size samp_idx = first_idx + h_group_inds[i]; cand_file << h_group_peaks[i] << "\t" << samp_idx << "\t" << samp_idx * pl->params.dt << "\t" << h_group_filter_inds[i] << "\t" << h_group_dm_inds[i] << "\t" << h_group_dms[i] << "\t" //<< h_group_flags[i] << "\t" << h_group_members[i] << "\t" // HACK %13 //<< (beam+pl->params.beam)%13+1 << "\t" << first_idx + h_group_begins[i] << "\t" << first_idx + h_group_ends[i] << "\t" << pl->params.beam_count << "\t" // number of beams << 0 << "\t" // beam mask (whatever that is) << 1 << "\t" // primary beam << "20" << "\t" // max_snr << 1 << "\t" // beam << "\n"; } } else cout << "Skipping dump due to bad file open on " << filename << endl; cand_file.close(); } else { if( pl->params.verbosity >= 2 ) cout << "No candidated deteced. Will not create a file..." 
<< endl; } stop_timer(candidates_timer); stop_timer(total_timer); #ifdef HD_BENCHMARK if( pl->params.verbosity >= 1 ) { cout << "Mem alloc time: " << memory_timer.getTime() << endl; cout << "Copy time: " << copy_timer.getTime() << endl; cout << "Baselining time: " << baseline_timer.getTime() << endl; cout << "Normalisation time: " << normalise_timer.getTime() << endl; cout << "Filtering time: " << filter_timer.getTime() << endl; cout << "Find giants time: " << giants_timer.getTime() << endl; cout << "Process candidates time: " << candidates_timer.getTime() << endl; cout << "Total time: " << total_timer.getTime() << endl; } hd_float time_sum = (memory_timer.getTime() + copy_timer.getTime() + baseline_timer.getTime() + normalise_timer.getTime() + filter_timer.getTime() + giants_timer.getTime() + candidates_timer.getTime()); hd_float misc_time = total_timer.getTime() - time_sum; /* std::ofstream timing_file("timing.dat", std::ios::app); timing_file << total_timer.getTime() << "\t" << misc_time << "\t" << memory_timer.getTime() << "\t" << clean_timer.getTime() << "\t" << dedisp_timer.getTime() << "\t" << copy_timer.getTime() << "\t" << baseline_timer.getTime() << "\t" << normalise_timer.getTime() << "\t" << filter_timer.getTime() << "\t" << giants_timer.getTime() << "\t" << candidates_timer.getTime() << endl; timing_file.close(); */ #endif // HD_BENCHMARK if( too_many_giants ) { return HD_TOO_MANY_EVENTS; } else { return HD_NO_ERROR; } } void hd_destroy_pipeline(hd_pipeline pipeline) { if( pipeline->params.verbosity >= 2 ) { cout << "\tDeleting pipeline object..." << endl; } cout << "Destroying dedispersion plan" << endl; // dedisp_destroy_plan(pipeline->dedispersion_plan); cout << "Destroyed dedispersion plan" << endl; // Note: This assumes memory owned by pipeline cleans itself up if( pipeline ) { delete pipeline; } }
abacdf1932d1cbde5f627e03f87e5b34728f5950.cu
/***stru************************************************************************ * * Copyright (C) 2012 by Ben Barsdell and Andrew Jameson * Licensed under the Academic Free License version 2.1 * ***************************************************************************/ #include <vector> #include <memory> #include <iostream> using std::cout; using std::cerr; using std::endl; #include <sstream> #include <iomanip> #include <string> #include <fstream> #include <sys/stat.h> //#include <utils/cmdline.hpp> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/gather.h> using thrust::host_vector; using thrust::device_vector; #include <thrust/version.h> #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/iterator/constant_iterator.h> #include <pipeline/pipeline.hpp> #include <pipeline/maths.hpp> #include <pipeline/clean_filterbank_rfi.hpp> #include <pipeline/remove_baseline.hpp> #include <pipeline/matched_filter.hpp> #include <pipeline/get_rms.hpp> #include <pipeline/find_giants.hpp> #include <pipeline/label_candidate_clusters.hpp> #include <pipeline/merge_candidates.hpp> #include <data_types/data_source.hpp> #include <network/client_socket.hpp> #include <network/socket_exception.hpp> #include <utils/stopwatch.hpp> // For benchmarking #include <utils/exceptions.hpp> //#include "write_time_series.h" // For debugging #include <dedisp.h> #define HD_BENCHMARK #ifdef HD_BENCHMARK void start_timer(Stopwatch& timer) { timer.start(); } void stop_timer(Stopwatch& timer) { cudaThreadSynchronize(); timer.stop(); } #else void start_timer(Stopwatch& timer) { } void stop_timer(Stopwatch& timer) { } #endif // HD_BENCHMARK #include <utility> // For std::pair template<typename T, typename U> std::pair<T&,U&> tie(T& a, U& b) { return std::pair<T&,U&>(a,b); } struct hd_pipeline_t { hd_params params; dedisp_plan dedispersion_plan; //MPI_Comm communicator; // Memory buffers used during pipeline execution std::vector<hd_byte> h_clean_filterbank; host_vector<hd_byte> h_dm_series; device_vector<hd_float> d_time_series; device_vector<hd_float> d_filtered_series; // void set_dedispersion_plan(dedisp_plan &original_plan) { dedispersion_plan = original_plan }; }; hd_error allocate_gpu(const hd_pipeline pl) { // TODO: This is just a simple proc-->GPU heuristic to get us started int gpu_count; cudaGetDeviceCount(&gpu_count); //int proc_idx; //MPI_Comm comm = pl->communicator; //MPI_Comm_rank(comm, &proc_idx); int proc_idx = pl->params.beam; int gpu_idx = pl->params.gpu_id; cout << "Selected GPU ID: " << gpu_idx << endl; cout << "Proc idx: " << proc_idx; cudaError_t cerror = cudaSetDevice(gpu_idx); if( cerror != cudaSuccess ) { cerr << "Could not setCudaDevice to " << gpu_idx << ": " << cudaGetErrorString(cerror) << endl; return throw_cuda_error(cerror); } if( pl->params.verbosity >= 1 ) { cout << "Process " << proc_idx << " using GPU " << gpu_idx << endl; } if( !pl->params.yield_cpu ) { if( pl->params.verbosity >= 2 ) { cout << "\tProcess " << proc_idx << " setting CPU to spin" << endl; } cout << "Setting device flags..." << endl; cerror = cudaSetDeviceFlags(cudaDeviceScheduleSpin); if( cerror != cudaSuccess ) { cout << "Such fail" << endl; cout << "The error: " << cudaGetErrorString(cerror); return throw_cuda_error(cerror); } } else { if( pl->params.verbosity >= 2 ) { cout << "\tProcess " << proc_idx << " setting CPU to yield" << endl; } // Note: This Yield flag doesn't seem to work properly. 
// The BlockingSync flag does the job, although it may interfere // with GPU/CPU overlapping (not currently used). //cerror = cudaSetDeviceFlags(cudaDeviceScheduleYield); cout << "Setting another device flag" << endl; cerror = cudaSetDeviceFlags(cudaDeviceBlockingSync); if( cerror != cudaSuccess ) { return throw_cuda_error(cerror); } } cout << "Did everything OK" << endl; return HD_NO_ERROR; } unsigned int get_filter_index(unsigned int filter_width) { // This function finds log2 of the 32-bit power-of-two number v unsigned int v = filter_width; static const unsigned int b[] = {0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000}; register unsigned int r = (v & b[0]) != 0; for( int i=4; i>0; --i) { r |= ((v & b[i]) != 0) << i; } return r; } hd_error hd_create_pipeline(hd_pipeline* pipeline_, dedisp_plan original_plan, hd_params params) //CmdLineOptions& args, Filterbank& filterbank_obj) { *pipeline_ = 0; // Note: We use a smart pointer here to automatically clean up after errors typedef std::auto_ptr<hd_pipeline_t> smart_pipeline_ptr; smart_pipeline_ptr pipeline = smart_pipeline_ptr(new hd_pipeline_t()); if( !pipeline.get() ) { return throw_error_heimdall(HD_MEM_ALLOC_FAILED); } // pipeline->params = params; pipeline->params = params; cout << "Verbosity level: " << pipeline->params.verbosity; if( params.verbosity >= 2 ) { cout << "\tAllocating GPU..." << endl; } hd_error error = allocate_gpu(pipeline.get()); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } if( params.verbosity >= 3 ) { cout << "nchans = " << params.nchans << endl; cout << "dt = " << params.dt << endl; cout << "f0 = " << params.f0 << endl; cout << "df = " << params.df << endl; } if( params.verbosity >= 2 ) { cout << "\tSetting the dedispersion plan..." << endl; pipeline->dedispersion_plan = original_plan; } dedisp_error derror; if( pipeline->params.use_scrunching ) { derror = dedisp_enable_adaptive_dt(pipeline->dedispersion_plan, pipeline->params.dm_pulse_width, pipeline->params.scrunch_tol); if( derror != DEDISP_NO_ERROR ) { ErrorChecker::check_dedisp_error(derror,"enable_adaptive_dt"); //return throw_dedisp_error(derror); } } *pipeline_ = pipeline.release(); if( params.verbosity >= 2 ) { cout << "\tInitialisation complete." << endl; } if( params.verbosity >= 1 ) { cout << "Using Thrust v" << THRUST_MAJOR_VERSION << "." << THRUST_MINOR_VERSION << "."
<< THRUST_SUBMINOR_VERSION << endl; } return HD_NO_ERROR; } hd_error hd_execute(hd_pipeline pl, hd_size nsamps, hd_size nbits, hd_size first_idx, hd_size* nsamps_processed, unsigned char *timeseries_data, size_t original_nsamps, bool both_search) { // nbits is the number of bits per sample in the original data - 8 in case of GMRT hd_error error = HD_NO_ERROR; Stopwatch total_timer; Stopwatch memory_timer; Stopwatch communicate_timer; Stopwatch copy_timer; Stopwatch baseline_timer; Stopwatch normalise_timer; Stopwatch filter_timer; Stopwatch coinc_timer; Stopwatch giants_timer; Stopwatch candidates_timer; start_timer(total_timer); // Note: Filterbank cleaning must be done out-of-place hd_size nbytes = nsamps * pl->params.nchans * nbits / 8; start_timer(memory_timer); pl->h_clean_filterbank.resize(nbytes); std::vector<int> h_killmask(pl->params.nchans, 1); stop_timer(memory_timer); hd_size dm_count = dedisp_get_dm_count(pl->dedispersion_plan); const float* dm_list = dedisp_get_dm_list(pl->dedispersion_plan); const dedisp_size* scrunch_factors = dedisp_get_dt_factors(pl->dedispersion_plan); // dedisp_set_killmask(pl->dedispersion_plan, &h_killmask[0]); // Set channel killmask for dedispersion // remove as data has already been dedispersed //dedisp_set_killmask(pl->dedispersion_plan, &h_killmask[0]); hd_size nsamps_computed = nsamps; // - dedisp_get_max_delay(pl->dedispersion_plan); !! not dedispersing data anymore hd_size series_stride = nsamps_computed; // Report the number of samples that will be properly processed *nsamps_processed = nsamps_computed - pl->params.boxcar_max; hd_size beam = pl->params.beam; if( pl->params.verbosity >= 2 ) cout << "\tAllocating memory for pipeline computations..." << endl; start_timer(memory_timer); // dm_nbits is the number of bits per output dedispersed sample pl->h_dm_series.resize(series_stride * pl->params.dm_nbits/8 * dm_count); pl->d_time_series.resize(series_stride); pl->d_filtered_series.resize(series_stride, 0); stop_timer(memory_timer); if ( pl->params.verbosity >=2 ) cout << "\tMemory allocated successfully" << endl; RemoveBaselinePlan baseline_remover; GetRMSPlan rms_getter; MatchedFilterPlan<hd_float> matched_filter_plan; GiantFinder giant_finder; thrust::device_vector<hd_float> d_giant_peaks; thrust::device_vector<hd_size> d_giant_inds; thrust::device_vector<hd_size> d_giant_begins; thrust::device_vector<hd_size> d_giant_ends; thrust::device_vector<hd_size> d_giant_filter_inds; thrust::device_vector<hd_size> d_giant_dm_inds; thrust::device_vector<hd_size> d_giant_members; typedef thrust::device_ptr<hd_float> dev_float_ptr; typedef thrust::device_ptr<hd_size> dev_size_ptr; // TESTING hd_size write_dm = 0; bool too_many_giants = false; // For each DM for( hd_size dm_idx=0; dm_idx<dm_count; ++dm_idx ) { hd_size cur_dm_scrunch = scrunch_factors[dm_idx]; hd_size cur_nsamps = nsamps_computed / cur_dm_scrunch; hd_float cur_dt = pl->params.dt * cur_dm_scrunch; // Bail if the candidate rate is too high if( too_many_giants ) { break; } if( pl->params.verbosity >= 4 ) { cout << "dm_idx = " << dm_idx << endl; cout << "scrunch = " << scrunch_factors[dm_idx] << endl; cout << "cur_nsamps = " << cur_nsamps << endl; cout << "dt0 = " << pl->params.dt << endl; cout << "cur_dt = " << cur_dt << endl; cout << "\tBaselining and normalising each beam..." 
<< endl; } // Copy the time series to the device and convert to floats //hd_size offset = dm_idx * series_stride + first_idx; hd_size offset = dm_idx * original_nsamps + first_idx; start_timer(copy_timer); // for 8-bit dedispersed output thrust::device_vector<float> d_time_series((unsigned char*)timeseries_data + offset, (unsigned char*)timeseries_data + offset + cur_nsamps); // for 16-bit dedispersed output //thrust::device_vector<float> d_time_series((unsigned short*)timeseries_data // + offset, (unsigned short*)timeseries_data + offset + cur_nsamps); /* cout << (int)timeseries_data[0] << " " << (int)timeseries_data[1] << " " << (int)timeseries_data[2] << " " << (int)timeseries_data[3] << " " << d_time_series[0] << " " << d_time_series[1] << endl; cin.get(); */ hd_float *time_series = thrust::raw_pointer_cast(&d_time_series[0]); // PRINT OUT TIMESERIES DATA FOR DM OF INTEREST /* if (dm_idx == 1) { std::ofstream times_data ("16_bits_dedispersed_chunk.dat", std::ofstream::out | std::ofstream::trunc); for ( size_t sample = 0; sample < (series_stride / 2); sample ++) times_data << sample << " " << d_time_series[sample] << endl; cout << "Printed timeseries data" << endl; times_data.close(); std::cin.get(); } */ /* switch( pl->params.dm_nbits ) { case 8: thrust::copy((unsigned char*)timeseries_data + offset, (unsigned char*)timeseries_data + offset + cur_nsamps, pl->d_time_series.begin()); break; case 8: thrust::copy(h_dm_series_original.begin(), h_dm_series_original.end(), pl->d_time_series.begin()); break; case 8: thrust::copy((unsigned char*)&pl->h_dm_series[offset], (unsigned char*)&pl->h_dm_series[offset] + cur_nsamps, pl->d_time_series.begin()); break; case 16: thrust::copy((unsigned short*)&pl->h_dm_series[offset], (unsigned short*)&pl->h_dm_series[offset] + cur_nsamps, pl->d_time_series.begin()); break; case 32: // Note: 32-bit implies float, not unsigned int thrust::copy((float*)&pl->h_dm_series[offset], (float*)&pl->h_dm_series[offset] + cur_nsamps, pl->d_time_series.begin()); break; default: return HD_INVALID_NBITS; } */ stop_timer(copy_timer); // Remove the baseline // ------------------- // Note: Divided by 2 to form a smoothing radius hd_size nsamps_smooth = hd_size(pl->params.baseline_length / (2 * cur_dt)); // Crop the smoothing length in case not enough samples start_timer(baseline_timer); error = baseline_remover.exec(time_series, cur_nsamps, nsamps_smooth); stop_timer(baseline_timer); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } if( beam == 0 && dm_idx == write_dm && first_idx == 0 ) { // TESTING //write_device_time_series(time_series, cur_nsamps, // cur_dt, "baselined.tim"); } // ------------------- // Normalise // --------- start_timer(normalise_timer); hd_float rms = rms_getter.exec(time_series, cur_nsamps); //cout << "RMS = " << rms << endl; // devides the data by RMS thrust::transform(d_time_series.begin(), d_time_series.end(), thrust::make_constant_iterator(hd_float(1.0)/rms), d_time_series.begin(), thrust::multiplies<hd_float>()); /* thrust::transform(pl->d_time_series.begin(), pl->d_time_series.end(), thrust::make_constant_iterator(hd_float(1.0)/rms), pl->d_time_series.begin(), thrust::multiplies<hd_float>()); */ stop_timer(normalise_timer); if( beam == 0 && dm_idx == write_dm && first_idx == 0 ) { // TESTING //write_device_time_series(time_series, cur_nsamps, // cur_dt, "normalised.tim"); } // --------- // Prepare the boxcar filters // -------------------------- // We can't process the first and last max-filter-width/2 samples hd_size 
rel_boxcar_max = pl->params.boxcar_max/cur_dm_scrunch; hd_size max_nsamps_filtered = cur_nsamps + 1 - rel_boxcar_max; // This is the relative offset into the time series of the filtered data hd_size cur_filtered_offset = rel_boxcar_max / 2; // Create and prepare matched filtering operations start_timer(filter_timer); // Note: Filter width is relative to the current time resolution // this stage is longer than in the original heimdall as we // are not using scrunching matched_filter_plan.prep(time_series, cur_nsamps, rel_boxcar_max); stop_timer(filter_timer); // -------------------------- hd_float* filtered_series = thrust::raw_pointer_cast(&pl->d_filtered_series[0]); // Note: Filtering is done using a combination of tscrunching and // 'proper' boxcar convolution. The parameter min_tscrunch_width // indicates how much of each to do. Raising min_tscrunch_width // increases sensitivity but decreases performance and vice // versa. // For each boxcar filter // Note: We cannot detect pulse widths < current time resolution for( hd_size filter_width=cur_dm_scrunch; filter_width<=pl->params.boxcar_max; filter_width*=2 ) { hd_size rel_filter_width = filter_width / cur_dm_scrunch; hd_size filter_idx = get_filter_index(filter_width); if( pl->params.verbosity >= 4 ) { cout << "Filtering each beam at width of " << filter_width << endl; } // Note: Filter width is relative to the current time resolution hd_size rel_min_tscrunch_width = std::max(pl->params.min_tscrunch_width / cur_dm_scrunch, hd_size(1)); hd_size rel_tscrunch_width = std::max(2 * rel_filter_width / rel_min_tscrunch_width, hd_size(1)); // Filter width relative to cur_dm_scrunch AND tscrunch hd_size rel_rel_filter_width = rel_filter_width / rel_tscrunch_width; start_timer(filter_timer); error = matched_filter_plan.exec(filtered_series, rel_filter_width, rel_tscrunch_width); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } // Divide and round up hd_size cur_nsamps_filtered = ((max_nsamps_filtered-1) / rel_tscrunch_width + 1); hd_size cur_scrunch = cur_dm_scrunch * rel_tscrunch_width; // Normalise the filtered time series (RMS ~ sqrt(time)) // TODO: Avoid/hide the ugly thrust code?
// Consider making it a method of MatchedFilterPlan /* thrust::constant_iterator<hd_float> norm_val_iter(1.0 / sqrt((hd_float)rel_filter_width)); thrust::transform(thrust::device_ptr<hd_float>(filtered_series), thrust::device_ptr<hd_float>(filtered_series) + cur_nsamps_filtered, norm_val_iter, thrust::device_ptr<hd_float>(filtered_series), thrust::multiplies<hd_float>()); */ // TESTING Proper normalisation hd_float rms = rms_getter.exec(filtered_series, cur_nsamps_filtered); thrust::transform(thrust::device_ptr<hd_float>(filtered_series), thrust::device_ptr<hd_float>(filtered_series) + cur_nsamps_filtered, thrust::make_constant_iterator(hd_float(1.0)/rms), thrust::device_ptr<hd_float>(filtered_series), thrust::multiplies<hd_float>()); stop_timer(filter_timer); // WRITE OUT THE NORMALISED TIMESERIES /*if ( dm_idx == 1 && filter_width == 1 && first_idx > 6000000) { std::ofstream norm_data ("norm_data_dm_00197462_samp_6025216.dat", std::ofstream::out | std::ofstream::trunc); for (size_t sample = 0; sample < series_stride; sample++) norm_data << sample << " " << pl->d_filtered_series[sample] << endl; norm_data.close(); cout << "Normalised data saved...\n"; std::cin.get(); }*/ if( beam == 0 && dm_idx == write_dm && first_idx == 0 && filter_width == 8 ) { // TESTING //write_device_time_series(filtered_series, // cur_nsamps_filtered, // cur_dt, "filtered.tim"); } hd_size prev_giant_count = d_giant_peaks.size(); if( pl->params.verbosity >= 4 ) { cout << "Finding giants..." << endl; } start_timer(giants_timer); error = giant_finder.exec(filtered_series, cur_nsamps_filtered, pl->params.detect_thresh, //pl->params.cand_sep_time, // Note: This was MB's recommendation pl->params.cand_sep_time * rel_rel_filter_width, d_giant_peaks, d_giant_inds, d_giant_begins, d_giant_ends); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } hd_size rel_cur_filtered_offset = (cur_filtered_offset / rel_tscrunch_width); using namespace thrust::placeholders; thrust::transform(d_giant_inds.begin()+prev_giant_count, d_giant_inds.end(), d_giant_inds.begin()+prev_giant_count, /*first_idx +*/ (_1+rel_cur_filtered_offset)*cur_scrunch); thrust::transform(d_giant_begins.begin()+prev_giant_count, d_giant_begins.end(), d_giant_begins.begin()+prev_giant_count, /*first_idx +*/ (_1+rel_cur_filtered_offset)*cur_scrunch); thrust::transform(d_giant_ends.begin()+prev_giant_count, d_giant_ends.end(), d_giant_ends.begin()+prev_giant_count, /*first_idx +*/ (_1+rel_cur_filtered_offset)*cur_scrunch); d_giant_filter_inds.resize(d_giant_peaks.size(), filter_idx); d_giant_dm_inds.resize(d_giant_peaks.size(), dm_idx); // Note: This could be used to track total member samples if desired d_giant_members.resize(d_giant_peaks.size(), 1); stop_timer(giants_timer); // Bail if the candidate rate is too high hd_size total_giant_count = d_giant_peaks.size(); hd_float data_length_mins = nsamps * pl->params.dt / 60.0; if ( pl->params.max_giant_rate && ( total_giant_count / data_length_mins > pl->params.max_giant_rate ) ) { too_many_giants = true; float searched = ((float) dm_idx * 100) / (float) dm_count; cout << "WARNING: exceeded max giants/min, DM [" << dm_list[dm_idx] << "] space searched " << searched << "%" << endl; cout << "Will stop with processing this chunk " << endl; break; } } // End of filter width loop } // End of DM loop hd_size giant_count = d_giant_peaks.size(); if( pl->params.verbosity >= 2 ) { cout << "Giant count = " << giant_count << endl; // total number of giants detected over all DMs } start_timer(candidates_timer); 
thrust::host_vector<hd_float> h_group_peaks; thrust::host_vector<hd_size> h_group_inds; thrust::host_vector<hd_size> h_group_begins; thrust::host_vector<hd_size> h_group_ends; thrust::host_vector<hd_size> h_group_filter_inds; thrust::host_vector<hd_size> h_group_dm_inds; thrust::host_vector<hd_size> h_group_members; thrust::host_vector<hd_float> h_group_dms; if (!too_many_giants) { thrust::device_vector<hd_size> d_giant_labels(giant_count); hd_size* d_giant_labels_ptr = thrust::raw_pointer_cast(&d_giant_labels[0]); RawCandidates d_giants; d_giants.peaks = thrust::raw_pointer_cast(&d_giant_peaks[0]); d_giants.inds = thrust::raw_pointer_cast(&d_giant_inds[0]); d_giants.begins = thrust::raw_pointer_cast(&d_giant_begins[0]); d_giants.ends = thrust::raw_pointer_cast(&d_giant_ends[0]); d_giants.filter_inds = thrust::raw_pointer_cast(&d_giant_filter_inds[0]); d_giants.dm_inds = thrust::raw_pointer_cast(&d_giant_dm_inds[0]); d_giants.members = thrust::raw_pointer_cast(&d_giant_members[0]); hd_size filter_count = get_filter_index(pl->params.boxcar_max) + 1; if( pl->params.verbosity >= 2 ) { cout << "Grouping coincident candidates..." << endl; } hd_size label_count; error = label_candidate_clusters(giant_count, *(ConstRawCandidates*)&d_giants, pl->params.cand_sep_time, pl->params.cand_sep_filter, pl->params.cand_sep_dm, d_giant_labels_ptr, &label_count); if( error != HD_NO_ERROR ) { return throw_error_heimdall(error); } hd_size group_count = label_count; if( pl->params.verbosity >= 2 ) { cout << "Candidate count = " << group_count << endl; } thrust::device_vector<hd_float> d_group_peaks(group_count); thrust::device_vector<hd_size> d_group_inds(group_count); thrust::device_vector<hd_size> d_group_begins(group_count); thrust::device_vector<hd_size> d_group_ends(group_count); thrust::device_vector<hd_size> d_group_filter_inds(group_count); thrust::device_vector<hd_size> d_group_dm_inds(group_count); thrust::device_vector<hd_size> d_group_members(group_count); thrust::device_vector<hd_float> d_group_dms(group_count); RawCandidates d_groups; d_groups.peaks = thrust::raw_pointer_cast(&d_group_peaks[0]); d_groups.inds = thrust::raw_pointer_cast(&d_group_inds[0]); d_groups.begins = thrust::raw_pointer_cast(&d_group_begins[0]); d_groups.ends = thrust::raw_pointer_cast(&d_group_ends[0]); d_groups.filter_inds = thrust::raw_pointer_cast(&d_group_filter_inds[0]); d_groups.dm_inds = thrust::raw_pointer_cast(&d_group_dm_inds[0]); d_groups.members = thrust::raw_pointer_cast(&d_group_members[0]); merge_candidates(giant_count, d_giant_labels_ptr, *(ConstRawCandidates*)&d_giants, d_groups); // Look up the actual DM of each group thrust::device_vector<hd_float> d_dm_list(dm_list, dm_list+dm_count); thrust::gather(d_group_dm_inds.begin(), d_group_dm_inds.end(), d_dm_list.begin(), d_group_dms.begin()); // Device to host transfer of candidates h_group_peaks = d_group_peaks; h_group_inds = d_group_inds; h_group_begins = d_group_begins; h_group_ends = d_group_ends; h_group_filter_inds = d_group_filter_inds; h_group_dm_inds = d_group_dm_inds; h_group_members = d_group_members; h_group_dms = d_group_dms; //h_group_flags = d_group_flags; } if( h_group_peaks.size() > 0 ) { if( pl->params.verbosity >= 2 ) { cout << "Writing output candidates, utc_start=" << pl->params.utc_start << endl; } char buffer[64]; time_t now = pl->params.utc_start + (time_t) (first_idx / pl->params.spectra_per_second); strftime (buffer, 64, HD_TIMESTR, (struct tm*) gmtime(&now)); std::stringstream ss; ss << std::setw(2) << std::setfill('0') << 
(pl->params.beam)%13+1; std::ostringstream oss; if ( pl->params.coincidencer_host != NULL && pl->params.coincidencer_port != -1 ) { try { ClientSocket client_socket ( pl->params.coincidencer_host, pl->params.coincidencer_port ); strftime (buffer, 64, HD_TIMESTR, (struct tm*) gmtime(&(pl->params.utc_start))); oss << buffer << " "; time_t now = pl->params.utc_start + (time_t) (first_idx / pl->params.spectra_per_second); strftime (buffer, 64, HD_TIMESTR, (struct tm*) gmtime(&now)); oss << buffer << " "; oss << first_idx << " "; oss << ss.str() << " "; oss << h_group_peaks.size() << endl; client_socket << oss.str(); oss.flush(); oss.str(""); for (hd_size i=0; i<h_group_peaks.size(); ++i ) { hd_size samp_idx = first_idx + h_group_inds[i]; oss << h_group_peaks[i] << "\t" << samp_idx << "\t" << samp_idx * pl->params.dt << "\t" << h_group_filter_inds[i] << "\t" << h_group_dm_inds[i] << "\t" << h_group_dms[i] << "\t" << h_group_members[i] << "\t" << first_idx + h_group_begins[i] << "\t" << first_idx + h_group_ends[i] << endl; client_socket << oss.str(); oss.flush(); oss.str(""); } // client_socket should close when it goes out of scope... } catch (SocketException& e ) { std::cerr << "SocketException was caught:" << e.description() << "\n"; } } if( pl->params.verbosity >= 2 ) cout << "Output timestamp: " << buffer << endl; if (!both_search) mkdir(pl->params.output_dir.c_str(), 0777); std::string filename = std::string(pl->params.output_dir) + "/" + std::string(buffer) + "_" + ss.str() + ".cand"; if( pl->params.verbosity >= 2 ) cout << "Output filename: " << filename << endl; std::ofstream cand_file(filename.c_str(), std::ios::out); if( pl->params.verbosity >= 2 ) cout << "Dumping " << h_group_peaks.size() << " candidates to " << filename << endl; if (cand_file.good()) { /*cand_file << "S/N\t" << "peak sample\t" << "peak time\t" << "filter idx\t" << "DM idx\t" << "DM\t" << "members no.\t" << "begin sample\t" << "end sample\n"; */ for( hd_size i=0; i<h_group_peaks.size(); ++i ) { hd_size samp_idx = first_idx + h_group_inds[i]; cand_file << h_group_peaks[i] << "\t" << samp_idx << "\t" << samp_idx * pl->params.dt << "\t" << h_group_filter_inds[i] << "\t" << h_group_dm_inds[i] << "\t" << h_group_dms[i] << "\t" //<< h_group_flags[i] << "\t" << h_group_members[i] << "\t" // HACK %13 //<< (beam+pl->params.beam)%13+1 << "\t" << first_idx + h_group_begins[i] << "\t" << first_idx + h_group_ends[i] << "\t" << pl->params.beam_count << "\t" // number of beams << 0 << "\t" // beam mask (whatever that is) << 1 << "\t" // primary beam << "20" << "\t" // max_snr << 1 << "\t" // beam << "\n"; } } else cout << "Skipping dump due to bad file open on " << filename << endl; cand_file.close(); } else { if( pl->params.verbosity >= 2 ) cout << "No candidated deteced. Will not create a file..." 
<< endl; } stop_timer(candidates_timer); stop_timer(total_timer); #ifdef HD_BENCHMARK if( pl->params.verbosity >= 1 ) { cout << "Mem alloc time: " << memory_timer.getTime() << endl; cout << "Copy time: " << copy_timer.getTime() << endl; cout << "Baselining time: " << baseline_timer.getTime() << endl; cout << "Normalisation time: " << normalise_timer.getTime() << endl; cout << "Filtering time: " << filter_timer.getTime() << endl; cout << "Find giants time: " << giants_timer.getTime() << endl; cout << "Process candidates time: " << candidates_timer.getTime() << endl; cout << "Total time: " << total_timer.getTime() << endl; } hd_float time_sum = (memory_timer.getTime() + copy_timer.getTime() + baseline_timer.getTime() + normalise_timer.getTime() + filter_timer.getTime() + giants_timer.getTime() + candidates_timer.getTime()); hd_float misc_time = total_timer.getTime() - time_sum; /* std::ofstream timing_file("timing.dat", std::ios::app); timing_file << total_timer.getTime() << "\t" << misc_time << "\t" << memory_timer.getTime() << "\t" << clean_timer.getTime() << "\t" << dedisp_timer.getTime() << "\t" << copy_timer.getTime() << "\t" << baseline_timer.getTime() << "\t" << normalise_timer.getTime() << "\t" << filter_timer.getTime() << "\t" << giants_timer.getTime() << "\t" << candidates_timer.getTime() << endl; timing_file.close(); */ #endif // HD_BENCHMARK if( too_many_giants ) { return HD_TOO_MANY_EVENTS; } else { return HD_NO_ERROR; } } void hd_destroy_pipeline(hd_pipeline pipeline) { if( pipeline->params.verbosity >= 2 ) { cout << "\tDeleting pipeline object..." << endl; } cout << "Destroying dedispersion plan" << endl; // dedisp_destroy_plan(pipeline->dedispersion_plan); cout << "Destroyed dedispersion plan" << endl; // Note: This assumes memory owned by pipeline cleans itself up if( pipeline ) { delete pipeline; } }
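The pipeline above rescales each boxcar-filtered series before thresholding because summing W roughly independent, unit-variance samples inflates the noise by sqrt(W); dividing the filter output by sqrt(W) (or, as the code does, by a freshly measured RMS) keeps the detection threshold comparable across filter widths. Below is a minimal host-side sketch of that idea only; it is not Heimdall's MatchedFilterPlan, and the function name boxcar_snr is an assumption made for this example.

#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative only: boxcar-sum a roughly unit-variance time series at width w
// and rescale by 1/sqrt(w) so the noise level stays near 1 for every width,
// mirroring the "RMS ~ sqrt(time)" normalisation commented on in the pipeline.
std::vector<float> boxcar_snr(const std::vector<float>& x, std::size_t w) {
  std::vector<float> out;
  if (w == 0 || w > x.size()) return out;
  std::vector<double> prefix(x.size() + 1, 0.0);
  for (std::size_t i = 0; i < x.size(); ++i)
    prefix[i + 1] = prefix[i] + x[i];                       // running sum
  const double norm = 1.0 / std::sqrt(static_cast<double>(w));
  out.reserve(x.size() - w + 1);
  for (std::size_t i = 0; i + w <= x.size(); ++i)
    out.push_back(static_cast<float>((prefix[i + w] - prefix[i]) * norm));
  return out;
}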
a6fb9024715ab9368d6af75ad13d706426148310.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 Microsoft Corporation // Licensed under the MIT license. // Author: Paul Koch <[email protected]> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <type_traits> #include "ebm_native.h" #include "logging.h" #include "common_c.h" #include "bridge_c.h" #include "zones.h" #include "common_cpp.hpp" #include "bridge_cpp.hpp" #include "Registration.hpp" #include "Loss.hpp" namespace DEFINED_ZONE_NAME { #ifndef DEFINED_ZONE_NAME #error DEFINED_ZONE_NAME must be defined #endif // DEFINED_ZONE_NAME template <typename TLoss> GPU_GLOBAL void TestGpuAdd(const Loss * const pLoss, const int * const pVal1, const int * const pVal2, int * const pResult) { TLoss * const pLossSpecific = static_cast<TLoss *>(pLoss); const size_t iGpuThread = threadIdx.x; pResult[iGpuThread] = static_cast<int>(static_cast<float>(pLossSpecific->CalculateGradient(static_cast<float>(pVal1[iGpuThread]), static_cast<float>(pVal2[iGpuThread])))); } struct Cuda_32_Operators final { // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__DOUBLE.html#group__CUDA__MATH__DOUBLE constexpr static size_t countPackedItems = 1; // the number of Unpacked items in a Packed structure typedef float Unpacked; typedef float Packed; private: Packed m_data; public: GPU_BOTH INLINE_ALWAYS Cuda_32_Operators() noexcept { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const float data) noexcept : m_data(static_cast<Unpacked>(data)) { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const double data) noexcept : m_data(static_cast<Unpacked>(data)) { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const int data) noexcept : m_data(static_cast<Unpacked>(data)) { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator+ (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data + other.m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator- (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data - other.m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator* (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data * other.m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator/ (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data / other.m_data); } GPU_BOTH INLINE_ALWAYS bool IsAnyEqual(const Cuda_32_Operators & other) const noexcept { return m_data == other.m_data; } GPU_BOTH INLINE_ALWAYS operator float() const noexcept { return m_data; } GPU_BOTH INLINE_ALWAYS operator double() const noexcept { return m_data; } GPU_BOTH INLINE_ALWAYS bool IsAnyInf() const noexcept { return isinf(m_data); } GPU_BOTH INLINE_ALWAYS bool IsAnyNaN() const noexcept { return isnan(m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators Sqrt() const noexcept { return Cuda_32_Operators(sqrtf(m_data)); } template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian> INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyTraining(const Loss * const pLoss, ApplyTrainingData * const pData) noexcept { constexpr size_t k_cItems = 5; bool bExitError = true; const int aVal1[k_cItems] = { 5, 4, 3, 2, 1 }; const int aVal2[k_cItems] = { 100, 200, 300, 400, 500 }; int aResult[k_cItems]; static_assert(std::is_standard_layout<TLoss>::value && 
std::is_trivially_copyable<TLoss>::value, "This allows offsetof, memcpy, memset, inter-language, GPU and cross-machine use where needed"); int * aDeviceVal1 = nullptr; int * aDeviceVal2 = nullptr; int * aDeviceResult = nullptr; void * pDeviceLoss = nullptr; hipError_t error; error = hipSetDevice(0); if(hipSuccess != error) { goto exit_error; } error = hipMalloc((void **)&aDeviceVal1, k_cItems * sizeof(int)); if(hipSuccess != error) { goto exit_error; } error = hipMalloc((void **)&aDeviceVal2, k_cItems * sizeof(int)); if(hipSuccess != error) { goto exit_error; } error = hipMalloc((void **)&aDeviceResult, k_cItems * sizeof(int)); if(hipSuccess != error) { goto exit_error; } if(!std::is_empty<TLoss>::value) { error = hipMalloc((void **)&pDeviceLoss, sizeof(TLoss)); if(hipSuccess != error) { goto exit_error; } error = hipMemcpy(pDeviceLoss, pLoss, sizeof(TLoss), hipMemcpyHostToDevice); if(hipSuccess != error) { goto exit_error; } } error = hipMemcpy(aDeviceVal1, aVal1, k_cItems * sizeof(int), hipMemcpyHostToDevice); if(hipSuccess != error) { goto exit_error; } error = hipMemcpy(aDeviceVal2, aVal2, k_cItems * sizeof(int), hipMemcpyHostToDevice); if(hipSuccess != error) { goto exit_error; } hipLaunchKernelGGL(( TestGpuAdd<TLoss>), dim3(1), dim3(k_cItems), 0, 0, static_cast<Loss *>(pDeviceLoss), aDeviceVal1, aDeviceVal2, aDeviceResult); hipLaunchKernelGGL(( ExecuteApplyTraining<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian>), dim3(1), dim3(k_cItems), 0, 0, pLoss, pData->m_cRuntimeScores, pData->m_cRuntimePack ); error = hipGetLastError(); if(hipSuccess != error) { goto exit_error; } error = hipDeviceSynchronize(); if(hipSuccess != error) { goto exit_error; } error = hipMemcpy(aResult, aDeviceResult, k_cItems * sizeof(int), hipMemcpyDeviceToHost); if(hipSuccess != error) { goto exit_error; } bExitError = false; exit_error: bool bExitHard = false; if(nullptr != pDeviceLoss) { error = hipFree(pDeviceLoss); if(hipSuccess != error) { bExitHard = true; } } if(nullptr != aDeviceResult) { error = hipFree(aDeviceResult); if(hipSuccess != error) { bExitHard = true; } } if(nullptr != aDeviceVal2) { error = hipFree(aDeviceVal2); if(hipSuccess != error) { bExitHard = true; } } if(nullptr != aDeviceVal1) { error = hipFree(aDeviceVal1); if(hipSuccess != error) { bExitHard = true; } } if(bExitHard) { bExitError = true; // not much to do with the error if we fail hipDeviceReset after failing hipFree error = hipDeviceReset(); } return bExitError ? Error_UnexpectedInternal : Error_None; } template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian> INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyValidation(const Loss * const pLoss, ApplyValidationData * const pData) noexcept { // this allows us to switch execution onto GPU, FPGA, or other local computation // TODO: use something other than <<<1, 1>>> hipLaunchKernelGGL(( ExecuteApplyValidation<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian>), dim3(1), dim3(1), 0, 0, pLoss, pData->m_cRuntimeScores, pData->m_cRuntimePack, nullptr ); return Error_None; } }; static_assert(std::is_standard_layout<Cuda_32_Operators>::value && std::is_trivially_copyable<Cuda_32_Operators>::value, "This allows offsetof, memcpy, memset, inter-language, GPU and cross-machine use where needed"); // FIRST, define the RegisterLoss function that we'll be calling from our registrations. 
This is a static // function, so we can have duplicate named functions in other files and they'll refer to different functions template<template <typename> class TRegistrable, typename... Args> static INLINE_ALWAYS std::shared_ptr<const Registration> RegisterLoss(const char * const sRegistrationName, const Args...args) { return Register<TRegistrable, Cuda_32_Operators>(sRegistrationName, args...); } // now include all our special loss registrations which will use the RegisterLoss function we defined above! #include "loss_registrations.hpp" INTERNAL_IMPORT_EXPORT_BODY ErrorEbmType CreateLoss_Cuda_32( const Config * const pConfig, const char * const sLoss, const char * const sLossEnd, LossWrapper * const pLossWrapperOut ) { return Loss::CreateLoss(&RegisterLosses, pConfig, sLoss, sLossEnd, pLossWrapperOut); } } // DEFINED_ZONE_NAME
a6fb9024715ab9368d6af75ad13d706426148310.cu
// Copyright (c) 2018 Microsoft Corporation // Licensed under the MIT license. // Author: Paul Koch <[email protected]> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <type_traits> #include "ebm_native.h" #include "logging.h" #include "common_c.h" #include "bridge_c.h" #include "zones.h" #include "common_cpp.hpp" #include "bridge_cpp.hpp" #include "Registration.hpp" #include "Loss.hpp" namespace DEFINED_ZONE_NAME { #ifndef DEFINED_ZONE_NAME #error DEFINED_ZONE_NAME must be defined #endif // DEFINED_ZONE_NAME template <typename TLoss> GPU_GLOBAL void TestGpuAdd(const Loss * const pLoss, const int * const pVal1, const int * const pVal2, int * const pResult) { TLoss * const pLossSpecific = static_cast<TLoss *>(pLoss); const size_t iGpuThread = threadIdx.x; pResult[iGpuThread] = static_cast<int>(static_cast<float>(pLossSpecific->CalculateGradient(static_cast<float>(pVal1[iGpuThread]), static_cast<float>(pVal2[iGpuThread])))); } struct Cuda_32_Operators final { // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__SINGLE.html#group__CUDA__MATH__SINGLE // https://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__DOUBLE.html#group__CUDA__MATH__DOUBLE constexpr static size_t countPackedItems = 1; // the number of Unpacked items in a Packed structure typedef float Unpacked; typedef float Packed; private: Packed m_data; public: GPU_BOTH INLINE_ALWAYS Cuda_32_Operators() noexcept { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const float data) noexcept : m_data(static_cast<Unpacked>(data)) { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const double data) noexcept : m_data(static_cast<Unpacked>(data)) { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators(const int data) noexcept : m_data(static_cast<Unpacked>(data)) { } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator+ (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data + other.m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator- (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data - other.m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator* (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data * other.m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators operator/ (const Cuda_32_Operators & other) const noexcept { return Cuda_32_Operators(m_data / other.m_data); } GPU_BOTH INLINE_ALWAYS bool IsAnyEqual(const Cuda_32_Operators & other) const noexcept { return m_data == other.m_data; } GPU_BOTH INLINE_ALWAYS operator float() const noexcept { return m_data; } GPU_BOTH INLINE_ALWAYS operator double() const noexcept { return m_data; } GPU_BOTH INLINE_ALWAYS bool IsAnyInf() const noexcept { return isinf(m_data); } GPU_BOTH INLINE_ALWAYS bool IsAnyNaN() const noexcept { return isnan(m_data); } GPU_BOTH INLINE_ALWAYS Cuda_32_Operators Sqrt() const noexcept { return Cuda_32_Operators(sqrtf(m_data)); } template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian> INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyTraining(const Loss * const pLoss, ApplyTrainingData * const pData) noexcept { constexpr size_t k_cItems = 5; bool bExitError = true; const int aVal1[k_cItems] = { 5, 4, 3, 2, 1 }; const int aVal2[k_cItems] = { 100, 200, 300, 400, 500 }; int aResult[k_cItems]; static_assert(std::is_standard_layout<TLoss>::value && std::is_trivially_copyable<TLoss>::value, "This allows offsetof, memcpy, memset, 
inter-language, GPU and cross-machine use where needed"); int * aDeviceVal1 = nullptr; int * aDeviceVal2 = nullptr; int * aDeviceResult = nullptr; void * pDeviceLoss = nullptr; cudaError_t error; error = cudaSetDevice(0); if(cudaSuccess != error) { goto exit_error; } error = cudaMalloc((void **)&aDeviceVal1, k_cItems * sizeof(int)); if(cudaSuccess != error) { goto exit_error; } error = cudaMalloc((void **)&aDeviceVal2, k_cItems * sizeof(int)); if(cudaSuccess != error) { goto exit_error; } error = cudaMalloc((void **)&aDeviceResult, k_cItems * sizeof(int)); if(cudaSuccess != error) { goto exit_error; } if(!std::is_empty<TLoss>::value) { error = cudaMalloc((void **)&pDeviceLoss, sizeof(TLoss)); if(cudaSuccess != error) { goto exit_error; } error = cudaMemcpy(pDeviceLoss, pLoss, sizeof(TLoss), cudaMemcpyHostToDevice); if(cudaSuccess != error) { goto exit_error; } } error = cudaMemcpy(aDeviceVal1, aVal1, k_cItems * sizeof(int), cudaMemcpyHostToDevice); if(cudaSuccess != error) { goto exit_error; } error = cudaMemcpy(aDeviceVal2, aVal2, k_cItems * sizeof(int), cudaMemcpyHostToDevice); if(cudaSuccess != error) { goto exit_error; } TestGpuAdd<TLoss><<<1, k_cItems>>>(static_cast<Loss *>(pDeviceLoss), aDeviceVal1, aDeviceVal2, aDeviceResult); ExecuteApplyTraining<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian><<<1, k_cItems>>>( pLoss, pData->m_cRuntimeScores, pData->m_cRuntimePack ); error = cudaGetLastError(); if(cudaSuccess != error) { goto exit_error; } error = cudaDeviceSynchronize(); if(cudaSuccess != error) { goto exit_error; } error = cudaMemcpy(aResult, aDeviceResult, k_cItems * sizeof(int), cudaMemcpyDeviceToHost); if(cudaSuccess != error) { goto exit_error; } bExitError = false; exit_error: bool bExitHard = false; if(nullptr != pDeviceLoss) { error = cudaFree(pDeviceLoss); if(cudaSuccess != error) { bExitHard = true; } } if(nullptr != aDeviceResult) { error = cudaFree(aDeviceResult); if(cudaSuccess != error) { bExitHard = true; } } if(nullptr != aDeviceVal2) { error = cudaFree(aDeviceVal2); if(cudaSuccess != error) { bExitHard = true; } } if(nullptr != aDeviceVal1) { error = cudaFree(aDeviceVal1); if(cudaSuccess != error) { bExitHard = true; } } if(bExitHard) { bExitError = true; // not much to do with the error if we fail cudaDeviceReset after failing cudaFree error = cudaDeviceReset(); } return bExitError ? Error_UnexpectedInternal : Error_None; } template<template <typename, typename, ptrdiff_t, ptrdiff_t, bool> class TExecute, typename TLoss, typename TFloat, ptrdiff_t cCompilerScores, ptrdiff_t cCompilerPack, bool bHessian> INLINE_RELEASE_TEMPLATED static ErrorEbmType ApplyValidation(const Loss * const pLoss, ApplyValidationData * const pData) noexcept { // this allows us to switch execution onto GPU, FPGA, or other local computation // TODO: use something other than <<<1, 1>>> ExecuteApplyValidation<TExecute, TLoss, TFloat, cCompilerScores, cCompilerPack, bHessian><<<1, 1>>>( pLoss, pData->m_cRuntimeScores, pData->m_cRuntimePack, nullptr ); return Error_None; } }; static_assert(std::is_standard_layout<Cuda_32_Operators>::value && std::is_trivially_copyable<Cuda_32_Operators>::value, "This allows offsetof, memcpy, memset, inter-language, GPU and cross-machine use where needed"); // FIRST, define the RegisterLoss function that we'll be calling from our registrations. This is a static // function, so we can have duplicate named functions in other files and they'll refer to different functions template<template <typename> class TRegistrable, typename... 
Args> static INLINE_ALWAYS std::shared_ptr<const Registration> RegisterLoss(const char * const sRegistrationName, const Args...args) { return Register<TRegistrable, Cuda_32_Operators>(sRegistrationName, args...); } // now include all our special loss registrations which will use the RegisterLoss function we defined above! #include "loss_registrations.hpp" INTERNAL_IMPORT_EXPORT_BODY ErrorEbmType CreateLoss_Cuda_32( const Config * const pConfig, const char * const sLoss, const char * const sLossEnd, LossWrapper * const pLossWrapperOut ) { return Loss::CreateLoss(&RegisterLosses, pConfig, sLoss, sLossEnd, pLossWrapperOut); } } // DEFINED_ZONE_NAME
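The ApplyTraining test above follows the classic CUDA host/device round trip: allocate device buffers, copy inputs in, launch, check both the launch and the synchronize, copy results back, then free. A stripped-down, self-contained sketch of that same pattern is given below; the kernel name add_kernel and the data values are invented for illustration and are not part of ebm_native.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative only: the allocate / copy in / launch / check / copy out / free
// sequence used by ApplyTraining above, reduced to a five-element vector add.
__global__ void add_kernel(const int* a, const int* b, int* c, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) c[i] = a[i] + b[i];
}

int main() {
  const int n = 5;
  int a[n] = {5, 4, 3, 2, 1};
  int b[n] = {100, 200, 300, 400, 500};
  int c[n] = {0};
  int *d_a = nullptr, *d_b = nullptr, *d_c = nullptr;
  if (cudaMalloc((void**)&d_a, n * sizeof(int)) != cudaSuccess) return 1;
  if (cudaMalloc((void**)&d_b, n * sizeof(int)) != cudaSuccess) return 1;
  if (cudaMalloc((void**)&d_c, n * sizeof(int)) != cudaSuccess) return 1;
  cudaMemcpy(d_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b, n * sizeof(int), cudaMemcpyHostToDevice);
  add_kernel<<<1, n>>>(d_a, d_b, d_c, n);
  if (cudaGetLastError() != cudaSuccess) return 1;      // launch-time errors
  if (cudaDeviceSynchronize() != cudaSuccess) return 1; // execution errors
  cudaMemcpy(c, d_c, n * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) std::printf("%d\n", c[i]);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  return 0;
}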
3c253be8f0f9f66c23fa55a4a7cb191ab1071c37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/span.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_vector.hpp> #include <cstddef> #include <cstring> #include <string> using cudf::device_span; using cudf::host_span; using cudf::detail::device_2dspan; using cudf::detail::host_2dspan; using cudf::detail::hostdevice_2dvector; template <typename T> void expect_equivolent(host_span<T> a, host_span<T> b) { EXPECT_EQ(a.size(), b.size()); EXPECT_EQ(a.data(), b.data()); } template <typename Iterator1, typename T> void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input) { EXPECT_EQ(expected_size, input.size()); for (size_t i = 0; i < expected_size; i++) { EXPECT_EQ(*(expected + i), *(input.begin() + i)); } } template <typename T> void expect_match(std::string expected, host_span<T> input) { return expect_match(expected.begin(), expected.size(), input); } std::string const hello_wold_message = "hello world"; std::vector<char> create_hello_world_message() { return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end()); } class SpanTest : public cudf::test::BaseFixture { }; TEST(SpanTest, CanCreateFullSubspan) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_equivolent(message_span, message_span.subspan(0, message_span.size())); } TEST(SpanTest, CanTakeFirst) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello", message_span.first(5)); } TEST(SpanTest, CanTakeLast) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("world", message_span.last(5)); } TEST(SpanTest, CanTakeSubspanFull) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello world", message_span.subspan(0, 11)); } TEST(SpanTest, CanTakeSubspanPartial) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("lo w", message_span.subspan(3, 4)); } TEST(SpanTest, CanGetFront) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('h', message_span.front()); } TEST(SpanTest, CanGetBack) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('d', message_span.back()); } TEST(SpanTest, CanGetData) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); 
EXPECT_EQ(message.data(), message_span.data()); } TEST(SpanTest, CanDetermineEmptiness) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_FALSE(message_span.empty()); EXPECT_TRUE(empty_span.empty()); } TEST(SpanTest, CanGetSize) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_EQ(static_cast<size_t>(11), message_span.size()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size()); } TEST(SpanTest, CanGetSizeBytes) { auto doubles = std::vector<double>({6, 3, 2}); auto const doubles_span = host_span<double>(doubles.data(), doubles.size()); auto const empty_span = host_span<double>(); EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes()); } TEST(SpanTest, CanCopySpan) { auto message = create_hello_world_message(); host_span<char> message_span_copy; { auto const message_span = host_span<char>(message.data(), message.size()); message_span_copy = message_span; } EXPECT_EQ(message.data(), message_span_copy.data()); EXPECT_EQ(message.size(), message_span_copy.size()); } TEST(SpanTest, CanSubscriptRead) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('o', message_span[4]); } TEST(SpanTest, CanSubscriptWrite) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); message_span[4] = 'x'; EXPECT_EQ('x', message_span[4]); } TEST(SpanTest, CanConstructFromHostContainers) { auto std_vector = std::vector<int>(1); auto h_vector = thrust::host_vector<int>(1); (void)host_span<int>(std_vector); (void)host_span<int>(h_vector); auto const std_vector_c = std_vector; auto const h_vector_c = h_vector; (void)host_span<int const>(std_vector_c); (void)host_span<int const>(h_vector_c); } TEST(SpanTest, CanConstructFromDeviceContainers) { auto d_thrust_vector = thrust::device_vector<int>(1); auto d_vector = rmm::device_vector<int>(1); auto d_uvector = rmm::device_uvector<int>(1, rmm::cuda_stream_default); (void)device_span<int>(d_thrust_vector); (void)device_span<int>(d_vector); (void)device_span<int>(d_uvector); auto const& d_thrust_vector_c = d_thrust_vector; auto const& d_vector_c = d_vector; auto const& d_uvector_c = d_uvector; (void)device_span<int const>(d_thrust_vector_c); (void)device_span<int const>(d_vector_c); (void)device_span<int const>(d_uvector_c); } __global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; } TEST(SpanTest, CanUseDeviceSpan) { rmm::device_vector<bool> d_message = std::vector<bool>({false}); auto d_span = device_span<bool>(d_message.data().get(), d_message.size()); hipLaunchKernelGGL(( simple_device_kernel), dim3(1), dim3(1), 0, 0, d_span); hipDeviceSynchronize(); thrust::host_vector<bool> h_message = d_message; ASSERT_TRUE(h_message[0]); } class MdSpanTest : public cudf::test::BaseFixture { }; TEST(MdSpanTest, CanDetermineEmptiness) { auto const vector = hostdevice_2dvector<int>(1, 2); auto const no_rows_vector = hostdevice_2dvector<int>(0, 2); auto const no_columns_vector = hostdevice_2dvector<int>(1, 0); EXPECT_FALSE(host_2dspan<int const>{vector}.is_empty()); EXPECT_FALSE(device_2dspan<int const>{vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_rows_vector}.is_empty()); 
EXPECT_TRUE(device_2dspan<int const>{no_rows_vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_columns_vector}.is_empty()); EXPECT_TRUE(device_2dspan<int const>{no_columns_vector}.is_empty()); } __global__ void readwrite_kernel(device_2dspan<int> result) { if (result[5][6] == 5) { result[5][6] *= 6; } else { result[5][6] = 5; } } TEST(MdSpanTest, DeviceReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23); hipLaunchKernelGGL(( readwrite_kernel), dim3(1), dim3(1), 0, 0, vector); hipLaunchKernelGGL(( readwrite_kernel), dim3(1), dim3(1), 0, 0, vector); vector.device_to_host(rmm::cuda_stream_default, true); EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, HostReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23); auto span = host_2dspan<int>{vector}; span[5][6] = 5; if (span[5][6] == 5) { span[5][6] *= 6; } EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, CanGetSize) { auto const vector = hostdevice_2dvector<int>(1, 2); EXPECT_EQ(host_2dspan<int const>{vector}.size(), vector.size()); EXPECT_EQ(device_2dspan<int const>{vector}.size(), vector.size()); } TEST(MdSpanTest, CanGetCount) { auto const vector = hostdevice_2dvector<int>(11, 23); EXPECT_EQ(host_2dspan<int const>{vector}.count(), 11ul * 23); EXPECT_EQ(device_2dspan<int const>{vector}.count(), 11ul * 23); } CUDF_TEST_PROGRAM_MAIN()
3c253be8f0f9f66c23fa55a4a7cb191ab1071c37.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/utilities/span.hpp> #include <io/utilities/hostdevice_vector.hpp> #include <cudf_test/base_fixture.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/type_lists.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_vector.hpp> #include <cstddef> #include <cstring> #include <string> using cudf::device_span; using cudf::host_span; using cudf::detail::device_2dspan; using cudf::detail::host_2dspan; using cudf::detail::hostdevice_2dvector; template <typename T> void expect_equivolent(host_span<T> a, host_span<T> b) { EXPECT_EQ(a.size(), b.size()); EXPECT_EQ(a.data(), b.data()); } template <typename Iterator1, typename T> void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input) { EXPECT_EQ(expected_size, input.size()); for (size_t i = 0; i < expected_size; i++) { EXPECT_EQ(*(expected + i), *(input.begin() + i)); } } template <typename T> void expect_match(std::string expected, host_span<T> input) { return expect_match(expected.begin(), expected.size(), input); } std::string const hello_wold_message = "hello world"; std::vector<char> create_hello_world_message() { return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end()); } class SpanTest : public cudf::test::BaseFixture { }; TEST(SpanTest, CanCreateFullSubspan) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_equivolent(message_span, message_span.subspan(0, message_span.size())); } TEST(SpanTest, CanTakeFirst) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello", message_span.first(5)); } TEST(SpanTest, CanTakeLast) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("world", message_span.last(5)); } TEST(SpanTest, CanTakeSubspanFull) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("hello world", message_span.subspan(0, 11)); } TEST(SpanTest, CanTakeSubspanPartial) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); expect_match("lo w", message_span.subspan(3, 4)); } TEST(SpanTest, CanGetFront) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('h', message_span.front()); } TEST(SpanTest, CanGetBack) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('d', message_span.back()); } TEST(SpanTest, CanGetData) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ(message.data(), message_span.data()); } TEST(SpanTest, CanDetermineEmptiness) { auto message = 
create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_FALSE(message_span.empty()); EXPECT_TRUE(empty_span.empty()); } TEST(SpanTest, CanGetSize) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); auto const empty_span = host_span<char>(); EXPECT_EQ(static_cast<size_t>(11), message_span.size()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size()); } TEST(SpanTest, CanGetSizeBytes) { auto doubles = std::vector<double>({6, 3, 2}); auto const doubles_span = host_span<double>(doubles.data(), doubles.size()); auto const empty_span = host_span<double>(); EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes()); EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes()); } TEST(SpanTest, CanCopySpan) { auto message = create_hello_world_message(); host_span<char> message_span_copy; { auto const message_span = host_span<char>(message.data(), message.size()); message_span_copy = message_span; } EXPECT_EQ(message.data(), message_span_copy.data()); EXPECT_EQ(message.size(), message_span_copy.size()); } TEST(SpanTest, CanSubscriptRead) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); EXPECT_EQ('o', message_span[4]); } TEST(SpanTest, CanSubscriptWrite) { auto message = create_hello_world_message(); auto const message_span = host_span<char>(message.data(), message.size()); message_span[4] = 'x'; EXPECT_EQ('x', message_span[4]); } TEST(SpanTest, CanConstructFromHostContainers) { auto std_vector = std::vector<int>(1); auto h_vector = thrust::host_vector<int>(1); (void)host_span<int>(std_vector); (void)host_span<int>(h_vector); auto const std_vector_c = std_vector; auto const h_vector_c = h_vector; (void)host_span<int const>(std_vector_c); (void)host_span<int const>(h_vector_c); } TEST(SpanTest, CanConstructFromDeviceContainers) { auto d_thrust_vector = thrust::device_vector<int>(1); auto d_vector = rmm::device_vector<int>(1); auto d_uvector = rmm::device_uvector<int>(1, rmm::cuda_stream_default); (void)device_span<int>(d_thrust_vector); (void)device_span<int>(d_vector); (void)device_span<int>(d_uvector); auto const& d_thrust_vector_c = d_thrust_vector; auto const& d_vector_c = d_vector; auto const& d_uvector_c = d_uvector; (void)device_span<int const>(d_thrust_vector_c); (void)device_span<int const>(d_vector_c); (void)device_span<int const>(d_uvector_c); } __global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; } TEST(SpanTest, CanUseDeviceSpan) { rmm::device_vector<bool> d_message = std::vector<bool>({false}); auto d_span = device_span<bool>(d_message.data().get(), d_message.size()); simple_device_kernel<<<1, 1>>>(d_span); cudaDeviceSynchronize(); thrust::host_vector<bool> h_message = d_message; ASSERT_TRUE(h_message[0]); } class MdSpanTest : public cudf::test::BaseFixture { }; TEST(MdSpanTest, CanDetermineEmptiness) { auto const vector = hostdevice_2dvector<int>(1, 2); auto const no_rows_vector = hostdevice_2dvector<int>(0, 2); auto const no_columns_vector = hostdevice_2dvector<int>(1, 0); EXPECT_FALSE(host_2dspan<int const>{vector}.is_empty()); EXPECT_FALSE(device_2dspan<int const>{vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_rows_vector}.is_empty()); EXPECT_TRUE(device_2dspan<int const>{no_rows_vector}.is_empty()); EXPECT_TRUE(host_2dspan<int const>{no_columns_vector}.is_empty()); EXPECT_TRUE(device_2dspan<int 
const>{no_columns_vector}.is_empty()); } __global__ void readwrite_kernel(device_2dspan<int> result) { if (result[5][6] == 5) { result[5][6] *= 6; } else { result[5][6] = 5; } } TEST(MdSpanTest, DeviceReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23); readwrite_kernel<<<1, 1>>>(vector); readwrite_kernel<<<1, 1>>>(vector); vector.device_to_host(rmm::cuda_stream_default, true); EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, HostReadWrite) { auto vector = hostdevice_2dvector<int>(11, 23); auto span = host_2dspan<int>{vector}; span[5][6] = 5; if (span[5][6] == 5) { span[5][6] *= 6; } EXPECT_EQ(vector[5][6], 30); } TEST(MdSpanTest, CanGetSize) { auto const vector = hostdevice_2dvector<int>(1, 2); EXPECT_EQ(host_2dspan<int const>{vector}.size(), vector.size()); EXPECT_EQ(device_2dspan<int const>{vector}.size(), vector.size()); } TEST(MdSpanTest, CanGetCount) { auto const vector = hostdevice_2dvector<int>(11, 23); EXPECT_EQ(host_2dspan<int const>{vector}.count(), 11ul * 23); EXPECT_EQ(device_2dspan<int const>{vector}.count(), 11ul * 23); } CUDF_TEST_PROGRAM_MAIN()
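The pair above differs mostly in launch syntax: the .hip file wraps kernel calls in hipLaunchKernelGGL(...) while the .cu file keeps the triple-chevron form. Below is a minimal, self-contained sketch of that CUDA-side launch, assuming only the plain CUDA runtime (no cudf/rmm); the bool* parameter stands in for the device_span<bool> used in the test above.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void simple_device_kernel(bool *result) { result[0] = true; }

int main() {
  bool *d_flag = nullptr;
  cudaMalloc(&d_flag, sizeof(bool));
  cudaMemset(d_flag, 0, sizeof(bool));

  // CUDA triple-chevron launch; hipify rewrites this call into
  // hipLaunchKernelGGL((simple_device_kernel), dim3(1), dim3(1), 0, 0, d_flag).
  simple_device_kernel<<<1, 1>>>(d_flag);
  cudaDeviceSynchronize();

  bool h_flag = false;
  cudaMemcpy(&h_flag, d_flag, sizeof(bool), cudaMemcpyDeviceToHost);
  std::printf("kernel ran: %s\n", h_flag ? "true" : "false");
  cudaFree(d_flag);
  return 0;
}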
5340a9adaf0550345ce7e47f44bbbaf3e2d0a349.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************
 * DESCRIPTION:
 *   Parallel Concurrent Wave Equation - C Version
 *   This program implements the concurrent wave equation by using CUDA
 ************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ceild(n, d) ceil(((double)(n)) / ((double)(d)))
#define THREADS 96
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265

void check_param(void);
void update(void);
void printfinal(void);

int nsteps,  /* number of time steps */
    tpoints, /* total points along string */
    rcode;   /* generic return code */
float values[MAXPOINTS + 2], /* values at time t */
    oldval[MAXPOINTS + 2],   /* values at time (t-dt) */
    newval[MAXPOINTS + 2];   /* values at time (t+dt) */

/**********************************************************************
 * Checks input values from parameters
 *********************************************************************/
void check_param(void) {
  char tchar[20];

  /* check number of points, number of iterations */
  while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
    printf("Enter number of points along vibrating string [%d-%d]: ",
           MINPOINTS, MAXPOINTS);
    scanf("%s", tchar);
    tpoints = atoi(tchar);
    if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
      printf("Invalid. Please enter value between %d and %d\n", MINPOINTS,
             MAXPOINTS);
  }
  while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
    printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
    scanf("%s", tchar);
    nsteps = atoi(tchar);
    if ((nsteps < 1) || (nsteps > MAXSTEPS))
      printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
  }
  printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}

/**********************************************************************
 * Calculate new values using wave equation
 *********************************************************************/
__device__ float do_math(float val, float old) {
  float dtime, c, dx, tau, sqtau;

  dtime = 0.3;
  c = 1.0;
  dx = 1.0;
  tau = (c * dtime / dx);
  sqtau = tau * tau;
  return (2.0 * val) - old + (sqtau * (-2.0) * val);
}

#define GET_INDEX(nblock) (1 + threadIdx.x + blockIdx.x * nblock)

/**********************************************************************
 * Update all values along line a specified number of times
 *********************************************************************/
__global__ void update(float *device_values, int tpoints, int nsteps) {
  int i;
  float values1, newval1, oldval1;
  int idx = GET_INDEX(THREADS); /* k */

  if ((idx == 1) || (idx == tpoints)) {
    values1 = 0.0;
  } else {
    /* initialize this point */
    /* Calculate initial values based on sine curve */
    float x, fac, tmp;
    fac = 2.0 * PI;
    tmp = tpoints - 1;
    x = (float)(idx - 1) / tmp;
    values1 = sin(fac * x);
    oldval1 = values1;

    /* for each step */
    for (i = 1; i <= nsteps; ++i) {
      /* Update each point for this time step */
      newval1 = do_math(values1, oldval1);
      oldval1 = values1;
      values1 = newval1;
    }
  }
  device_values[idx] = values1;
}

/**********************************************************************
 * Print final results
 *********************************************************************/
void printfinal() {
  int i;

  for (i = 1; i <= tpoints; i++) {
    printf("%6.4f ", values[i]);
    if (i % 10 == 0) printf("\n");
  }
}

/**********************************************************************
 * Main program
 *********************************************************************/
int main(int argc, char *argv[]) {
  sscanf(argv[1], "%d", &tpoints);
  sscanf(argv[2], "%d", &nsteps);
  check_param();

  /* setup cuda env */
  float *device_values;
  int size = (1 + tpoints) * sizeof(float);
  int block = ceild(tpoints, THREADS);
  hipMalloc((void **)&device_values, size);

  printf("Initializing points on the line...\n");
  printf("Updating all points for all time steps...\n");

  hipLaunchKernelGGL(( update), dim3(block), dim3(THREADS), 0, 0, device_values, tpoints, nsteps);

  /* move result back to host */
  hipMemcpy(values, device_values, size, hipMemcpyDeviceToHost);

  printf("Printing final results...\n");
  printfinal();
  printf("\nDone.\n\n");

  return 0;
}
5340a9adaf0550345ce7e47f44bbbaf3e2d0a349.cu
/*************************************************************************
 * DESCRIPTION:
 *   Parallel Concurrent Wave Equation - C Version
 *   This program implements the concurrent wave equation by using CUDA
 ************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ceild(n, d) ceil(((double)(n)) / ((double)(d)))
#define THREADS 96
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265

void check_param(void);
void update(void);
void printfinal(void);

int nsteps,  /* number of time steps */
    tpoints, /* total points along string */
    rcode;   /* generic return code */
float values[MAXPOINTS + 2], /* values at time t */
    oldval[MAXPOINTS + 2],   /* values at time (t-dt) */
    newval[MAXPOINTS + 2];   /* values at time (t+dt) */

/**********************************************************************
 * Checks input values from parameters
 *********************************************************************/
void check_param(void) {
  char tchar[20];

  /* check number of points, number of iterations */
  while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
    printf("Enter number of points along vibrating string [%d-%d]: ",
           MINPOINTS, MAXPOINTS);
    scanf("%s", tchar);
    tpoints = atoi(tchar);
    if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
      printf("Invalid. Please enter value between %d and %d\n", MINPOINTS,
             MAXPOINTS);
  }
  while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
    printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
    scanf("%s", tchar);
    nsteps = atoi(tchar);
    if ((nsteps < 1) || (nsteps > MAXSTEPS))
      printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
  }
  printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}

/**********************************************************************
 * Calculate new values using wave equation
 *********************************************************************/
__device__ float do_math(float val, float old) {
  float dtime, c, dx, tau, sqtau;

  dtime = 0.3;
  c = 1.0;
  dx = 1.0;
  tau = (c * dtime / dx);
  sqtau = tau * tau;
  return (2.0 * val) - old + (sqtau * (-2.0) * val);
}

#define GET_INDEX(nblock) (1 + threadIdx.x + blockIdx.x * nblock)

/**********************************************************************
 * Update all values along line a specified number of times
 *********************************************************************/
__global__ void update(float *device_values, int tpoints, int nsteps) {
  int i;
  float values1, newval1, oldval1;
  int idx = GET_INDEX(THREADS); /* k */

  if ((idx == 1) || (idx == tpoints)) {
    values1 = 0.0;
  } else {
    /* initialize this point */
    /* Calculate initial values based on sine curve */
    float x, fac, tmp;
    fac = 2.0 * PI;
    tmp = tpoints - 1;
    x = (float)(idx - 1) / tmp;
    values1 = sin(fac * x);
    oldval1 = values1;

    /* for each step */
    for (i = 1; i <= nsteps; ++i) {
      /* Update each point for this time step */
      newval1 = do_math(values1, oldval1);
      oldval1 = values1;
      values1 = newval1;
    }
  }
  device_values[idx] = values1;
}

/**********************************************************************
 * Print final results
 *********************************************************************/
void printfinal() {
  int i;

  for (i = 1; i <= tpoints; i++) {
    printf("%6.4f ", values[i]);
    if (i % 10 == 0) printf("\n");
  }
}

/**********************************************************************
 * Main program
 *********************************************************************/
int main(int argc, char *argv[]) {
  sscanf(argv[1], "%d", &tpoints);
  sscanf(argv[2], "%d", &nsteps);
  check_param();

  /* setup cuda env */
  float *device_values;
  int size = (1 + tpoints) * sizeof(float);
  int block = ceild(tpoints, THREADS);
  cudaMalloc((void **)&device_values, size);

  printf("Initializing points on the line...\n");
  printf("Updating all points for all time steps...\n");

  update<<<block, THREADS>>>(device_values, tpoints, nsteps);

  /* move result back to host */
  cudaMemcpy(values, device_values, size, cudaMemcpyDeviceToHost);

  printf("Printing final results...\n");
  printfinal();
  printf("\nDone.\n\n");

  return 0;
}
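Both versions of the wave program launch one thread per string point, with block = ceild(tpoints, THREADS) blocks and the 1-based index 1 + threadIdx.x + blockIdx.x * THREADS, and each thread runs the recurrence new = 2*val - old + sqtau*(-2)*val entirely in registers. The sketch below is a compact standalone version of that scheme; it assumes fixed small tpoints/nsteps instead of argv parsing and adds a bounds guard for threads past the last point, which the original launch does not have.

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

#define THREADS 96
#define PI 3.14159265f

__global__ void wave_update(float *out, int tpoints, int nsteps) {
  int idx = 1 + threadIdx.x + blockIdx.x * THREADS;  // same 1-based indexing as update()
  if (idx > tpoints) return;                         // added guard for the last partial block
  float val = 0.0f, old = 0.0f;
  if (idx != 1 && idx != tpoints) {                  // endpoints stay pinned at 0
    float x = (float)(idx - 1) / (float)(tpoints - 1);
    val = sinf(2.0f * PI * x);                       // initial sine profile
    old = val;
    const float tau = 0.3f;                          // c * dtime / dx with c = dx = 1, dtime = 0.3
    const float sqtau = tau * tau;
    for (int i = 1; i <= nsteps; ++i) {              // explicit time stepping, all in registers
      float newval = 2.0f * val - old + sqtau * (-2.0f) * val;
      old = val;
      val = newval;
    }
  }
  out[idx] = val;
}

int main() {
  const int tpoints = 20, nsteps = 10;
  float *d_vals = nullptr;
  cudaMalloc(&d_vals, (tpoints + 1) * sizeof(float));
  const int blocks = (tpoints + THREADS - 1) / THREADS;  // ceild(tpoints, THREADS)
  wave_update<<<blocks, THREADS>>>(d_vals, tpoints, nsteps);
  float h_vals[tpoints + 1];
  cudaMemcpy(h_vals, d_vals, sizeof(h_vals), cudaMemcpyDeviceToHost);
  for (int i = 1; i <= tpoints; ++i) std::printf("%6.4f ", h_vals[i]);
  std::printf("\n");
  cudaFree(d_vals);
  return 0;
}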
cee63ac31f7e38f6fa2271ddaecf0d24ffd511ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD> __global__ void avg_pool2d_out_cuda_frame(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } if(COUNT_INCLUDE_PAD) top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / pool_size); else top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / ((hend - hstart) * (wend - wstart))); } } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD> __global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if(COUNT_INCLUDE_PAD) gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; else gradient += top_diff_slice[ph * pooled_width + pw] / ((hend - hstart) * (wend - wstart)); } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } void avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input_, "input_", 2 }; checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) && (stride.empty() || stride.size() == 2) && (padding.size() == 1 || padding.size() == 2), "avg_pool2d: all IntArrayRef sizes must be 2"); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, 1, 1, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int count = safe_downcast<int, int64_t>(output.numel()); const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data<scalar_t>(); scalar_t *input_data = input.data<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, true>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data<scalar_t>(); scalar_t *input_data = input.data<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data); } ); } TORCH_CHECK(hipGetLastError() == hipSuccess, "avg_pool2d_out_cuda_frame failed with error code ", hipGetLastError()); if (input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } Tensor& avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("avg_pool2d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) && (stride.empty() || stride.size() == 2) && (padding.size() == 1 || padding.size() == 2), "avg_pool2d: all IntArrayRef sizes must be 2"); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? 
padH : safe_downcast<int, int64_t>(padding[1]); const Tensor input = input_.contiguous(); const Tensor gradOutput = gradOutput_.contiguous(); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); avg_pool2d_backward_shape_check( input_, gradOutput_, nbatch, kH, kW, dH, dW, padH, padW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); gradInput.resize_as_(input); const int count = safe_downcast<int, int64_t>(input.numel()); const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data<scalar_t>(); scalar_t *gradInput_data = gradInput.data<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, true>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data<scalar_t>(); scalar_t *gradInput_data = gradInput.data<scalar_t>(); hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data); } ); } TORCH_CHECK(hipGetLastError() == hipSuccess, "avg_pool2d_backward_out_cuda failed with error code ", hipGetLastError()); return gradInput; } } // namespace Tensor& avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return output; } Tensor avg_pool2d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { Tensor output = at::empty({0}, input.options()); avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return output; } Tensor& avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return gradInput; } Tensor avg_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { auto gradInput = at::zeros_like(input); avg_pool2d_backward_out_cuda_template( 
gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return gradInput; } } // at::native } // at
cee63ac31f7e38f6fa2271ddaecf0d24ffd511ff.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD> __global__ void avg_pool2d_out_cuda_frame(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } if(COUNT_INCLUDE_PAD) top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / pool_size); else top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / ((hend - hstart) * (wend - wstart))); } } template <typename scalar_t, typename accscalar_t, bool COUNT_INCLUDE_PAD> __global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if(COUNT_INCLUDE_PAD) gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; else gradient += top_diff_slice[ph * pooled_width + pw] / ((hend - hstart) * (wend - wstart)); } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } void avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input_, "input_", 2 }; checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) && (stride.empty() || stride.size() == 2) && (padding.size() == 1 || padding.size() == 2), "avg_pool2d: all IntArrayRef sizes must be 2"); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, 1, 1, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int count = safe_downcast<int, int64_t>(output.numel()); const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data<scalar_t>(); scalar_t *input_data = input.data<scalar_t>(); avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, true> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data<scalar_t>(); scalar_t *input_data = input.data<scalar_t>(); avg_pool2d_out_cuda_frame<scalar_t, accscalar_t, false> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data); } ); } TORCH_CHECK(cudaGetLastError() == cudaSuccess, "avg_pool2d_out_cuda_frame failed with error code ", cudaGetLastError()); if (input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } Tensor& avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("avg_pool2d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) && (stride.empty() || stride.size() == 2) && (padding.size() == 1 || padding.size() == 2), "avg_pool2d: all IntArrayRef sizes must be 2"); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const Tensor input = input_.contiguous(); const Tensor gradOutput = gradOutput_.contiguous(); const int64_t nbatch = input.ndimension() == 4 ? 
input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); avg_pool2d_backward_shape_check( input_, gradOutput_, nbatch, kH, kW, dH, dW, padH, padW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); gradInput.resize_as_(input); const int count = safe_downcast<int, int64_t>(input.numel()); const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); if (count_include_pad) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data<scalar_t>(); scalar_t *gradInput_data = gradInput.data<scalar_t>(); avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, true> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data); } ); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data<scalar_t>(); scalar_t *gradInput_data = gradInput.data<scalar_t>(); avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t, false> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data); } ); } TORCH_CHECK(cudaGetLastError() == cudaSuccess, "avg_pool2d_backward_out_cuda failed with error code ", cudaGetLastError()); return gradInput; } } // namespace Tensor& avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return output; } Tensor avg_pool2d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { Tensor output = at::empty({0}, input.options()); avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return output; } Tensor& avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return gradInput; } Tensor avg_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad) { auto gradInput = at::zeros_like(input); avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad); return gradInput; } } // at::native } // at
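The avg_pool2d forward kernel above decodes a flat output index into (n, c, ph, pw), clips the pooling window to the input, and divides by either the padded window size (count_include_pad) or the clipped one. The following reduced sketch covers only the count_include_pad == false path, assuming plain CUDA with float NCHW data and no ATen dispatch machinery; the window clipping is folded into a single step, and avg_pool2d_naive is an illustrative name, not part of the library.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void avg_pool2d_naive(const float *in, float *out,
                                 int channels, int height, int width,
                                 int pooled_h, int pooled_w,
                                 int kh, int kw, int sh, int sw,
                                 int pad_h, int pad_w, int nthreads) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads;
       index += blockDim.x * gridDim.x) {
    const int pw = index % pooled_w;                       // same flat-index decomposition
    const int ph = (index / pooled_w) % pooled_h;          // as avg_pool2d_out_cuda_frame
    const int c  = (index / pooled_w / pooled_h) % channels;
    const int n  = index / pooled_w / pooled_h / channels;
    int hstart = ph * sh - pad_h, wstart = pw * sw - pad_w;
    int hend = min(hstart + kh, height), wend = min(wstart + kw, width);
    hstart = max(hstart, 0); wstart = max(wstart, 0);
    float sum = 0.f;
    const float *slice = in + (n * channels + c) * height * width;
    for (int h = hstart; h < hend; ++h)
      for (int w = wstart; w < wend; ++w)
        sum += slice[h * width + w];
    out[index] = sum / ((hend - hstart) * (wend - wstart));  // divisor excludes padding
  }
}

int main() {
  // 1x1x4x4 input, 2x2 kernel, stride 2, no padding -> 1x1x2x2 output.
  const int H = 4, W = 4, K = 2, S = 2, PH = 2, PW = 2;
  float h_in[H * W];
  for (int i = 0; i < H * W; ++i) h_in[i] = (float)i;
  float *d_in, *d_out;
  cudaMalloc(&d_in, sizeof(h_in));
  cudaMalloc(&d_out, PH * PW * sizeof(float));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  avg_pool2d_naive<<<1, 64>>>(d_in, d_out, 1, H, W, PH, PW, K, K, S, S, 0, 0, PH * PW);
  float h_out[PH * PW];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < PH * PW; ++i) std::printf("%g ", h_out[i]);  // expect 2.5 4.5 10.5 12.5
  std::printf("\n");
  cudaFree(d_in); cudaFree(d_out);
  return 0;
}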
a67ee7611672d684a1a2bac2d3e5a77c8629a35a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "pad_impl.h" namespace onnxruntime { namespace cuda { // PadMode enum from core/providers/cpu/tensor/pad.h, cannot use that header because of nvcc/onnxruntime incompatibility enum class PadMode : int { Constant = 0, Reflect, Edge }; template <typename T, int pad_mode> __global__ void _PadKernel( const size_t shape_rank, const TArray<int64_t> input_dims, const TArray<int64_t> input_strides, const TArray<int64_t> lower_pads, const T pad_value, const T* input_data, const TArray<fast_divmod> fdm_output_strides, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG input_index = 0; CUDA_LONG output_index = id; bool use_pad_value = false; for (int dim = 0; dim < shape_rank && !use_pad_value; ++dim) { int out_coord, r; fdm_output_strides[dim].divmod(output_index, out_coord, r); output_index = r; int in_coord = 0; if (out_coord < lower_pads[dim]) { switch ((PadMode)pad_mode) { case PadMode::Constant: use_pad_value = true; break; case PadMode::Edge: in_coord = 0; break; case PadMode::Reflect: in_coord = lower_pads[dim] - out_coord; break; } } else if (out_coord >= lower_pads[dim] + input_dims[dim]) { switch ((PadMode)pad_mode) { case PadMode::Constant: use_pad_value = true; break; case PadMode::Edge: in_coord = input_dims[dim] - 1; break; case PadMode::Reflect: in_coord = input_dims[dim] - 2 - (out_coord - (lower_pads[dim] + input_dims[dim])); break; } } else { in_coord = out_coord - lower_pads[dim]; } input_index += input_strides[dim] * in_coord; } output_data[id] = use_pad_value ? (T)pad_value : input_data[input_index]; } template <typename T, int pad_mode> __global__ void _PadNCHWInputWithPaddingAlongHAndWKernel( const int64_t n, // Batch const int64_t c, // Channel const int64_t input_height, const int64_t output_height, const int64_t input_width, const int64_t output_width, const int64_t pad_height_start, const int64_t pad_width_start, const T pad_value, const T* input_data, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const int current_output_width = id % output_width; int nc_index = id / output_width; const int current_output_height = nc_index % output_height; nc_index /= output_height; int current_input_height = current_output_height - pad_height_start; int current_input_width = current_output_width - pad_width_start; switch ((PadMode)pad_mode) { case PadMode::Constant: output_data[id] = (current_input_height < 0 || current_input_width < 0 || current_input_height >= input_height || current_input_width >= input_width) ? 
pad_value : input_data[(nc_index * input_height + current_input_height) * input_width + current_input_width]; break; case PadMode::Edge: current_input_height = ::max(0, ::min(current_input_height, static_cast<int>(input_height - 1))); current_input_width = ::max(0, ::min(current_input_width, static_cast<int>(input_width - 1))); output_data[id] = input_data[(nc_index * input_height + current_input_height) * input_width + current_input_width]; break; case PadMode::Reflect: current_input_height = ::max(current_input_height, -current_input_height); current_input_height = ::min(static_cast<int>(current_input_height), 2 * static_cast<int>(input_height) - current_input_height - 2); current_input_width = ::max(current_input_width, -current_input_width); current_input_width = ::min(static_cast<int>(current_input_width), 2 * static_cast<int>(input_width) - current_input_width - 2); output_data[id] = input_data[(nc_index * input_height + current_input_height) * input_width + current_input_width]; break; } } template <typename T> void PadImpl( hipStream_t stream, const size_t shape_rank, const TArray<int64_t>& input_dims, const TArray<int64_t>& input_strides, const TArray<int64_t>& lower_pads, const T pad_value, const int pad_mode, const T* input_data, const TArray<fast_divmod>& fdm_output_strides, T* output_data, const size_t N) { if (N == 0) // special case where there's a dim value of 0 in the output shape return; int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); switch (pad_mode) { case 0: hipLaunchKernelGGL(( _PadKernel<T, 0>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, shape_rank, input_dims, input_strides, lower_pads, pad_value, input_data, fdm_output_strides, output_data, N); break; case 1: hipLaunchKernelGGL(( _PadKernel<T, 1>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, shape_rank, input_dims, input_strides, lower_pads, pad_value, input_data, fdm_output_strides, output_data, N); break; case 2: hipLaunchKernelGGL(( _PadKernel<T, 2>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, shape_rank, input_dims, input_strides, lower_pads, pad_value, input_data, fdm_output_strides, output_data, N); break; } } template <typename T> void PadNCHWInputWithPaddingAlongHAndWImpl( hipStream_t stream, const int64_t n, // Batch const int64_t c, // Channel const int64_t input_height, const int64_t output_height, const int64_t input_width, const int64_t output_width, const int64_t pad_height_start, const int64_t pad_width_start, const T pad_value, const int pad_mode, const T* input_data, T* output_data, const size_t N) { if (N == 0) // special case where there's a dim value of 0 in the output shape return; int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); switch (pad_mode) { case 0: hipLaunchKernelGGL(( _PadNCHWInputWithPaddingAlongHAndWKernel<T, 0>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, n, c, input_height, output_height, input_width, output_width, pad_height_start, pad_width_start, pad_value, input_data, output_data, N); break; case 1: hipLaunchKernelGGL(( _PadNCHWInputWithPaddingAlongHAndWKernel<T, 1>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, n, c, input_height, output_height, input_width, output_width, pad_height_start, pad_width_start, pad_value, input_data, output_data, N); break; case 2: hipLaunchKernelGGL(( _PadNCHWInputWithPaddingAlongHAndWKernel<T, 2>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 
stream, n, c, input_height, output_height, input_width, output_width, pad_height_start, pad_width_start, pad_value, input_data, output_data, N); break; } } #define SPECIALIZED_IMPL(T) \ template void PadImpl<T>(hipStream_t stream, const size_t shape_rank, \ const TArray<int64_t>& input_dims, const TArray<int64_t>& input_strides, \ const TArray<int64_t>& lower_pads, \ const T pad_value, \ const int pad_mode, \ const T* input_data, \ const TArray<fast_divmod>& fdm_output_strides, \ T* output_data, \ const size_t N); \ template void PadNCHWInputWithPaddingAlongHAndWImpl<T>(hipStream_t stream, const int64_t n, const int64_t c, \ const int64_t input_height, const int64_t output_height, \ const int64_t input_width, const int64_t output_width, \ const int64_t pad_height_start, \ const int64_t pad_width_start, \ const T pad_value, \ const int pad_mode, \ const T* input_data, T* output_data, \ const size_t N); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) SPECIALIZED_IMPL(half) SPECIALIZED_IMPL(bool) } // namespace cuda } // namespace onnxruntime
a67ee7611672d684a1a2bac2d3e5a77c8629a35a.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include "core/providers/cuda/cu_inc/common.cuh" #include "pad_impl.h" namespace onnxruntime { namespace cuda { // PadMode enum from core/providers/cpu/tensor/pad.h, cannot use that header because of nvcc/onnxruntime incompatibility enum class PadMode : int { Constant = 0, Reflect, Edge }; template <typename T, int pad_mode> __global__ void _PadKernel( const size_t shape_rank, const TArray<int64_t> input_dims, const TArray<int64_t> input_strides, const TArray<int64_t> lower_pads, const T pad_value, const T* input_data, const TArray<fast_divmod> fdm_output_strides, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); CUDA_LONG input_index = 0; CUDA_LONG output_index = id; bool use_pad_value = false; for (int dim = 0; dim < shape_rank && !use_pad_value; ++dim) { int out_coord, r; fdm_output_strides[dim].divmod(output_index, out_coord, r); output_index = r; int in_coord = 0; if (out_coord < lower_pads[dim]) { switch ((PadMode)pad_mode) { case PadMode::Constant: use_pad_value = true; break; case PadMode::Edge: in_coord = 0; break; case PadMode::Reflect: in_coord = lower_pads[dim] - out_coord; break; } } else if (out_coord >= lower_pads[dim] + input_dims[dim]) { switch ((PadMode)pad_mode) { case PadMode::Constant: use_pad_value = true; break; case PadMode::Edge: in_coord = input_dims[dim] - 1; break; case PadMode::Reflect: in_coord = input_dims[dim] - 2 - (out_coord - (lower_pads[dim] + input_dims[dim])); break; } } else { in_coord = out_coord - lower_pads[dim]; } input_index += input_strides[dim] * in_coord; } output_data[id] = use_pad_value ? (T)pad_value : input_data[input_index]; } template <typename T, int pad_mode> __global__ void _PadNCHWInputWithPaddingAlongHAndWKernel( const int64_t n, // Batch const int64_t c, // Channel const int64_t input_height, const int64_t output_height, const int64_t input_width, const int64_t output_width, const int64_t pad_height_start, const int64_t pad_width_start, const T pad_value, const T* input_data, T* output_data, const size_t N) { CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N); const int current_output_width = id % output_width; int nc_index = id / output_width; const int current_output_height = nc_index % output_height; nc_index /= output_height; int current_input_height = current_output_height - pad_height_start; int current_input_width = current_output_width - pad_width_start; switch ((PadMode)pad_mode) { case PadMode::Constant: output_data[id] = (current_input_height < 0 || current_input_width < 0 || current_input_height >= input_height || current_input_width >= input_width) ? 
pad_value : input_data[(nc_index * input_height + current_input_height) * input_width + current_input_width]; break; case PadMode::Edge: current_input_height = std::max(0, std::min(current_input_height, static_cast<int>(input_height - 1))); current_input_width = std::max(0, std::min(current_input_width, static_cast<int>(input_width - 1))); output_data[id] = input_data[(nc_index * input_height + current_input_height) * input_width + current_input_width]; break; case PadMode::Reflect: current_input_height = std::max(current_input_height, -current_input_height); current_input_height = std::min(static_cast<int>(current_input_height), 2 * static_cast<int>(input_height) - current_input_height - 2); current_input_width = std::max(current_input_width, -current_input_width); current_input_width = std::min(static_cast<int>(current_input_width), 2 * static_cast<int>(input_width) - current_input_width - 2); output_data[id] = input_data[(nc_index * input_height + current_input_height) * input_width + current_input_width]; break; } } template <typename T> void PadImpl( cudaStream_t stream, const size_t shape_rank, const TArray<int64_t>& input_dims, const TArray<int64_t>& input_strides, const TArray<int64_t>& lower_pads, const T pad_value, const int pad_mode, const T* input_data, const TArray<fast_divmod>& fdm_output_strides, T* output_data, const size_t N) { if (N == 0) // special case where there's a dim value of 0 in the output shape return; int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); switch (pad_mode) { case 0: _PadKernel<T, 0><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( shape_rank, input_dims, input_strides, lower_pads, pad_value, input_data, fdm_output_strides, output_data, N); break; case 1: _PadKernel<T, 1><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( shape_rank, input_dims, input_strides, lower_pads, pad_value, input_data, fdm_output_strides, output_data, N); break; case 2: _PadKernel<T, 2><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( shape_rank, input_dims, input_strides, lower_pads, pad_value, input_data, fdm_output_strides, output_data, N); break; } } template <typename T> void PadNCHWInputWithPaddingAlongHAndWImpl( cudaStream_t stream, const int64_t n, // Batch const int64_t c, // Channel const int64_t input_height, const int64_t output_height, const int64_t input_width, const int64_t output_width, const int64_t pad_height_start, const int64_t pad_width_start, const T pad_value, const int pad_mode, const T* input_data, T* output_data, const size_t N) { if (N == 0) // special case where there's a dim value of 0 in the output shape return; int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock)); switch (pad_mode) { case 0: _PadNCHWInputWithPaddingAlongHAndWKernel<T, 0><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( n, c, input_height, output_height, input_width, output_width, pad_height_start, pad_width_start, pad_value, input_data, output_data, N); break; case 1: _PadNCHWInputWithPaddingAlongHAndWKernel<T, 1><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( n, c, input_height, output_height, input_width, output_width, pad_height_start, pad_width_start, pad_value, input_data, output_data, N); break; case 2: _PadNCHWInputWithPaddingAlongHAndWKernel<T, 2><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>( n, c, input_height, output_height, input_width, output_width, pad_height_start, pad_width_start, pad_value, input_data, output_data, N); break; } 
} #define SPECIALIZED_IMPL(T) \ template void PadImpl<T>(cudaStream_t stream, const size_t shape_rank, \ const TArray<int64_t>& input_dims, const TArray<int64_t>& input_strides, \ const TArray<int64_t>& lower_pads, \ const T pad_value, \ const int pad_mode, \ const T* input_data, \ const TArray<fast_divmod>& fdm_output_strides, \ T* output_data, \ const size_t N); \ template void PadNCHWInputWithPaddingAlongHAndWImpl<T>(cudaStream_t stream, const int64_t n, const int64_t c, \ const int64_t input_height, const int64_t output_height, \ const int64_t input_width, const int64_t output_width, \ const int64_t pad_height_start, \ const int64_t pad_width_start, \ const T pad_value, \ const int pad_mode, \ const T* input_data, T* output_data, \ const size_t N); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) SPECIALIZED_IMPL(half) SPECIALIZED_IMPL(bool) } // namespace cuda } // namespace onnxruntime
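// Editor's sketch (not part of the dataset entry above): a minimal host-side
// rendering of the Edge/Reflect index remapping that the NCHW pad kernel above
// applies per spatial axis. The helper names are illustrative assumptions,
// not taken from the file.
#include <algorithm>
#include <cstdio>

static int remap_edge(int i, int extent) {
  // Edge mode clamps out-of-range coordinates to the nearest border element.
  return std::max(0, std::min(i, extent - 1));
}

static int remap_reflect(int i, int extent) {
  // Reflect mode mirrors around index 0 and around index extent - 1,
  // matching max(i, -i) followed by min(i, 2*extent - i - 2) in the kernel.
  i = std::max(i, -i);
  return std::min(i, 2 * extent - i - 2);
}

int main() {
  // For an input width of 4, output coordinates -2..5 all map back into [0, 3].
  for (int i = -2; i <= 5; ++i)
    std::printf("%2d -> edge %d, reflect %d\n", i, remap_edge(i, 4), remap_reflect(i, 4));
  return 0;
}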
9c07f45b1dcf71293f240543573656943b835756.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if __CUDACC_VER_MAJOR__ >= 8 #include "scope/scope.hpp" #include "args.hpp" #define NAME "Comm_Demand_Duplex_GPUGPU" template <bool NOOP = false> __global__ void gpu_write(char *ptr, const size_t count, const size_t stride) { if (NOOP) { return; } // global ID const size_t gx = blockIdx.x * blockDim.x + threadIdx.x; // lane ID 0-31 const size_t lx = gx & 31; // warp ID size_t wx = gx / 32; const size_t numWarps = (gridDim.x * blockDim.x + 32 - 1) / 32; if (0 == lx) { for (size_t i = wx * stride; i < count; i += numWarps * stride) { ptr[i] = 0; } } } auto Comm_Demand_Duplex_GPUGPU = [](benchmark::State &state, const int gpu0, const int gpu1) { if (gpu0 == gpu1) { state.SkipWithError(NAME " requuires two different GPUs"); return; } const size_t pageSize = page_size(); const auto bytes = 1ULL << static_cast<size_t>(state.range(0)); hipStream_t streams[2] = {nullptr}; char *ptrs[2] = {nullptr}; // start and end events in gpu0's stream. end0 will not be recorded until // after end1 hipEvent_t start = nullptr; hipEvent_t end0 = nullptr; // end event hipEvent_t end1 = nullptr; // initialize data structures for device `dev` #define INIT(dev) \ OR_SKIP_AND_RETURN(scope::cuda_reset_device(gpu##dev), "failed to reset"); \ OR_SKIP_AND_RETURN(hipSetDevice(gpu##dev), "failed to set"); \ OR_SKIP_AND_RETURN(hipStreamCreate(&streams[dev]), \ "failed to create stream"); \ OR_SKIP_AND_RETURN(hipMallocManaged(&ptrs[dev], bytes), \ "failed to hipMallocManaged"); \ OR_SKIP_AND_RETURN(hipMemset(ptrs[dev], 0, bytes), "failed to hipMemset") INIT(0); INIT(1); // record the "pimary" events in the stream associated with gpu0 OR_SKIP_AND_RETURN(hipSetDevice(gpu0), "failed to set gpu0"); OR_SKIP_AND_RETURN(hipEventCreate(&start), "failed to create start event") OR_SKIP_AND_RETURN(hipEventCreate(&end0), "failed to create end0") // record the end of the transfer task running on gpu1 OR_SKIP_AND_RETURN(hipSetDevice(gpu1), "failed to set gpu1"); OR_SKIP_AND_RETURN(hipEventCreate(&end1), "failed to create end1") for (auto _ : state) { // prefetch data to the source device before the transfers OR_SKIP_AND_BREAK(hipMemPrefetchAsync(ptrs[0], bytes, gpu1, streams[0]), ""); OR_SKIP_AND_BREAK(hipMemPrefetchAsync(ptrs[1], bytes, gpu0, streams[1]), ""); OR_SKIP_AND_BREAK(hipStreamSynchronize(streams[0]), ""); OR_SKIP_AND_BREAK(hipStreamSynchronize(streams[1]), ""); OR_SKIP_AND_BREAK(hipSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(hipEventRecord(start, streams[0]), ""); hipLaunchKernelGGL(( gpu_write), dim3(256), dim3(256), 0, streams[0], ptrs[0], bytes, pageSize); OR_SKIP_AND_BREAK(hipGetLastError(), ""); OR_SKIP_AND_BREAK(hipSetDevice(gpu1), ""); hipLaunchKernelGGL(( gpu_write), dim3(256), dim3(256), 0, streams[1], ptrs[1], bytes, pageSize); OR_SKIP_AND_BREAK(hipGetLastError(), ""); OR_SKIP_AND_BREAK(hipEventRecord(end1, streams[1]), ""); OR_SKIP_AND_BREAK(hipSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(hipStreamWaitEvent(streams[0], end1, 0 /*must be 0*/), ""); OR_SKIP_AND_BREAK(hipEventRecord(end0, streams[0]), ""); // once stream 0 is finished, we can compute the elapsed time OR_SKIP_AND_BREAK(hipStreamSynchronize(streams[0]), ""); float millis = 0; OR_SKIP_AND_BREAK(hipEventElapsedTime(&millis, start, end0), ""); state.SetIterationTime(millis / 1000); } state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes) * 2); state.counters["bytes"] = bytes; state.counters["gpu0"] = gpu0; state.counters["gpu1"] = gpu1; 
OR_SKIP_AND_RETURN(hipEventDestroy(start), ""); OR_SKIP_AND_RETURN(hipEventDestroy(end0), ""); OR_SKIP_AND_RETURN(hipEventDestroy(end1), ""); for (auto s : streams) { OR_SKIP_AND_RETURN(hipStreamDestroy(s), ""); } for (auto p : ptrs) { OR_SKIP_AND_RETURN(hipFree(p), ""); } }; static void registerer() { for (size_t i : scope::system::cuda_devices()) { for (size_t j : scope::system::cuda_devices()) { if (i < j) { std::string name = std::string(NAME) + "/" + std::to_string(i) + "/" + std::to_string(j); benchmark::RegisterBenchmark(name.c_str(), Comm_Demand_Duplex_GPUGPU, i, j) ->SMALL_ARGS() ->UseManualTime(); } } } } SCOPE_AFTER_INIT(registerer, NAME); #endif // __CUDACC_VER_MAJOR__ >= 8
9c07f45b1dcf71293f240543573656943b835756.cu
#if __CUDACC_VER_MAJOR__ >= 8 #include "scope/scope.hpp" #include "args.hpp" #define NAME "Comm_Demand_Duplex_GPUGPU" template <bool NOOP = false> __global__ void gpu_write(char *ptr, const size_t count, const size_t stride) { if (NOOP) { return; } // global ID const size_t gx = blockIdx.x * blockDim.x + threadIdx.x; // lane ID 0-31 const size_t lx = gx & 31; // warp ID size_t wx = gx / 32; const size_t numWarps = (gridDim.x * blockDim.x + 32 - 1) / 32; if (0 == lx) { for (size_t i = wx * stride; i < count; i += numWarps * stride) { ptr[i] = 0; } } } auto Comm_Demand_Duplex_GPUGPU = [](benchmark::State &state, const int gpu0, const int gpu1) { if (gpu0 == gpu1) { state.SkipWithError(NAME " requuires two different GPUs"); return; } const size_t pageSize = page_size(); const auto bytes = 1ULL << static_cast<size_t>(state.range(0)); cudaStream_t streams[2] = {nullptr}; char *ptrs[2] = {nullptr}; // start and end events in gpu0's stream. end0 will not be recorded until // after end1 cudaEvent_t start = nullptr; cudaEvent_t end0 = nullptr; // end event cudaEvent_t end1 = nullptr; // initialize data structures for device `dev` #define INIT(dev) \ OR_SKIP_AND_RETURN(scope::cuda_reset_device(gpu##dev), "failed to reset"); \ OR_SKIP_AND_RETURN(cudaSetDevice(gpu##dev), "failed to set"); \ OR_SKIP_AND_RETURN(cudaStreamCreate(&streams[dev]), \ "failed to create stream"); \ OR_SKIP_AND_RETURN(cudaMallocManaged(&ptrs[dev], bytes), \ "failed to cudaMallocManaged"); \ OR_SKIP_AND_RETURN(cudaMemset(ptrs[dev], 0, bytes), "failed to cudaMemset") INIT(0); INIT(1); // record the "pimary" events in the stream associated with gpu0 OR_SKIP_AND_RETURN(cudaSetDevice(gpu0), "failed to set gpu0"); OR_SKIP_AND_RETURN(cudaEventCreate(&start), "failed to create start event") OR_SKIP_AND_RETURN(cudaEventCreate(&end0), "failed to create end0") // record the end of the transfer task running on gpu1 OR_SKIP_AND_RETURN(cudaSetDevice(gpu1), "failed to set gpu1"); OR_SKIP_AND_RETURN(cudaEventCreate(&end1), "failed to create end1") for (auto _ : state) { // prefetch data to the source device before the transfers OR_SKIP_AND_BREAK(cudaMemPrefetchAsync(ptrs[0], bytes, gpu1, streams[0]), ""); OR_SKIP_AND_BREAK(cudaMemPrefetchAsync(ptrs[1], bytes, gpu0, streams[1]), ""); OR_SKIP_AND_BREAK(cudaStreamSynchronize(streams[0]), ""); OR_SKIP_AND_BREAK(cudaStreamSynchronize(streams[1]), ""); OR_SKIP_AND_BREAK(cudaSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(cudaEventRecord(start, streams[0]), ""); gpu_write<<<256, 256, 0, streams[0]>>>(ptrs[0], bytes, pageSize); OR_SKIP_AND_BREAK(cudaGetLastError(), ""); OR_SKIP_AND_BREAK(cudaSetDevice(gpu1), ""); gpu_write<<<256, 256, 0, streams[1]>>>(ptrs[1], bytes, pageSize); OR_SKIP_AND_BREAK(cudaGetLastError(), ""); OR_SKIP_AND_BREAK(cudaEventRecord(end1, streams[1]), ""); OR_SKIP_AND_BREAK(cudaSetDevice(gpu0), ""); OR_SKIP_AND_BREAK(cudaStreamWaitEvent(streams[0], end1, 0 /*must be 0*/), ""); OR_SKIP_AND_BREAK(cudaEventRecord(end0, streams[0]), ""); // once stream 0 is finished, we can compute the elapsed time OR_SKIP_AND_BREAK(cudaStreamSynchronize(streams[0]), ""); float millis = 0; OR_SKIP_AND_BREAK(cudaEventElapsedTime(&millis, start, end0), ""); state.SetIterationTime(millis / 1000); } state.SetBytesProcessed(int64_t(state.iterations()) * int64_t(bytes) * 2); state.counters["bytes"] = bytes; state.counters["gpu0"] = gpu0; state.counters["gpu1"] = gpu1; OR_SKIP_AND_RETURN(cudaEventDestroy(start), ""); OR_SKIP_AND_RETURN(cudaEventDestroy(end0), ""); OR_SKIP_AND_RETURN(cudaEventDestroy(end1), ""); for 
(auto s : streams) { OR_SKIP_AND_RETURN(cudaStreamDestroy(s), ""); } for (auto p : ptrs) { OR_SKIP_AND_RETURN(cudaFree(p), ""); } }; static void registerer() { for (size_t i : scope::system::cuda_devices()) { for (size_t j : scope::system::cuda_devices()) { if (i < j) { std::string name = std::string(NAME) + "/" + std::to_string(i) + "/" + std::to_string(j); benchmark::RegisterBenchmark(name.c_str(), Comm_Demand_Duplex_GPUGPU, i, j) ->SMALL_ARGS() ->UseManualTime(); } } } } SCOPE_AFTER_INIT(registerer, NAME); #endif // __CUDACC_VER_MAJOR__ >= 8
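// Editor's sketch (not part of the dataset entry above): the benchmark's
// gpu_write kernel lets only lane 0 of each warp write one byte per stride,
// so each page is demand-migrated by a single thread. Below is a stripped-down,
// self-contained version of that access pattern; the 4 KiB stride stands in
// for the benchmark's page_size() and is an assumption here.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void touch_pages(char *ptr, size_t count, size_t stride) {
  size_t gx = blockIdx.x * blockDim.x + threadIdx.x;
  size_t lane = gx & 31;                                 // lane ID within the warp
  size_t warp = gx / 32;                                 // global warp ID
  size_t numWarps = (gridDim.x * blockDim.x + 31) / 32;
  if (lane == 0) {
    for (size_t i = warp * stride; i < count; i += numWarps * stride)
      ptr[i] = 0;                                        // fault in one page per write
  }
}

int main() {
  const size_t bytes = 1ull << 24;
  const size_t stride = 4096;                            // assumed page size
  char *p = nullptr;
  cudaMallocManaged(&p, bytes);
  touch_pages<<<256, 256>>>(p, bytes, stride);
  cudaDeviceSynchronize();
  cudaFree(p);
  std::printf("touched %zu bytes in %zu-byte strides\n", bytes, stride);
  return 0;
}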
15e529e328e082aeeb159a9b92602d0e25877933.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2017 by Contributors * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file psroi_pooling.cu * \brief psroi pooling operator * \author Yi Li, Tairui Chen, Guodong Zhang, Jifeng Dai */ #include "./psroi_pooling-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" #define PSROIPOOLING_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { template <typename DType> __global__ void PSROIPoolForwardKernel( const int count, const DType* bottom_data, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const DType* bottom_rois, const int output_dim, const int group_size, DType* top_data, DType* mapping_channel) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int hstart = floor(static_cast<DType>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; DType out_sum = 0; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; out_sum += offset_bottom_data[bottom_index]; } } DType bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? (DType)0. 
: out_sum/bin_area; mapping_channel[index] = c; } } template<typename DType> inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_, const int group_size_) { // LOG(INFO) << "PSROIPoolForward"; const DType *bottom_data = data.dptr_; const DType *bottom_rois = bbox.dptr_; DType *top_data = out.dptr_; DType *mapping_channel_ptr = mapping_channel.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError()); } template <typename DType> __global__ void PSROIPoolBackwardAccKernel( const int count, const DType* top_diff, const DType* mapping_channel, const int num_rois, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, DType* bottom_diff, const DType* bottom_rois) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int hstart = floor(static_cast<DType>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; DType bin_area = (hend - hstart)*(wend - wstart); DType diff_val = is_empty ? (DType)0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; // mxnet_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); atomicAdd(offset_bottom_diff + bottom_index, diff_val); } } } } template<typename DType> inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_) { // LOG(INFO) << "PSROIPoolBackward"; const DType *top_diff = out_grad.dptr_; const DType *bottom_rois = bbox.dptr_; DType *bottom_diff = in_grad.dptr_; DType *mapping_channel_ptr = mapping_channel.dptr_; const int count = out_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, top_diff, mapping_channel_ptr, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim_, bottom_diff, bottom_rois); PSROIPOOLING_CUDA_CHECK(hipPeekAtLastError()); } } // namespace cuda template<typename DType> inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_, const int group_size_) { cuda::PSROIPoolForward(out, data, bbox, mapping_channel, spatial_scale, output_dim_, group_size_); } template<typename DType> inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_) { cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, mapping_channel, spatial_scale, output_dim_); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PSROIPoolingOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
15e529e328e082aeeb159a9b92602d0e25877933.cu
/*! * Copyright (c) 2017 by Contributors * Copyright (c) 2017 Microsoft * Licensed under The Apache-2.0 License [see LICENSE for details] * \file psroi_pooling.cu * \brief psroi pooling operator * \author Yi Li, Tairui Chen, Guodong Zhang, Jifeng Dai */ #include "./psroi_pooling-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #include "../../common/cuda_utils.h" #include "../mxnet_op.h" #define PSROIPOOLING_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { template <typename DType> __global__ void PSROIPoolForwardKernel( const int count, const DType* bottom_data, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const DType* bottom_rois, const int output_dim, const int group_size, DType* top_data, DType* mapping_channel) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int hstart = floor(static_cast<DType>(ph) * bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = floor(static_cast<DType>(pw)* group_size / pooled_width); int gh = floor(static_cast<DType>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; const DType* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; DType out_sum = 0; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; out_sum += offset_bottom_data[bottom_index]; } } DType bin_area = (hend - hstart)*(wend - wstart); top_data[index] = is_empty? (DType)0. 
: out_sum/bin_area; mapping_channel[index] = c; } } template<typename DType> inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_, const int group_size_) { // LOG(INFO) << "PSROIPoolForward"; const DType *bottom_data = data.dptr_; const DType *bottom_rois = bbox.dptr_; DType *top_data = out.dptr_; DType *mapping_channel_ptr = mapping_channel.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); PSROIPoolForwardKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError()); } template <typename DType> __global__ void PSROIPoolBackwardAccKernel( const int count, const DType* top_diff, const DType* mapping_channel, const int num_rois, const DType spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, DType* bottom_diff, const DType* bottom_rois) { CUDA_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const DType* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; DType roi_start_w = static_cast<DType>(round(offset_bottom_rois[1])) * spatial_scale; DType roi_start_h = static_cast<DType>(round(offset_bottom_rois[2])) * spatial_scale; DType roi_end_w = static_cast<DType>(round(offset_bottom_rois[3]) + 1.) * spatial_scale; DType roi_end_h = static_cast<DType>(round(offset_bottom_rois[4]) + 1.) * spatial_scale; // Force too small ROIs to be 1x1 DType roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 DType roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom DType bin_size_h = roi_height / static_cast<DType>(pooled_height); DType bin_size_w = roi_width / static_cast<DType>(pooled_width); int hstart = floor(static_cast<DType>(ph)* bin_size_h + roi_start_h); int wstart = floor(static_cast<DType>(pw)* bin_size_w + roi_start_w); int hend = ceil(static_cast<DType>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil(static_cast<DType>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; DType* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; DType bin_area = (hend - hstart)*(wend - wstart); DType diff_val = is_empty ? (DType)0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; // mxnet_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); atomicAdd(offset_bottom_diff + bottom_index, diff_val); } } } } template<typename DType> inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_) { // LOG(INFO) << "PSROIPoolBackward"; const DType *top_diff = out_grad.dptr_; const DType *bottom_rois = bbox.dptr_; DType *bottom_diff = in_grad.dptr_; DType *mapping_channel_ptr = mapping_channel.dptr_; const int count = out_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); PSROIPoolBackwardAccKernel<DType> << <mxnet::op::mxnet_op::cuda_get_num_blocks(count), kBaseThreadNum, 0, stream >> >( count, top_diff, mapping_channel_ptr, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, output_dim_, bottom_diff, bottom_rois); PSROIPOOLING_CUDA_CHECK(cudaPeekAtLastError()); } } // namespace cuda template<typename DType> inline void PSROIPoolForward(const Tensor<gpu, 4, DType> &out, const Tensor<gpu, 4, DType> &data, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_, const int group_size_) { cuda::PSROIPoolForward(out, data, bbox, mapping_channel, spatial_scale, output_dim_, group_size_); } template<typename DType> inline void PSROIPoolBackwardAcc(const Tensor<gpu, 4, DType> &in_grad, const Tensor<gpu, 4, DType> &out_grad, const Tensor<gpu, 2, DType> &bbox, const Tensor<gpu, 4, DType> &mapping_channel, const float spatial_scale, const int output_dim_) { cuda::PSROIPoolBackwardAcc(in_grad, out_grad, bbox, mapping_channel, spatial_scale, output_dim_); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(PSROIPoolingParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new PSROIPoolingOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
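// Editor's sketch (not part of the dataset entry above): for each output cell
// the forward kernel above picks one position-sensitive channel
// c = (ctop*group_size + gh)*group_size + gw and averages it over the pooled
// bin. A host-side walk-through of that indexing for a single cell; the sizes
// and data are illustrative assumptions.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const int group_size = 3, pooled = 3, height = 9, width = 9;
  std::vector<float> plane(height * width, 1.0f);        // one bottom channel, all ones
  const int ph = 1, pw = 2, ctop = 0;                    // output cell under inspection
  int gh = std::min(std::max(ph * group_size / pooled, 0), group_size - 1);
  int gw = std::min(std::max(pw * group_size / pooled, 0), group_size - 1);
  int c = (ctop * group_size + gh) * group_size + gw;    // channel read for this cell
  // With a 9x9 ROI and 3x3 pooling, cell (1, 2) covers rows [3, 6) and cols [6, 9).
  int hstart = 3, hend = 6, wstart = 6, wend = 9;
  float sum = 0.0f;
  for (int h = hstart; h < hend; ++h)
    for (int w = wstart; w < wend; ++w)
      sum += plane[h * width + w];
  float bin_area = float(hend - hstart) * float(wend - wstart);
  std::printf("channel %d, bin average %.2f\n", c, sum / bin_area);
  return 0;
}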
e9e95664981e8e5c6d89ed5112aa61d4d2b78f9d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]

#include "common.h"

__global__ void d_render_regular(uint *d_output, uint imageW, uint imageH,
                                 float density, float brightness,
                                 float transferOffset, float transferScale,
                                 float transferWeight = 0.0f)
{
  __requires(imageW == 32*16 /*gridDim.x*blockDim.x*/);
  d_render<TF_SINGLE_1D>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
e9e95664981e8e5c6d89ed5112aa61d4d2b78f9d.cu
//pass
//--gridDim=[32,32,1] --blockDim=[16,16,1]

#include "common.h"

__global__ void d_render_regular(uint *d_output, uint imageW, uint imageH,
                                 float density, float brightness,
                                 float transferOffset, float transferScale,
                                 float transferWeight = 0.0f)
{
  __requires(imageW == 32*16 /*gridDim.x*blockDim.x*/);
  d_render<TF_SINGLE_1D>(d_output,imageW,imageH,density,brightness,transferOffset,transferScale,transferWeight);
}
6d892e30e030a976223186b051070dcdb2114c54.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include "paddle/phi/kernels/matrix_rank_tol_kernel.h" #include <algorithm> #include <vector> #include "paddle/fluid/memory/memory.h" #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/abs_kernel.h" #include "paddle/phi/kernels/elementwise_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/compare_functors.h" #include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h" #include "paddle/phi/kernels/reduce_kernel.h" namespace phi { template <typename T> void GesvdjBatched(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, T* A, T* U, T* V, T* S, int* info, int thin_UV = 1); template <typename T> void SyevjBatched(const phi::GPUContext& dev_ctx, int batchSize, int n, T* A, T* W, int* info); template <> void GesvdjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, float* A, float* U, float* V, float* S, int* info, int thin_UV) { // do not compute singular vectors const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; hipsolverGesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnSgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void GesvdjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, double* A, double* U, double* V, double* S, int* info, int thin_UV) { // do not compute singular vectors const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; hipsolverGesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); // check the error info int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void SyevjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int n, float* A, float* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; // matrix is saved as column-major in cusolver. // numpy and torch use lower triangle to compute eigenvalues, so here use // upper triangle hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; hipsolverSyevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params)); } template <> void SyevjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int n, double* A, double* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_NOVECTOR; // upper triangle of A is stored hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; hipsolverSyevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDestroySyevjInfo(params)); } template <typename T, typename Context> void MatrixRankTolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& atol_tensor, bool use_default_tol, bool hermitian, DenseTensor* out) { auto* x_data = x.data<T>(); dev_ctx.template Alloc<int64_t>(out); auto dim_x = x.dims(); auto dim_out = out->dims(); int rows = dim_x[dim_x.size() - 2]; int cols = dim_x[dim_x.size() - 1]; int k = ::min(rows, cols); auto numel = x.numel(); int batches = numel / (rows * cols); T rtol_T = 0; if (use_default_tol) { rtol_T = std::numeric_limits<T>::epsilon() * ::max(rows, cols); } // Must Copy X once, because the gesvdj will destory the content when exit. 
DenseTensor x_tmp; paddle::framework::TensorCopy(x, dev_ctx.GetPlace(), &x_tmp); auto info = paddle::memory::Alloc(dev_ctx, sizeof(int) * batches); int* info_ptr = reinterpret_cast<int*>(info->ptr()); DenseTensor eigenvalue_tensor; eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k)); auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor); if (hermitian) { SyevjBatched<T>( dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr); phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor); } else { DenseTensor U, VH; U.Resize(detail::GetUDDim(dim_x, k)); VH.Resize(detail::GetVHDDim(dim_x, k)); auto* u_data = dev_ctx.template Alloc<T>(&U); auto* vh_data = dev_ctx.template Alloc<T>(&VH); GesvdjBatched<T>(dev_ctx, batches, cols, rows, k, x_tmp.data<T>(), vh_data, u_data, eigenvalue_data, info_ptr, 1); } DenseTensor max_eigenvalue_tensor; dev_ctx.template Alloc<T>(&max_eigenvalue_tensor); max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims())); phi::MaxKernel<T, Context>(dev_ctx, eigenvalue_tensor, std::vector<int64_t>{-1}, false, &max_eigenvalue_tensor); DenseTensor temp_rtol_tensor; temp_rtol_tensor = phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T)); DenseTensor rtol_tensor = phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor); DenseTensor tol_tensor; tol_tensor.Resize(dim_out); dev_ctx.template Alloc<T>(&tol_tensor); funcs::ElementwiseCompute<GreaterElementFunctor<T>, T, T>( dev_ctx, atol_tensor, rtol_tensor, -1, GreaterElementFunctor<T>(), &tol_tensor); tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1)); DenseTensor compare_result; compare_result.Resize(detail::NewAxisDim(dim_out, k)); dev_ctx.template Alloc<int64_t>(&compare_result); int axis = -1; funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>( dev_ctx, eigenvalue_tensor, tol_tensor, axis, funcs::GreaterThanFunctor<T, int64_t>(), &compare_result); phi::SumKernel<int64_t>(dev_ctx, compare_result, std::vector<int64_t>{-1}, compare_result.dtype(), false, out); } } // namespace phi PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only GPU, ALL_LAYOUT, phi::MatrixRankTolKernel, float, double) {} #endif // not PADDLE_WITH_HIP
6d892e30e030a976223186b051070dcdb2114c54.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include "paddle/phi/kernels/matrix_rank_tol_kernel.h" #include <algorithm> #include <vector> #include "paddle/fluid/memory/memory.h" #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/abs_kernel.h" #include "paddle/phi/kernels/elementwise_kernel.h" #include "paddle/phi/kernels/full_kernel.h" #include "paddle/phi/kernels/funcs/broadcast_function.h" #include "paddle/phi/kernels/funcs/compare_functors.h" #include "paddle/phi/kernels/impl/matrix_rank_kernel_impl.h" #include "paddle/phi/kernels/reduce_kernel.h" namespace phi { template <typename T> void GesvdjBatched(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, T* A, T* U, T* V, T* S, int* info, int thin_UV = 1); template <typename T> void SyevjBatched(const phi::GPUContext& dev_ctx, int batchSize, int n, T* A, T* W, int* info); template <> void GesvdjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, float* A, float* U, float* V, float* S, int* info, int thin_UV) { // do not compute singular vectors const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; gesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnSgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void GesvdjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int m, int n, int k, double* A, double* U, double* V, double* S, int* info, int thin_UV) { // do not compute singular vectors const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; gesvdjInfo_t gesvdj_params = NULL; int lda = m; int ldu = m; int ldt = n; int lwork = 0; auto handle = dev_ctx.cusolver_dn_handle(); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnCreateGesvdjInfo(&gesvdj_params)); PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDgesvdj_bufferSize(handle, jobz, thin_UV, m, n, A, lda, S, U, ldu, V, ldt, &lwork, gesvdj_params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); int stride_A = lda * n; int stride_U = ldu * (thin_UV ? k : m); int stride_V = ldt * (thin_UV ? k : n); for (int i = 0; i < batchSize; ++i) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDgesvdj(handle, jobz, thin_UV, m, n, A + stride_A * i, lda, S + k * i, U + stride_U * i, ldu, V + stride_V * i, ldt, workspace_ptr, lwork, info, gesvdj_params)); // check the error info int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver SVD is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDestroyGesvdjInfo(gesvdj_params)); } template <> void SyevjBatched<float>(const phi::GPUContext& dev_ctx, int batchSize, int n, float* A, float* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; // matrix is saved as column-major in cusolver. // numpy and torch use lower triangle to compute eigenvalues, so here use // upper triangle cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(float)); float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. 
[%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params)); } template <> void SyevjBatched<double>(const phi::GPUContext& dev_ctx, int batchSize, int n, double* A, double* W, int* info) { auto handle = dev_ctx.cusolver_dn_handle(); // Compute eigenvalues only const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_NOVECTOR; // upper triangle of A is stored cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; int lda = n; int stride_A = lda * n; int lwork = 0; syevjInfo_t params = NULL; PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnCreateSyevjInfo(&params)); PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj_bufferSize( handle, jobz, uplo, n, A, lda, W, &lwork, params)); auto workspace = paddle::memory::Alloc(dev_ctx, lwork * sizeof(double)); double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr()); for (int i = 0; i < batchSize; i++) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDsyevj(handle, jobz, uplo, n, A + stride_A * i, lda, W + n * i, workspace_ptr, lwork, info, params)); int error_info; paddle::memory::Copy(phi::CPUPlace(), &error_info, dev_ctx.GetPlace(), info, sizeof(int), dev_ctx.stream()); PADDLE_ENFORCE_EQ( error_info, 0, phi::errors::PreconditionNotMet( "For batch [%d]: CUSolver eigenvalues is not zero. [%d]", i, error_info)); } PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDestroySyevjInfo(params)); } template <typename T, typename Context> void MatrixRankTolKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& atol_tensor, bool use_default_tol, bool hermitian, DenseTensor* out) { auto* x_data = x.data<T>(); dev_ctx.template Alloc<int64_t>(out); auto dim_x = x.dims(); auto dim_out = out->dims(); int rows = dim_x[dim_x.size() - 2]; int cols = dim_x[dim_x.size() - 1]; int k = std::min(rows, cols); auto numel = x.numel(); int batches = numel / (rows * cols); T rtol_T = 0; if (use_default_tol) { rtol_T = std::numeric_limits<T>::epsilon() * std::max(rows, cols); } // Must Copy X once, because the gesvdj will destory the content when exit. 
DenseTensor x_tmp; paddle::framework::TensorCopy(x, dev_ctx.GetPlace(), &x_tmp); auto info = paddle::memory::Alloc(dev_ctx, sizeof(int) * batches); int* info_ptr = reinterpret_cast<int*>(info->ptr()); DenseTensor eigenvalue_tensor; eigenvalue_tensor.Resize(detail::GetEigenvalueDim(dim_x, k)); auto* eigenvalue_data = dev_ctx.template Alloc<T>(&eigenvalue_tensor); if (hermitian) { SyevjBatched<T>( dev_ctx, batches, rows, x_tmp.data<T>(), eigenvalue_data, info_ptr); phi::AbsKernel<T, Context>(dev_ctx, eigenvalue_tensor, &eigenvalue_tensor); } else { DenseTensor U, VH; U.Resize(detail::GetUDDim(dim_x, k)); VH.Resize(detail::GetVHDDim(dim_x, k)); auto* u_data = dev_ctx.template Alloc<T>(&U); auto* vh_data = dev_ctx.template Alloc<T>(&VH); GesvdjBatched<T>(dev_ctx, batches, cols, rows, k, x_tmp.data<T>(), vh_data, u_data, eigenvalue_data, info_ptr, 1); } DenseTensor max_eigenvalue_tensor; dev_ctx.template Alloc<T>(&max_eigenvalue_tensor); max_eigenvalue_tensor.Resize(detail::RemoveLastDim(eigenvalue_tensor.dims())); phi::MaxKernel<T, Context>(dev_ctx, eigenvalue_tensor, std::vector<int64_t>{-1}, false, &max_eigenvalue_tensor); DenseTensor temp_rtol_tensor; temp_rtol_tensor = phi::Full<T, Context>(dev_ctx, {1}, static_cast<T>(rtol_T)); DenseTensor rtol_tensor = phi::Multiply<T>(dev_ctx, temp_rtol_tensor, max_eigenvalue_tensor); DenseTensor tol_tensor; tol_tensor.Resize(dim_out); dev_ctx.template Alloc<T>(&tol_tensor); funcs::ElementwiseCompute<GreaterElementFunctor<T>, T, T>( dev_ctx, atol_tensor, rtol_tensor, -1, GreaterElementFunctor<T>(), &tol_tensor); tol_tensor.Resize(detail::NewAxisDim(tol_tensor.dims(), 1)); DenseTensor compare_result; compare_result.Resize(detail::NewAxisDim(dim_out, k)); dev_ctx.template Alloc<int64_t>(&compare_result); int axis = -1; funcs::ElementwiseCompute<funcs::GreaterThanFunctor<T, int64_t>, T, int64_t>( dev_ctx, eigenvalue_tensor, tol_tensor, axis, funcs::GreaterThanFunctor<T, int64_t>(), &compare_result); phi::SumKernel<int64_t>(dev_ctx, compare_result, std::vector<int64_t>{-1}, compare_result.dtype(), false, out); } } // namespace phi PD_REGISTER_KERNEL(matrix_rank_tol, // cuda_only GPU, ALL_LAYOUT, phi::MatrixRankTolKernel, float, double) {} #endif // not PADDLE_WITH_HIP
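// Editor's sketch (not part of the dataset entry above): after the batched SVD
// (or eigendecomposition in the hermitian case), the kernel above counts values
// larger than max(atol, rtol * sigma_max), with the default
// rtol = eps * max(rows, cols). A host-side rendering of that thresholding on
// assumed singular values.
#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

int main() {
  const int rows = 3, cols = 4;
  std::vector<double> sigma = {5.0, 1e-3, 1e-16};        // illustrative SVD output
  const double atol = 0.0;
  const double rtol = std::numeric_limits<double>::epsilon() * std::max(rows, cols);
  const double sigma_max = *std::max_element(sigma.begin(), sigma.end());
  const double tol = std::max(atol, rtol * sigma_max);
  const long rank = static_cast<long>(
      std::count_if(sigma.begin(), sigma.end(), [&](double s) { return s > tol; }));
  std::printf("tol = %g, rank = %ld\n", tol, rank);
  return 0;
}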
701c9e49e2a50478ff53b75a4297e10d6cab12bf.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************* * File: fc_layer.cu * * *************************************************************/ #include <iostream> #include <cmath> #include "hip/hip_runtime.h" #include "rocblas.h" #include "fc_layer.h" #include "../components/device.h" #include "../components/helper.h" /************************************************************* * PUBLIC FUNCTIONS *************************************************************/ FC_Layer::FC_Layer(int n_inputs, int n_outputs) { total_inputs = n_inputs; total_outputs = n_outputs; Helper::cuda_array_random_allocate( &w , Layer::HALF_FLOAT_TYPE, n_inputs * n_outputs ); Helper::cuda_array_random_allocate( &b , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &z , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &w_grad , Layer::HALF_FLOAT_TYPE, n_inputs * n_outputs ); Helper::cuda_array_zero_allocate( &b_grad , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &output , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &err , Layer::HALF_FLOAT_TYPE, n_inputs ); Helper::cuda_array_zero_allocate( &act_dvt, Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &err_dvt, Layer::HALF_FLOAT_TYPE, n_outputs ); } FC_Layer::~FC_Layer() { hipFree(w); hipFree(b); hipFree(z); hipFree(w_grad); hipFree(b_grad); hipFree(output); hipFree(err); hipFree(act_dvt); hipFree(err_dvt); } layer_param_t FC_Layer::forward_propagation(layer_param_t in) { // Save the input input = in; // Calculate the net // z = x.w + b Helper::net_calc(input, w, b, z, total_inputs, total_outputs); // Apply Sigmoid activate function // output = sigmoid(z) Helper::sigmoid_calc(z, output, total_outputs); // Return this layer's output for further calculation in next layer return output; } layer_param_t FC_Layer::backward_propagation(layer_param_t error) { // Calculate derivative of neuron output // dO/dnet = sigmoid'(z) Helper::sigmoid_dev_calc(output, act_dvt, total_outputs); // Calculate error derivative // dE/dnet = dE/dO x dO/dnet // dE/dO is error signal from next layer Helper::err_dev_calc(error, act_dvt, err_dvt, total_outputs); // Accumulate gradients // dw = dw + input.dE/dnet // db = db + dE/dnet Helper::accum_w_grad(input, err_dvt, w_grad, total_inputs, total_outputs); Helper::accum_b_grad(err_dvt, b_grad, total_outputs); // Calculate error signal propagated to previous layer // error_signal = dE/dnet * w Helper::err_signal_calc(w, err_dvt, err, total_inputs, total_outputs); // Back propagate this layer's error signal return err; } void FC_Layer::update(float eta, int batch_size) { // Update weights and biases and clear gradients // w = w - dw * (eta/batch_size) // b = b - db * (eta/batch_size) float alpha = -eta / batch_size; Helper::update_param(w, w_grad, alpha, total_inputs * total_outputs); Helper::update_param(b, b_grad, alpha, total_outputs); } /************************************************************* * PRIVATE FUNCTIONS *************************************************************/
701c9e49e2a50478ff53b75a4297e10d6cab12bf.cu
/************************************************************* * File: fc_layer.cu * * *************************************************************/ #include <iostream> #include <cmath> #include "cuda_runtime.h" #include "cublas_v2.h" #include "fc_layer.h" #include "../components/device.h" #include "../components/helper.h" /************************************************************* * PUBLIC FUNCTIONS *************************************************************/ FC_Layer::FC_Layer(int n_inputs, int n_outputs) { total_inputs = n_inputs; total_outputs = n_outputs; Helper::cuda_array_random_allocate( &w , Layer::HALF_FLOAT_TYPE, n_inputs * n_outputs ); Helper::cuda_array_random_allocate( &b , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &z , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &w_grad , Layer::HALF_FLOAT_TYPE, n_inputs * n_outputs ); Helper::cuda_array_zero_allocate( &b_grad , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &output , Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &err , Layer::HALF_FLOAT_TYPE, n_inputs ); Helper::cuda_array_zero_allocate( &act_dvt, Layer::HALF_FLOAT_TYPE, n_outputs ); Helper::cuda_array_zero_allocate( &err_dvt, Layer::HALF_FLOAT_TYPE, n_outputs ); } FC_Layer::~FC_Layer() { cudaFree(w); cudaFree(b); cudaFree(z); cudaFree(w_grad); cudaFree(b_grad); cudaFree(output); cudaFree(err); cudaFree(act_dvt); cudaFree(err_dvt); } layer_param_t FC_Layer::forward_propagation(layer_param_t in) { // Save the input input = in; // Calculate the net // z = x.w + b Helper::net_calc(input, w, b, z, total_inputs, total_outputs); // Apply Sigmoid activate function // output = sigmoid(z) Helper::sigmoid_calc(z, output, total_outputs); // Return this layer's output for further calculation in next layer return output; } layer_param_t FC_Layer::backward_propagation(layer_param_t error) { // Calculate derivative of neuron output // dO/dnet = sigmoid'(z) Helper::sigmoid_dev_calc(output, act_dvt, total_outputs); // Calculate error derivative // dE/dnet = dE/dO x dO/dnet // dE/dO is error signal from next layer Helper::err_dev_calc(error, act_dvt, err_dvt, total_outputs); // Accumulate gradients // dw = dw + input.dE/dnet // db = db + dE/dnet Helper::accum_w_grad(input, err_dvt, w_grad, total_inputs, total_outputs); Helper::accum_b_grad(err_dvt, b_grad, total_outputs); // Calculate error signal propagated to previous layer // error_signal = dE/dnet * w Helper::err_signal_calc(w, err_dvt, err, total_inputs, total_outputs); // Back propagate this layer's error signal return err; } void FC_Layer::update(float eta, int batch_size) { // Update weights and biases and clear gradients // w = w - dw * (eta/batch_size) // b = b - db * (eta/batch_size) float alpha = -eta / batch_size; Helper::update_param(w, w_grad, alpha, total_inputs * total_outputs); Helper::update_param(b, b_grad, alpha, total_outputs); } /************************************************************* * PRIVATE FUNCTIONS *************************************************************/
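// Editor's sketch (not part of the dataset entry above): the layer defers its
// math to Helper::* device routines, so only the update rule is visible in the
// file: param += grad * (-eta / batch_size). A tiny host-side rendering of that
// step, assuming Helper::update_param is an axpy-style accumulation; the values
// are illustrative.
#include <cstdio>

int main() {
  float w[3]      = {0.50f, -0.20f, 0.10f};
  float w_grad[3] = {0.04f, -0.08f, 0.12f};   // gradients accumulated over the batch
  const float eta = 0.1f;
  const int batch_size = 4;
  const float alpha = -eta / batch_size;      // same sign convention as update()
  for (int i = 0; i < 3; ++i)
    w[i] += alpha * w_grad[i];
  std::printf("w = {%.4f, %.4f, %.4f}\n", w[0], w[1], w[2]);
  return 0;
}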
d985597546ce535a4f47e59922cb77967863d29c.hip
// !!! This is a file automatically generated by hipify!!! /* * * compiling: * nvcc -lglut -LGLEW life.cuda.cu -o life -g -G * * -g -G - debug options * * for it's work: * export LD_LIBRARY_PATH=:/usr/local/cuda/lib * export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/libnvvp/ * * cuda-gdb */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <memory.h> #define FIELD_WIDTH 10 #define FIELD_HEIGHT 10 #define NUMBER_OF_THREADS 100 float * state_first; // on PC float * state_second; // arrays float * dev_first_state; // on Card float * dev_second_state; // arrays int * dev_width; int * dev_height; int width = FIELD_WIDTH; int height = FIELD_HEIGHT; __global__ void kernel(float * first, float * second , int * width, int * height) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(id < (*width)*(*height)) { int numberofneighbours = 0; int num = 0; // change to num += ... /* if(*(first + id + 1) == 1) num++; if(*(first + id - 1) == 1) num++; if(*(first + id + *height) == 1) num++; if(*(first + id - *height) == 1) num++; if(*(first + id + *height + 1) == 1) num++; if(*(first + id + *height - 1) == 1) num++; if(*(first + id - *height + 1) == 1) num++; if(*(first + id - *height - 1) == 1) num++; */ num += *(first + id + 1); num += *(first + id - 1); num += *(first + id + *height); num += *(first + id - *height); num += *(first + id + *height + 1); num += *(first + id + *height - 1); num += *(first + id - *height + 1); num += *(first + id - *height - 1); switch(num) { case 3 : *(second + id) = 1; break; case 2 : if(*(first + id) == 1) *(second + id) = 1; break; default : *(second + id) = 0; break; } } } void GetDataFromCudaDevice(int width, int height) { hipMemcpy(state_first,dev_second_state,sizeof(float)*width*height,hipMemcpyDeviceToHost); } void CopyDataToCudaDevice(int width, int height) { hipMemcpy(dev_first_state,state_first,sizeof(float)*width*height,hipMemcpyHostToDevice); hipMemset(dev_second_state,0,sizeof(float)*width*height); hipMemcpy(dev_width,&width,sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_height,&height,sizeof(int),hipMemcpyHostToDevice); } void InitCudaArrays(int width, int height) { hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); return; } hipMalloc((void**)&dev_width,sizeof(int)); hipMalloc((void**)&dev_height,sizeof(int)); hipMalloc((void**)&dev_first_state,sizeof(float)*width*height); hipMalloc((void**)&dev_second_state,sizeof(float)*width*height); } void RunCudaDevice() { hipError_t cudaStatus; int threads = NUMBER_OF_THREADS; int blocks = (width*height)/(NUMBER_OF_THREADS + 1); // kernel <<<blocks,threads>>> (dev_first_state,dev_second_state,dev_width,dev_height); hipLaunchKernelGGL(( kernel) , dim3(10),dim3(10), 0, 0, dev_first_state,dev_second_state,dev_width,dev_height); hipDeviceSynchronize(); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); return; } GetDataFromCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); } void FreeCudaDevice(int width, int height) { hipFree(dev_first_state); hipFree(dev_second_state); hipFree(dev_width); hipFree(dev_height); } void FillField() { /* * * 01010 * 00110 * 00100 * 00000 * */ // two planers and a dot state_first[9*width+9] = 1; state_first[2*width+1] = 1; state_first[2*width+2] = 1; state_first[3*width+2] = 1; state_first[3*width+3] = 1; state_first[1*width+3] = 1; state_first[7*width+1] = 1; state_first[7*width+2] = 1; state_first[8*width+2] = 1; state_first[8*width+3] = 1; state_first[6*width+3] = 1; } // allocate memory and initialize array with '0' void InitArrays(int width, int height) { state_first = (float *) malloc(sizeof(float)*width*height); state_second = (float *) malloc(sizeof(float)*width*height); memset(state_first,0,sizeof(float)*width*height); memset(state_second,0,sizeof(float)*width*height); } void ShowArray(int width, int height) { puts("-----------------"); for(int i=0;i<width;i++) { for(int j=0;j<height;j++) { if(state_first[i*width+j] != 0)printf("*"); else printf(" "); // printf("%1.0f",state_first[i*width+j]); } printf("\n"); } puts("-----------------"); } int main() { InitArrays(FIELD_WIDTH,FIELD_HEIGHT); FillField(); ShowArray(FIELD_WIDTH,FIELD_HEIGHT); InitCudaArrays(FIELD_WIDTH,FIELD_HEIGHT); CopyDataToCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); // printf("ok\n"); RunCudaDevice(); // GetDataFromCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); FreeCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); ShowArray(FIELD_WIDTH,FIELD_HEIGHT); char ch; scanf("%c",&ch); }
d985597546ce535a4f47e59922cb77967863d29c.cu
/* * * compiling: * nvcc -lglut -LGLEW life.cuda.cu -o life -g -G * * -g -G - debug options * * for it's work: * export LD_LIBRARY_PATH=:/usr/local/cuda/lib * export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/libnvvp/ * * cuda-gdb */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <memory.h> #define FIELD_WIDTH 10 #define FIELD_HEIGHT 10 #define NUMBER_OF_THREADS 100 float * state_first; // on PC float * state_second; // arrays float * dev_first_state; // on Card float * dev_second_state; // arrays int * dev_width; int * dev_height; int width = FIELD_WIDTH; int height = FIELD_HEIGHT; __global__ void kernel(float * first, float * second , int * width, int * height) { int id = threadIdx.x + blockIdx.x * blockDim.x; if(id < (*width)*(*height)) { int numberofneighbours = 0; int num = 0; // change to num += ... /* if(*(first + id + 1) == 1) num++; if(*(first + id - 1) == 1) num++; if(*(first + id + *height) == 1) num++; if(*(first + id - *height) == 1) num++; if(*(first + id + *height + 1) == 1) num++; if(*(first + id + *height - 1) == 1) num++; if(*(first + id - *height + 1) == 1) num++; if(*(first + id - *height - 1) == 1) num++; */ num += *(first + id + 1); num += *(first + id - 1); num += *(first + id + *height); num += *(first + id - *height); num += *(first + id + *height + 1); num += *(first + id + *height - 1); num += *(first + id - *height + 1); num += *(first + id - *height - 1); switch(num) { case 3 : *(second + id) = 1; break; case 2 : if(*(first + id) == 1) *(second + id) = 1; break; default : *(second + id) = 0; break; } } } void GetDataFromCudaDevice(int width, int height) { cudaMemcpy(state_first,dev_second_state,sizeof(float)*width*height,cudaMemcpyDeviceToHost); } void CopyDataToCudaDevice(int width, int height) { cudaMemcpy(dev_first_state,state_first,sizeof(float)*width*height,cudaMemcpyHostToDevice); cudaMemset(dev_second_state,0,sizeof(float)*width*height); cudaMemcpy(dev_width,&width,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_height,&height,sizeof(int),cudaMemcpyHostToDevice); } void InitCudaArrays(int width, int height) { cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); return; } cudaMalloc((void**)&dev_width,sizeof(int)); cudaMalloc((void**)&dev_height,sizeof(int)); cudaMalloc((void**)&dev_first_state,sizeof(float)*width*height); cudaMalloc((void**)&dev_second_state,sizeof(float)*width*height); } void RunCudaDevice() { cudaError_t cudaStatus; int threads = NUMBER_OF_THREADS; int blocks = (width*height)/(NUMBER_OF_THREADS + 1); // kernel <<<blocks,threads>>> (dev_first_state,dev_second_state,dev_width,dev_height); kernel <<<10,10>>> (dev_first_state,dev_second_state,dev_width,dev_height); cudaDeviceSynchronize(); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); return; } GetDataFromCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); } void FreeCudaDevice(int width, int height) { cudaFree(dev_first_state); cudaFree(dev_second_state); cudaFree(dev_width); cudaFree(dev_height); } void FillField() { /* * * 01010 * 00110 * 00100 * 00000 * */ // two planers and a dot state_first[9*width+9] = 1; state_first[2*width+1] = 1; state_first[2*width+2] = 1; state_first[3*width+2] = 1; state_first[3*width+3] = 1; state_first[1*width+3] = 1; state_first[7*width+1] = 1; state_first[7*width+2] = 1; state_first[8*width+2] = 1; state_first[8*width+3] = 1; state_first[6*width+3] = 1; } // allocate memory and initialize array with '0' void InitArrays(int width, int height) { state_first = (float *) malloc(sizeof(float)*width*height); state_second = (float *) malloc(sizeof(float)*width*height); memset(state_first,0,sizeof(float)*width*height); memset(state_second,0,sizeof(float)*width*height); } void ShowArray(int width, int height) { puts("-----------------"); for(int i=0;i<width;i++) { for(int j=0;j<height;j++) { if(state_first[i*width+j] != 0)printf("*"); else printf(" "); // printf("%1.0f",state_first[i*width+j]); } printf("\n"); } puts("-----------------"); } int main() { InitArrays(FIELD_WIDTH,FIELD_HEIGHT); FillField(); ShowArray(FIELD_WIDTH,FIELD_HEIGHT); InitCudaArrays(FIELD_WIDTH,FIELD_HEIGHT); CopyDataToCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); // printf("ok\n"); RunCudaDevice(); // GetDataFromCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); FreeCudaDevice(FIELD_WIDTH,FIELD_HEIGHT); ShowArray(FIELD_WIDTH,FIELD_HEIGHT); char ch; scanf("%c",&ch); }
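/*
 * CPU reference for one Game of Life step.  The kernel above indexes the
 * eight neighbours with raw pointer offsets and therefore reads outside the
 * field on border cells; this sketch clamps to the edges instead.  The
 * row-major indexing (row * width + col) matches FillField()/ShowArray();
 * the function name itself is illustrative only.
 */
static void life_step_ref(const float* first, float* second, int width, int height)
{
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            int num = 0;
            for (int dy = -1; dy <= 1; ++dy) {
                for (int dx = -1; dx <= 1; ++dx) {
                    if (dx == 0 && dy == 0) continue;      // skip the cell itself
                    int nx = x + dx, ny = y + dy;
                    if (nx < 0 || nx >= width || ny < 0 || ny >= height) continue;
                    num += (int)first[ny * width + nx];
                }
            }
            int alive = first[y * width + x] != 0.0f;
            // same rule as the switch in the kernel: 3 neighbours -> cell lives,
            // 2 neighbours -> survives only if already alive, otherwise dead
            second[y * width + x] = (num == 3 || (num == 2 && alive)) ? 1.0f : 0.0f;
        }
    }
}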
ebf4ec0ac3307f900d19ebd9db6da871b6375785.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <reduce3.h>

__device__ double update(double old, double opOutput, double *extraParams) {
    return old + opOutput;
}

/**
 * An op on the device
 *
 * @param d1 the first operand
 * @param d2 the second operand
 */
__device__ double op(double d1, double d2, double *extraParams) {
    return d1 * d2;
}

// post process result (for things like means etc)
__device__ double postProcess(double reduction, int n, int xOffset, double *dx, int incx, double *extraParams, double *result) {
    return reduction / extraParams[0] / extraParams[1];
}

extern "C"
__global__ void cosinesimilarity_strided_double(int n, int xOffset, int yOffset, double *dx, double *dy, int incx, int incy, double *extraParams, double *result) {
    transform_pair(n, xOffset, yOffset, dx, dy, incx, incy, extraParams, result);
}
ebf4ec0ac3307f900d19ebd9db6da871b6375785.cu
#include <reduce3.h>

__device__ double update(double old, double opOutput, double *extraParams) {
    return old + opOutput;
}

/**
 * An op on the device
 *
 * @param d1 the first operand
 * @param d2 the second operand
 */
__device__ double op(double d1, double d2, double *extraParams) {
    return d1 * d2;
}

// post process result (for things like means etc)
__device__ double postProcess(double reduction, int n, int xOffset, double *dx, int incx, double *extraParams, double *result) {
    return reduction / extraParams[0] / extraParams[1];
}

extern "C"
__global__ void cosinesimilarity_strided_double(int n, int xOffset, int yOffset, double *dx, double *dy, int incx, int incy, double *extraParams, double *result) {
    transform_pair(n, xOffset, yOffset, dx, dy, incx, incy, extraParams, result);
}
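/*
 * Host-side reference for what the op()/update()/postProcess() triple above
 * computes when driven by the reduce3 template: a pairwise product summed up
 * and finally divided by extraParams[0] and extraParams[1].  Treating those
 * two parameters as the pre-computed vector norms (an assumption -- reduce3.h
 * is not shown here) yields the usual cosine similarity.
 */
#include <cmath>

static double cosine_similarity_ref(const double* x, const double* y, int n)
{
    double dot = 0.0, nx = 0.0, ny = 0.0;
    for (int i = 0; i < n; ++i) {
        dot += x[i] * y[i];        // op(d1, d2) accumulated by update()
        nx  += x[i] * x[i];
        ny  += y[i] * y[i];
    }
    // postProcess(): reduction / extraParams[0] / extraParams[1]
    return dot / std::sqrt(nx) / std::sqrt(ny);
}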
37a0ad58d1983763f228f975ff96b1208015f6e2.hip
// !!! This is a file automatically generated by hipify!!! // SM 3.0 or greater GPUS only! // compile with: nvcc bab_stream.cu -o stream -arch=sm_30 -std=c++11 --expt-relaxed-constexpr #include <hip/hip_runtime.h> #include <cstdio> #include <cassert> #include <algorithm> #include <hiprand/hiprand_kernel.h> #include <vector> #include <time.h> #include <unistd.h> // sleep #include <thread> #include "../bab_gui.cpp" #define BITS_PER_INT 32 #define NaN std::numeric_limits<double>::quiet_NaN() unsigned long NUM_DIMS = 0; // Global variable holding the number of dimensions per interval //#define DEBUGG 1 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); exit(code); } } inline uint interval_purger(float in) { static int remove_counter = 0; if(remove_counter > 0) { remove_counter--; return 1; //remove me } else if(std::isnan(in)){ remove_counter = 2 * NUM_DIMS - 1; return 1; //remove me } else return 0; // keep me } void split(std::vector<float> &v, unsigned long interval_offset, unsigned long NUM_DIMS) { float interval[NUM_DIMS*2]; for(unsigned long i = 0; i < NUM_DIMS * 2; ++i) { interval[i] = v[interval_offset + i]; } // Get the widest dim float max_width = (v[1] - v[0]); uint index_of_max_width = 0; for(int j = 1; j < NUM_DIMS; ++j) { float this_width = (v[1 + j * 2] - v[0 + j * 2]); if(this_width > max_width) { max_width = this_width; index_of_max_width = j; } } //printf("Max width: %f\n", max_width); //assert(max_width > 0); // Split into 2 new intervals for(int i = 0; i < NUM_DIMS; i++) { if(i == index_of_max_width) { v.push_back(interval[2 * i]); v.push_back(interval[2 * i + 1] - max_width / 2.0); } else { v.push_back(interval[2 * i]); v.push_back(interval[2 * i + 1]); } } for(int i = 0; i < NUM_DIMS; i++) { if(i == index_of_max_width) { v.push_back(interval[2 * i] + max_width / 2.0); v.push_back(interval[2 * i + 1] ); } else { v.push_back(interval[2 * i]); v.push_back(interval[2 * i + 1]); } } } // void update_candidates(std::vector<float> &v, unsigned long begin_offset, unsigned long size_to_span) // { // long unsigned original_size = v.size(); // printf("begin offset: %lu, size_to_span: %lu\n", begin_offset, size_to_span); // std::vector<float>::iterator end_vaild = // std::remove_if(v.begin(), v.end(), interval_purger); // printf("elems marked for removal!\n"); // v.erase(end_vaild, v.end()); // long unsigned elems_removed = original_size - v.size(); // printf("%lu elems purged!\n", elems_removed); // unsigned long pre_expand_size = v.size(); // OPTIMIZE TO UPDATE ONE CHUNK // printf("Cabdudates is now of size: %lu\n", pre_expand_size); // unsigned long stride = NUM_DIMS * 2; // for(unsigned long i = 0; i < pre_expand_size; i *= stride) { // split(v, i, NUM_DIMS); // v[i] = NaN; // } // end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); // v.erase(end_vaild, v.end()); // } void update_candidates(std::vector<float> &v, std::vector<float> &s, unsigned long NUM_DIMS, unsigned long EPSILON) { // Grab solutions by checking intervals that weren't deleted. 
// GPU PARALLELIZABLE long unsigned original_size = v.size(); for(long unsigned i = 0; i < original_size; i += NUM_DIMS * 2) { if(std::isnan(v[i])) { continue; } float volume = v[i + 1] - v[i]; for(int j = 1; j < NUM_DIMS; ++j) { float low = v[i + 2 * j]; float high = v[i + 2 * j + 1]; float width = high - low; volume *= width; } if(volume <= EPSILON ) { for(unsigned long j = 0; j < 2 * NUM_DIMS; ++j) { s.push_back(v[i + j]); } v[i] = NaN; } } // Clean candidates std::vector<float>::iterator end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); //printf("elems marked for removal!\n"); v.erase(end_vaild, v.end()); original_size = v.size(); end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); //printf("elems marked for removal!\n"); v.erase(end_vaild, v.end()); long unsigned elems_removed = original_size - v.size(); //printf("%lu elems purged!\n", elems_removed); unsigned long pre_expand_size = v.size(); // OPTIMIZE TO UPDATE ONE CHUNK //printf("Candidates is now of size: %lu\n", pre_expand_size); unsigned long stride = NUM_DIMS * 2; //printf("Expanding...\n"); for(unsigned long i = 0; i < pre_expand_size; i += stride) { split(v, i, NUM_DIMS); } //printf("Puring parents intervals\n"); for(unsigned long i = 0; i < pre_expand_size; i += stride) { v[i] = NaN; } end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); v.erase(end_vaild, v.end()); } __device__ inline float squared(float v) { return v * v; } __device__ inline uint determine_valid_interval_line(float * start, unsigned long NUM_DIMS) { float xmin = start[0]; float xmax = start[1]; float ymin = start[2]; float ymax = start[3]; //if(xmin < ymax && xmax > ymin) int within_line = (ymin <= xmax && ymax >= xmin); // for(unsigned long i = 0; i < NUM_DIMS * 2; ++i) { // garbage *= start[i]; // } // return 1; return within_line; } __device__ inline uint determine_valid_interval_sphere(float * start, unsigned long NUM_DIMS) { float R = 1.0; float C1X = start[0]; float C1Y = start[2]; float C1Z = start[4]; float C2X = start[1]; float C2Y = start[3]; float C2Z = start[5]; float SX = 0.0; float SY = 0.0; float SZ = 0.0; float xmin = C1X; float xmax = C2X; float ymin = C1Y; float ymax = C2Y; float dist_squared = R * R; /* assume C1 and C2 are element-wise sorted, if not, do that now */ if (SX < C1X) dist_squared -= squared(SX - C1X); else if (SX > C2X) dist_squared -= squared(SX - C2X); if (SY < C1Y) dist_squared -= squared(SY - C1Y); else if (SY > C2Y) dist_squared -= squared(SY - C2Y); if (SZ < C1Z) dist_squared -= squared(SZ - C1Z); else if (SZ > C2Z) dist_squared -= squared(SZ - C2Z); return dist_squared > 0; } // SM 3.0 > devices only __global__ void branch_and_bound(float * intervals, unsigned long num_floats, unsigned long NUM_DIMS) { unsigned long thread_index = blockIdx.x * blockDim.x + threadIdx.x; thread_index *= NUM_DIMS * 2; unsigned long jump_length = blockDim.x * gridDim.x * NUM_DIMS * 2; while(thread_index < num_floats) { float new_val = intervals[thread_index]; float * start_addr = intervals + thread_index; uint result = determine_valid_interval_sphere(start_addr, NUM_DIMS); new_val = (result == 1 ? 
new_val : NaN); // printf("New val: %f\n", new_val); intervals[thread_index] = new_val; thread_index += jump_length; } // if(thread_index % jump_length == 0) { // for(int z = 0; z < num_floats; ++z) // printf("GPU: intervals[%d]: %f\n", z, intervals[z]); // } } // unsigned long get_num_intervals_left(std::vector<float> &a, unsigned long FLOATS_PER_INTERVAL, int _num_devices) { // unsigned long num_devices = (unsigned long) _num_devices; // return (unsigned long) a.size() / (FLOATS_PER_INTERVAL * num_devices); // } void divy_up_work(std::vector<float> &candidate_intervals, unsigned long FLOATS_PER_INTERVAL, int num_devices, std::vector<unsigned long> & interval_sizes, std::vector<unsigned long> & float_offsets) { unsigned long total_floats = candidate_intervals.size(); unsigned long total_intervals = total_floats / FLOATS_PER_INTERVAL; unsigned long intervals_distributed = 0; unsigned long stride = total_intervals / num_devices; if(stride * num_devices != total_intervals) // fraction stride += 1; // Split as evenly as possible for(int i = 0; i < num_devices; ++i) { unsigned long this_length = ::min(total_intervals - intervals_distributed, stride); unsigned long this_offset = intervals_distributed; intervals_distributed += this_length; // convert to raw size in bytes float_offsets.push_back(this_offset * FLOATS_PER_INTERVAL * sizeof(float)); // Keep size in units of INTERVAL_SIZE interval_sizes.push_back(this_length); } // for(int i = 0; i < num_devices; ++i) { // printf("offset and size and stride and total_intervals %d: %lu, %lu, %lu, %lu\n", i, float_offsets[i], interval_sizes[i], stride, total_intervals); // } } void visualize_realtime(Level_Set_GUI LS_GUI) { // RUN GUI LOOP LS_GUI.mainLoop(); return; } int main(int argc, char **argv) { if(argc != 3) { printf("./bab <number_of_dims> <min epsilon>\n"); return -1; } assert( sizeof(size_t) == 8 ); assert( sizeof(unsigned long) == 8 ); NUM_DIMS = (unsigned long) atoi(argv[1]); float EPSILON = (float) atof(argv[2]); assert(NUM_DIMS > 0 && NUM_DIMS < 7); printf("Num dims: %lu\n", NUM_DIMS); time_t initial, final; int num_devices; hipDeviceProp_t prop; gpuErrchk( hipGetDeviceProperties(&prop, 0) ); // Assume all GPUs on this system are the same gpuErrchk( hipGetDeviceCount(&num_devices) ); int NUM_SMS = prop.multiProcessorCount; printf("Num devices: %d. SMs per device: %d\n", num_devices, NUM_SMS); // Compute optimal launch bounds and other constants uint MAX_BLOCK_SIZE = 1024; uint BLOCKS_PER_SM = 2; uint NUM_BLOCKS = BLOCKS_PER_SM * NUM_SMS; unsigned long INTERVAL_SIZE = (2 * sizeof(float) * NUM_DIMS); unsigned long FLOATS_PER_INTERVAL = 2 * NUM_DIMS; // GUI INIT Level_Set_GUI &LS_GUI = Level_Set_GUI::getInstance(); LS_GUI.dimensions = NUM_DIMS; LS_GUI.setup(argc, argv); //std::thread first(visualize_realtime, LS_GUI); // create thread called "first" // END GUI // Host-Side allocations // Make up a search space float ** search_space = new float * [NUM_DIMS]; for(int i = 0; i < NUM_DIMS; ++i) { search_space[i] = new float[2]; //min, max } // Populate search space for(int i = 0; i < NUM_DIMS; i++) { search_space[i][0] = -10.0; // min search_space[i][1] = 10.0; // max } // Create vector of handles to device bool_arrays std::vector<float *> dev_candidate_intervals(num_devices); // Divy up the workspace. level 1: num_devices. Level 2: num_dims. Level 3: lower, upper. 
float *** initial_search_spaces = new float ** [num_devices]; for(int i = 0; i < num_devices; ++i) { initial_search_spaces[i] = new float * [NUM_DIMS]; } for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS; ++j) { initial_search_spaces[i][j] = new float [2]; //lower bound, upper bound } } // First N - 1 dimensions are the same as the initial searchspace dims for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS - 1; ++j) { initial_search_spaces[i][j][0] = search_space[j][0]; initial_search_spaces[i][j][1] = search_space[j][1]; } // Split the last dimension evenly float last_dim_min = search_space[NUM_DIMS - 1][0]; float last_dim_max = search_space[NUM_DIMS - 1][1]; float stride = (last_dim_max - last_dim_min) / num_devices; float this_lower_bound = i * stride + last_dim_min; initial_search_spaces[i][NUM_DIMS - 1][0] = this_lower_bound; initial_search_spaces[i][NUM_DIMS - 1][1] = ::min(this_lower_bound + stride, last_dim_max); } // Store the number of intervals being tested on the GPUs std::vector<unsigned long> num_intervals(num_devices); // Store the sizesof the arrays that hold candidate intervals std::vector<unsigned long> array_capacities(num_devices); std::vector<unsigned long> array_sizes(num_devices); // Store the candidate intervals and solution intervals std::vector<float> candidate_intervals; std::vector<float> satisfactory_intervals; long unsigned iterations = 0; // Populate the stack of candidates. for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS; ++j) { candidate_intervals.push_back(initial_search_spaces[i][j][0]); // lower bound candidate_intervals.push_back(initial_search_spaces[i][j][1]); // upper bound } } while(candidate_intervals.size() != 0) { initial = clock(); std::vector<unsigned long> interval_sizes; std::vector<unsigned long> interval_offsets_bytes; divy_up_work(candidate_intervals, FLOATS_PER_INTERVAL, num_devices, interval_sizes, interval_offsets_bytes); // Launch kernels on each GPU iterations++; for(int i = 0; i < num_devices; ++i) { // Select GPU hipSetDevice(i); // Read in available memory on this GPU size_t free_memory, total_memory; gpuErrchk( hipMemGetInfo(&free_memory, &total_memory) ); // Determine how big we can make the array of intervals unsigned long intervals_available = interval_sizes[i]; unsigned long max_array_capacity = .99*(free_memory) / INTERVAL_SIZE; array_capacities[i] = ::min(max_array_capacity, intervals_available); //printf("cap: %lu, avail: %lu\n", max_array_capacity, intervals_available); array_sizes[i] = array_capacities[i] * INTERVAL_SIZE; // Malloc space for this array gpuErrchk( hipMalloc((void **) &dev_candidate_intervals[i], array_sizes[i]) ); // Copy over intervals float * intervals_start_addr = &candidate_intervals[0] + interval_offsets_bytes[i] / sizeof(float); //printf("array size: %lu, interval_offsets_bytes: %lu\n",array_sizes[i], interval_offsets_bytes[i]); //for(int z = 0; z < array_sizes[i] / sizeof(float); ++z) //printf("MEMCPY: candidate_intervals[%d]: %f\n", z, *(intervals_start_addr + z)); gpuErrchk( hipMemcpyAsync(dev_candidate_intervals[i], intervals_start_addr, array_sizes[i], hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( branch_and_bound), dim3(NUM_BLOCKS), dim3(MAX_BLOCK_SIZE), 0, 0, dev_candidate_intervals[i], array_sizes[i] / sizeof(float), NUM_DIMS); // Check for errors on kernel call hipError_t err = hipGetLastError(); if(hipSuccess != err) printf("Error %s\n",hipGetErrorString(err)); // Read back the procesed intervals ontop of their old data gpuErrchk( 
hipMemcpyAsync(&candidate_intervals[0] + interval_offsets_bytes[i] / sizeof(float), dev_candidate_intervals[i], array_sizes[i], hipMemcpyDeviceToHost) ); gpuErrchk( hipFree(dev_candidate_intervals[i]) ); // for(int z = 0; z < candidate_intervals.size(); ++z) // printf("CPU: dev_candidate_intervals[%d]: %f\n", z, candidate_intervals[z]); } update_candidates(candidate_intervals, satisfactory_intervals, NUM_DIMS, EPSILON); // UPDATE INTERVALS ON GUI LS_GUI.update_candidates(candidate_intervals); LS_GUI.update_solutions(satisfactory_intervals); LS_GUI.display(); //sleep(1); final = clock(); // if (iterations < 25) // { // for (unsigned long tempnumer = 0; tempnumer < candidate_intervals.size(); tempnumer++) // { // printf(" %f ", candidate_intervals[tempnumer]); // } // printf("\n"); // } // END GUI printf("Iteration %lu time: %f (s). Num candidates: %lu, Num solutions: %lu\n", iterations, double(final - initial) / CLOCKS_PER_SEC, candidate_intervals.size() / (2 * NUM_DIMS), satisfactory_intervals.size() / (2 * NUM_DIMS) ); // for(int i = 0; i < candidate_intervals.size(); ++i) // printf("candidate_intervals[%d]: %f\n", i, candidate_intervals[i]); } // cleanup host // Clean up initial search spaces for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS; ++j) { delete [] initial_search_spaces[i][j]; } } // // Clean up streams // for(int i = 0; i < num_devices; ++i) { // gpuErrchk( hipStreamDestroy(streams[i]) ); // } for(int i = 0; i < num_devices; ++i) { delete [] initial_search_spaces[i]; } delete initial_search_spaces; // Clean up search space for(int i = 0; i < NUM_DIMS; ++i) { delete [] search_space[i]; } delete search_space; // Close visualization // first.join(); // GUI MAIN LOOP LS_GUI.mainLoop(); } void widest_dims(float *** initial_search_spaces, int num_devices, int NUM_DIMS) { // Vector to hold the index of the widest dim for each GPU's search space std::vector<uint> indices_of_widest_dim(num_devices); for(int i = 0; i < num_devices; ++i) { float max_width = (initial_search_spaces[i][0][1] - initial_search_spaces[i][0][0]); uint index_of_max_width = 0; for(int j = 1; j < NUM_DIMS; ++j) { float this_width = (initial_search_spaces[i][j][1] - initial_search_spaces[i][j][0]); if(this_width > max_width) { max_width = this_width; index_of_max_width = j; } } assert(max_width > 0); indices_of_widest_dim[i] = index_of_max_width; } }
37a0ad58d1983763f228f975ff96b1208015f6e2.cu
// SM 3.0 or greater GPUS only! // compile with: nvcc bab_stream.cu -o stream -arch=sm_30 -std=c++11 --expt-relaxed-constexpr #include <cuda_runtime.h> #include <cstdio> #include <cassert> #include <algorithm> #include <curand_kernel.h> #include <vector> #include <time.h> #include <unistd.h> // sleep #include <thread> #include "../bab_gui.cpp" #define BITS_PER_INT 32 #define NaN std::numeric_limits<double>::quiet_NaN() unsigned long NUM_DIMS = 0; // Global variable holding the number of dimensions per interval //#define DEBUGG 1 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); exit(code); } } inline uint interval_purger(float in) { static int remove_counter = 0; if(remove_counter > 0) { remove_counter--; return 1; //remove me } else if(std::isnan(in)){ remove_counter = 2 * NUM_DIMS - 1; return 1; //remove me } else return 0; // keep me } void split(std::vector<float> &v, unsigned long interval_offset, unsigned long NUM_DIMS) { float interval[NUM_DIMS*2]; for(unsigned long i = 0; i < NUM_DIMS * 2; ++i) { interval[i] = v[interval_offset + i]; } // Get the widest dim float max_width = (v[1] - v[0]); uint index_of_max_width = 0; for(int j = 1; j < NUM_DIMS; ++j) { float this_width = (v[1 + j * 2] - v[0 + j * 2]); if(this_width > max_width) { max_width = this_width; index_of_max_width = j; } } //printf("Max width: %f\n", max_width); //assert(max_width > 0); // Split into 2 new intervals for(int i = 0; i < NUM_DIMS; i++) { if(i == index_of_max_width) { v.push_back(interval[2 * i]); v.push_back(interval[2 * i + 1] - max_width / 2.0); } else { v.push_back(interval[2 * i]); v.push_back(interval[2 * i + 1]); } } for(int i = 0; i < NUM_DIMS; i++) { if(i == index_of_max_width) { v.push_back(interval[2 * i] + max_width / 2.0); v.push_back(interval[2 * i + 1] ); } else { v.push_back(interval[2 * i]); v.push_back(interval[2 * i + 1]); } } } // void update_candidates(std::vector<float> &v, unsigned long begin_offset, unsigned long size_to_span) // { // long unsigned original_size = v.size(); // printf("begin offset: %lu, size_to_span: %lu\n", begin_offset, size_to_span); // std::vector<float>::iterator end_vaild = // std::remove_if(v.begin(), v.end(), interval_purger); // printf("elems marked for removal!\n"); // v.erase(end_vaild, v.end()); // long unsigned elems_removed = original_size - v.size(); // printf("%lu elems purged!\n", elems_removed); // unsigned long pre_expand_size = v.size(); // OPTIMIZE TO UPDATE ONE CHUNK // printf("Cabdudates is now of size: %lu\n", pre_expand_size); // unsigned long stride = NUM_DIMS * 2; // for(unsigned long i = 0; i < pre_expand_size; i *= stride) { // split(v, i, NUM_DIMS); // v[i] = NaN; // } // end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); // v.erase(end_vaild, v.end()); // } void update_candidates(std::vector<float> &v, std::vector<float> &s, unsigned long NUM_DIMS, unsigned long EPSILON) { // Grab solutions by checking intervals that weren't deleted. 
// GPU PARALLELIZABLE long unsigned original_size = v.size(); for(long unsigned i = 0; i < original_size; i += NUM_DIMS * 2) { if(std::isnan(v[i])) { continue; } float volume = v[i + 1] - v[i]; for(int j = 1; j < NUM_DIMS; ++j) { float low = v[i + 2 * j]; float high = v[i + 2 * j + 1]; float width = high - low; volume *= width; } if(volume <= EPSILON ) { for(unsigned long j = 0; j < 2 * NUM_DIMS; ++j) { s.push_back(v[i + j]); } v[i] = NaN; } } // Clean candidates std::vector<float>::iterator end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); //printf("elems marked for removal!\n"); v.erase(end_vaild, v.end()); original_size = v.size(); end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); //printf("elems marked for removal!\n"); v.erase(end_vaild, v.end()); long unsigned elems_removed = original_size - v.size(); //printf("%lu elems purged!\n", elems_removed); unsigned long pre_expand_size = v.size(); // OPTIMIZE TO UPDATE ONE CHUNK //printf("Candidates is now of size: %lu\n", pre_expand_size); unsigned long stride = NUM_DIMS * 2; //printf("Expanding...\n"); for(unsigned long i = 0; i < pre_expand_size; i += stride) { split(v, i, NUM_DIMS); } //printf("Puring parents intervals\n"); for(unsigned long i = 0; i < pre_expand_size; i += stride) { v[i] = NaN; } end_vaild = std::remove_if(v.begin(), v.end(), interval_purger); v.erase(end_vaild, v.end()); } __device__ inline float squared(float v) { return v * v; } __device__ inline uint determine_valid_interval_line(float * start, unsigned long NUM_DIMS) { float xmin = start[0]; float xmax = start[1]; float ymin = start[2]; float ymax = start[3]; //if(xmin < ymax && xmax > ymin) int within_line = (ymin <= xmax && ymax >= xmin); // for(unsigned long i = 0; i < NUM_DIMS * 2; ++i) { // garbage *= start[i]; // } // return 1; return within_line; } __device__ inline uint determine_valid_interval_sphere(float * start, unsigned long NUM_DIMS) { float R = 1.0; float C1X = start[0]; float C1Y = start[2]; float C1Z = start[4]; float C2X = start[1]; float C2Y = start[3]; float C2Z = start[5]; float SX = 0.0; float SY = 0.0; float SZ = 0.0; float xmin = C1X; float xmax = C2X; float ymin = C1Y; float ymax = C2Y; float dist_squared = R * R; /* assume C1 and C2 are element-wise sorted, if not, do that now */ if (SX < C1X) dist_squared -= squared(SX - C1X); else if (SX > C2X) dist_squared -= squared(SX - C2X); if (SY < C1Y) dist_squared -= squared(SY - C1Y); else if (SY > C2Y) dist_squared -= squared(SY - C2Y); if (SZ < C1Z) dist_squared -= squared(SZ - C1Z); else if (SZ > C2Z) dist_squared -= squared(SZ - C2Z); return dist_squared > 0; } // SM 3.0 > devices only __global__ void branch_and_bound(float * intervals, unsigned long num_floats, unsigned long NUM_DIMS) { unsigned long thread_index = blockIdx.x * blockDim.x + threadIdx.x; thread_index *= NUM_DIMS * 2; unsigned long jump_length = blockDim.x * gridDim.x * NUM_DIMS * 2; while(thread_index < num_floats) { float new_val = intervals[thread_index]; float * start_addr = intervals + thread_index; uint result = determine_valid_interval_sphere(start_addr, NUM_DIMS); new_val = (result == 1 ? 
new_val : NaN); // printf("New val: %f\n", new_val); intervals[thread_index] = new_val; thread_index += jump_length; } // if(thread_index % jump_length == 0) { // for(int z = 0; z < num_floats; ++z) // printf("GPU: intervals[%d]: %f\n", z, intervals[z]); // } } // unsigned long get_num_intervals_left(std::vector<float> &a, unsigned long FLOATS_PER_INTERVAL, int _num_devices) { // unsigned long num_devices = (unsigned long) _num_devices; // return (unsigned long) a.size() / (FLOATS_PER_INTERVAL * num_devices); // } void divy_up_work(std::vector<float> &candidate_intervals, unsigned long FLOATS_PER_INTERVAL, int num_devices, std::vector<unsigned long> & interval_sizes, std::vector<unsigned long> & float_offsets) { unsigned long total_floats = candidate_intervals.size(); unsigned long total_intervals = total_floats / FLOATS_PER_INTERVAL; unsigned long intervals_distributed = 0; unsigned long stride = total_intervals / num_devices; if(stride * num_devices != total_intervals) // fraction stride += 1; // Split as evenly as possible for(int i = 0; i < num_devices; ++i) { unsigned long this_length = std::min(total_intervals - intervals_distributed, stride); unsigned long this_offset = intervals_distributed; intervals_distributed += this_length; // convert to raw size in bytes float_offsets.push_back(this_offset * FLOATS_PER_INTERVAL * sizeof(float)); // Keep size in units of INTERVAL_SIZE interval_sizes.push_back(this_length); } // for(int i = 0; i < num_devices; ++i) { // printf("offset and size and stride and total_intervals %d: %lu, %lu, %lu, %lu\n", i, float_offsets[i], interval_sizes[i], stride, total_intervals); // } } void visualize_realtime(Level_Set_GUI LS_GUI) { // RUN GUI LOOP LS_GUI.mainLoop(); return; } int main(int argc, char **argv) { if(argc != 3) { printf("./bab <number_of_dims> <min epsilon>\n"); return -1; } assert( sizeof(size_t) == 8 ); assert( sizeof(unsigned long) == 8 ); NUM_DIMS = (unsigned long) atoi(argv[1]); float EPSILON = (float) atof(argv[2]); assert(NUM_DIMS > 0 && NUM_DIMS < 7); printf("Num dims: %lu\n", NUM_DIMS); time_t initial, final; int num_devices; cudaDeviceProp prop; gpuErrchk( cudaGetDeviceProperties(&prop, 0) ); // Assume all GPUs on this system are the same gpuErrchk( cudaGetDeviceCount(&num_devices) ); int NUM_SMS = prop.multiProcessorCount; printf("Num devices: %d. SMs per device: %d\n", num_devices, NUM_SMS); // Compute optimal launch bounds and other constants uint MAX_BLOCK_SIZE = 1024; uint BLOCKS_PER_SM = 2; uint NUM_BLOCKS = BLOCKS_PER_SM * NUM_SMS; unsigned long INTERVAL_SIZE = (2 * sizeof(float) * NUM_DIMS); unsigned long FLOATS_PER_INTERVAL = 2 * NUM_DIMS; // GUI INIT Level_Set_GUI &LS_GUI = Level_Set_GUI::getInstance(); LS_GUI.dimensions = NUM_DIMS; LS_GUI.setup(argc, argv); //std::thread first(visualize_realtime, LS_GUI); // create thread called "first" // END GUI // Host-Side allocations // Make up a search space float ** search_space = new float * [NUM_DIMS]; for(int i = 0; i < NUM_DIMS; ++i) { search_space[i] = new float[2]; //min, max } // Populate search space for(int i = 0; i < NUM_DIMS; i++) { search_space[i][0] = -10.0; // min search_space[i][1] = 10.0; // max } // Create vector of handles to device bool_arrays std::vector<float *> dev_candidate_intervals(num_devices); // Divy up the workspace. level 1: num_devices. Level 2: num_dims. Level 3: lower, upper. 
float *** initial_search_spaces = new float ** [num_devices]; for(int i = 0; i < num_devices; ++i) { initial_search_spaces[i] = new float * [NUM_DIMS]; } for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS; ++j) { initial_search_spaces[i][j] = new float [2]; //lower bound, upper bound } } // First N - 1 dimensions are the same as the initial searchspace dims for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS - 1; ++j) { initial_search_spaces[i][j][0] = search_space[j][0]; initial_search_spaces[i][j][1] = search_space[j][1]; } // Split the last dimension evenly float last_dim_min = search_space[NUM_DIMS - 1][0]; float last_dim_max = search_space[NUM_DIMS - 1][1]; float stride = (last_dim_max - last_dim_min) / num_devices; float this_lower_bound = i * stride + last_dim_min; initial_search_spaces[i][NUM_DIMS - 1][0] = this_lower_bound; initial_search_spaces[i][NUM_DIMS - 1][1] = std::min(this_lower_bound + stride, last_dim_max); } // Store the number of intervals being tested on the GPUs std::vector<unsigned long> num_intervals(num_devices); // Store the sizesof the arrays that hold candidate intervals std::vector<unsigned long> array_capacities(num_devices); std::vector<unsigned long> array_sizes(num_devices); // Store the candidate intervals and solution intervals std::vector<float> candidate_intervals; std::vector<float> satisfactory_intervals; long unsigned iterations = 0; // Populate the stack of candidates. for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS; ++j) { candidate_intervals.push_back(initial_search_spaces[i][j][0]); // lower bound candidate_intervals.push_back(initial_search_spaces[i][j][1]); // upper bound } } while(candidate_intervals.size() != 0) { initial = clock(); std::vector<unsigned long> interval_sizes; std::vector<unsigned long> interval_offsets_bytes; divy_up_work(candidate_intervals, FLOATS_PER_INTERVAL, num_devices, interval_sizes, interval_offsets_bytes); // Launch kernels on each GPU iterations++; for(int i = 0; i < num_devices; ++i) { // Select GPU cudaSetDevice(i); // Read in available memory on this GPU size_t free_memory, total_memory; gpuErrchk( cudaMemGetInfo(&free_memory, &total_memory) ); // Determine how big we can make the array of intervals unsigned long intervals_available = interval_sizes[i]; unsigned long max_array_capacity = .99*(free_memory) / INTERVAL_SIZE; array_capacities[i] = std::min(max_array_capacity, intervals_available); //printf("cap: %lu, avail: %lu\n", max_array_capacity, intervals_available); array_sizes[i] = array_capacities[i] * INTERVAL_SIZE; // Malloc space for this array gpuErrchk( cudaMalloc((void **) &dev_candidate_intervals[i], array_sizes[i]) ); // Copy over intervals float * intervals_start_addr = &candidate_intervals[0] + interval_offsets_bytes[i] / sizeof(float); //printf("array size: %lu, interval_offsets_bytes: %lu\n",array_sizes[i], interval_offsets_bytes[i]); //for(int z = 0; z < array_sizes[i] / sizeof(float); ++z) //printf("MEMCPY: candidate_intervals[%d]: %f\n", z, *(intervals_start_addr + z)); gpuErrchk( cudaMemcpyAsync(dev_candidate_intervals[i], intervals_start_addr, array_sizes[i], cudaMemcpyHostToDevice) ); branch_and_bound<<<NUM_BLOCKS, MAX_BLOCK_SIZE>>> (dev_candidate_intervals[i], array_sizes[i] / sizeof(float), NUM_DIMS); // Check for errors on kernel call cudaError err = cudaGetLastError(); if(cudaSuccess != err) printf("Error %s\n",cudaGetErrorString(err)); // Read back the procesed intervals ontop of their old data gpuErrchk( 
cudaMemcpyAsync(&candidate_intervals[0] + interval_offsets_bytes[i] / sizeof(float), dev_candidate_intervals[i], array_sizes[i], cudaMemcpyDeviceToHost) ); gpuErrchk( cudaFree(dev_candidate_intervals[i]) ); // for(int z = 0; z < candidate_intervals.size(); ++z) // printf("CPU: dev_candidate_intervals[%d]: %f\n", z, candidate_intervals[z]); } update_candidates(candidate_intervals, satisfactory_intervals, NUM_DIMS, EPSILON); // UPDATE INTERVALS ON GUI LS_GUI.update_candidates(candidate_intervals); LS_GUI.update_solutions(satisfactory_intervals); LS_GUI.display(); //sleep(1); final = clock(); // if (iterations < 25) // { // for (unsigned long tempnumer = 0; tempnumer < candidate_intervals.size(); tempnumer++) // { // printf(" %f ", candidate_intervals[tempnumer]); // } // printf("\n"); // } // END GUI printf("Iteration %lu time: %f (s). Num candidates: %lu, Num solutions: %lu\n", iterations, double(final - initial) / CLOCKS_PER_SEC, candidate_intervals.size() / (2 * NUM_DIMS), satisfactory_intervals.size() / (2 * NUM_DIMS) ); // for(int i = 0; i < candidate_intervals.size(); ++i) // printf("candidate_intervals[%d]: %f\n", i, candidate_intervals[i]); } // cleanup host // Clean up initial search spaces for(int i = 0; i < num_devices; ++i) { for(int j = 0; j < NUM_DIMS; ++j) { delete [] initial_search_spaces[i][j]; } } // // Clean up streams // for(int i = 0; i < num_devices; ++i) { // gpuErrchk( cudaStreamDestroy(streams[i]) ); // } for(int i = 0; i < num_devices; ++i) { delete [] initial_search_spaces[i]; } delete initial_search_spaces; // Clean up search space for(int i = 0; i < NUM_DIMS; ++i) { delete [] search_space[i]; } delete search_space; // Close visualization // first.join(); // GUI MAIN LOOP LS_GUI.mainLoop(); } void widest_dims(float *** initial_search_spaces, int num_devices, int NUM_DIMS) { // Vector to hold the index of the widest dim for each GPU's search space std::vector<uint> indices_of_widest_dim(num_devices); for(int i = 0; i < num_devices; ++i) { float max_width = (initial_search_spaces[i][0][1] - initial_search_spaces[i][0][0]); uint index_of_max_width = 0; for(int j = 1; j < NUM_DIMS; ++j) { float this_width = (initial_search_spaces[i][j][1] - initial_search_spaces[i][j][0]); if(this_width > max_width) { max_width = this_width; index_of_max_width = j; } } assert(max_width > 0); indices_of_widest_dim[i] = index_of_max_width; } }
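/*
 * Host reference for the interval test in determine_valid_interval_sphere():
 * Arvo's squared-distance check between an axis-aligned box [lo, hi] and a
 * sphere of radius r centred at c.  The per-dimension (lo, hi) layout mirrors
 * how that kernel unpacks start[0..5]; the function itself is an illustrative
 * sketch, not part of the program above.
 */
static bool box_intersects_sphere_ref(const float lo[3], const float hi[3],
                                      const float c[3], float r)
{
    float d2 = r * r;                       // remaining squared-distance budget
    for (int k = 0; k < 3; ++k) {
        if (c[k] < lo[k])      d2 -= (c[k] - lo[k]) * (c[k] - lo[k]);
        else if (c[k] > hi[k]) d2 -= (c[k] - hi[k]) * (c[k] - hi[k]);
    }
    return d2 > 0.0f;                       // budget left over => box overlaps sphere
}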
b67b4f30ccafd947d14b4b67b37b97cc60c46cf2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define RAYCASTER_ENABLE_ISO2 #include "RaycasterKernelParams.h" #ifdef RAYCASTER_ENABLE_ISO2 #include "cudaUtil.h" #include "RaycasterKernelDefines.h" #include "RaycasterKernelGlobals.cuh" #include "RaycasterKernelHelpers.cuh" template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, bool shadeSI> __device__ inline void iso2Step( float4& sum, float& oldVal, const float3& world2texOffset, const float3& world2texScale, const float3& rayPosTx, const float3& pos, const float3& step, const float3& rayDir) { float val = getMeasure<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos), c_raycastParams.gridSpacing, c_raycastParams.measureScale1); float fDist = 1e10; float4 vColors[2] = {make_float4(0,0,0,0), make_float4(0,0,0,0)}; // passed first iso surface? if ((val>=c_raycastParams.isoValues.x) != (oldVal>=c_raycastParams.isoValues.x)) { float factor = (val>=c_raycastParams.isoValues.x) ? 1.0f : 0.0f; float3 pp = binarySearch<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos - factor * step), w2t(pos - (1.0f - factor) * step), c_raycastParams.gridSpacing, c_raycastParams.isoValues.x, c_raycastParams.measureScale1); float3 grad = getGradient<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, pp, c_raycastParams.gridSpacing); if(shadeSI) vColors[0] = shadeScaleInvariant(rayDir, grad, c_raycastParams.isoColor1); else vColors[0] = shadeIsosurface(rayDir, grad, c_raycastParams.isoColor1); fDist = lengthSq(pp-rayPosTx); } // passed second iso surface? if ((val>=c_raycastParams.isoValues.y) != (oldVal>=c_raycastParams.isoValues.y)) { float factor = (val>=c_raycastParams.isoValues.y) ? 1.0f : 0.0f; float3 pp = binarySearch<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos - factor * step), w2t(pos - (1.0f - factor) * step), c_raycastParams.gridSpacing, c_raycastParams.isoValues.y, c_raycastParams.measureScale1); // sort hits vColors[1] = vColors[0]; int index = (lengthSq(pp-rayPosTx) <= fDist) ? 0 : 1; float3 grad = getGradient<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, pp, c_raycastParams.gridSpacing); if (shadeSI) vColors[index] = shadeScaleInvariant(rayDir, grad, c_raycastParams.isoColor2); else vColors[index] = shadeIsosurface(rayDir, grad, c_raycastParams.isoColor2); if(shadeSI) vColors[index] = shadeScaleInvariant(rayDir, grad, c_raycastParams.isoColor2); else vColors[index] = shadeIsosurface(rayDir, grad, c_raycastParams.isoColor2); } // blend both potential hits "behind" sum += (1.0f - sum.w) * vColors[0]; sum += (1.0f - sum.w) * vColors[1]; oldVal = val; } template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, bool shadeSI> __device__ inline void iso2Raycast( int2 brickMinScreen, int2 brickSizeScreen, int2 renderTargetOffset, float3 boxMin, float3 boxMax, float3 world2texOffset, float3 world2texScale ) { const float opacityThreshold = 0.999f; uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; // translate from brick's 2D screen space bbox to global if ((x >= brickSizeScreen.x) || (y >= brickSizeScreen.y)) return; x += brickMinScreen.x; y += brickMinScreen.y; // calculate eye ray in world space float3 rayPos = getRayPos(c_raycastParams.viewInv); float3 rayDir = getRayDir(c_raycastParams.viewInv, x, y); // translate into viewport (-> side-by-side stereo!) 
x += renderTargetOffset.x; y += renderTargetOffset.y; // find intersections with box float tnear, tfar; if (!intersectBox(rayPos, rayDir, boxMin, boxMax, &tnear, &tfar)) return; tnear = fmaxf(tnear, 0.0f); // clamp to near plane // current position and step increment in world space float3 pos = rayPos + rayDir * tnear; float3 step = rayDir * c_raycastParams.stepSizeWorld; float depthLinear = -transformPos(c_raycastParams.view, pos).z; float depthStepLinear = -transformDir(c_raycastParams.view, step).z; // read depth buffer float depthMax; surf2Dread(&depthMax, g_surfDepth, x * sizeof(float), y); float depthMaxLinear = depthToLinear(depthMax); // restrict depthMaxLinear to exit point depth, so we can use it as stop criterion depthMaxLinear = min(depthMaxLinear, -transformPos(c_raycastParams.view, rayPos + rayDir * tfar).z); // early-out z test if(depthLinear >= depthMaxLinear) return; // get initial color from render target uchar4 colorStart; surf2Dread(&colorStart, g_surfTarget, x * 4, y); float4 sum = rgbaUCharToFloat(colorStart); // get value at entry point float oldVal = getMeasure<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos), c_raycastParams.gridSpacing, c_raycastParams.measureScale1); // march along ray from front to back while((depthLinear + depthStepLinear) < depthMaxLinear && sum.w < opacityThreshold) { // go to end of current segment pos += step; depthLinear += depthStepLinear; iso2Step<measureSource, F, C, shadeSI>(sum, oldVal, world2texOffset, world2texScale, w2t(rayPos), pos, step, rayDir); } // if we didn't hit the alpha threshold, do final (smaller) step to z buffer hit (or brick exit point) if(sum.w < opacityThreshold) { step *= (depthMaxLinear - depthLinear) / depthStepLinear; pos += step; iso2Step<measureSource, F, C, shadeSI>(sum, oldVal, world2texOffset, world2texScale, w2t(rayPos), pos, step, rayDir); } // write output color surf2Dwrite(rgbaFloatToUChar(sum), g_surfTarget, x * 4, y); } template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, eColorMode CM> __global__ void iso2Kernel( int2 brickMinScreen, int2 brickSizeScreen, int2 renderTargetOffset, float3 boxMin, float3 boxMax, float3 world2texOffset, float3 world2texScale ) { iso2Raycast<measureSource, F, C, false>(brickMinScreen, brickSizeScreen, renderTargetOffset, boxMin, boxMax, world2texOffset, world2texScale); } template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, eColorMode CM> __global__ void iso2SiKernel( int2 brickMinScreen, int2 brickSizeScreen, int2 renderTargetOffset, float3 boxMin, float3 boxMax, float3 world2texOffset, float3 world2texScale ) { iso2Raycast<measureSource, F, C, true>(brickMinScreen, brickSizeScreen, renderTargetOffset, boxMin, boxMax, world2texOffset, world2texScale); } #endif void raycasterKernelIso2(RaycasterKernelParams& params) { #ifdef RAYCASTER_ENABLE_ISO2 // color mode isn't used -> directly call RAYCASTER_COMPUTE_SWITCH switch(params.filterMode) { #ifdef RAYCASTER_ENABLE_LINEAR case TEXTURE_FILTER_LINEAR : RAYCASTER_COMPUTE_SWITCH_RT(iso2Kernel, TEXTURE_FILTER_LINEAR, COLOR_MODE_UI); break; #endif #ifdef RAYCASTER_ENABLE_CUBIC case TEXTURE_FILTER_CUBIC : RAYCASTER_COMPUTE_SWITCH_RT(iso2Kernel, TEXTURE_FILTER_CUBIC, COLOR_MODE_UI); break; #endif } #endif } void raycasterKernelIso2Si(RaycasterKernelParams& params) { #ifdef RAYCASTER_ENABLE_ISO2 // color mode isn't used -> directly call RAYCASTER_COMPUTE_SWITCH switch(params.filterMode) { #ifdef RAYCASTER_ENABLE_LINEAR case TEXTURE_FILTER_LINEAR : 
RAYCASTER_COMPUTE_SWITCH_RT(iso2SiKernel, TEXTURE_FILTER_LINEAR, COLOR_MODE_UI); break; #endif #ifdef RAYCASTER_ENABLE_CUBIC case TEXTURE_FILTER_CUBIC : RAYCASTER_COMPUTE_SWITCH_RT(iso2SiKernel, TEXTURE_FILTER_CUBIC, COLOR_MODE_UI); break; #endif } #endif }
b67b4f30ccafd947d14b4b67b37b97cc60c46cf2.cu
#define RAYCASTER_ENABLE_ISO2 #include "RaycasterKernelParams.h" #ifdef RAYCASTER_ENABLE_ISO2 #include "cudaUtil.h" #include "RaycasterKernelDefines.h" #include "RaycasterKernelGlobals.cuh" #include "RaycasterKernelHelpers.cuh" template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, bool shadeSI> __device__ inline void iso2Step( float4& sum, float& oldVal, const float3& world2texOffset, const float3& world2texScale, const float3& rayPosTx, const float3& pos, const float3& step, const float3& rayDir) { float val = getMeasure<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos), c_raycastParams.gridSpacing, c_raycastParams.measureScale1); float fDist = 1e10; float4 vColors[2] = {make_float4(0,0,0,0), make_float4(0,0,0,0)}; // passed first iso surface? if ((val>=c_raycastParams.isoValues.x) != (oldVal>=c_raycastParams.isoValues.x)) { float factor = (val>=c_raycastParams.isoValues.x) ? 1.0f : 0.0f; float3 pp = binarySearch<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos - factor * step), w2t(pos - (1.0f - factor) * step), c_raycastParams.gridSpacing, c_raycastParams.isoValues.x, c_raycastParams.measureScale1); float3 grad = getGradient<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, pp, c_raycastParams.gridSpacing); if(shadeSI) vColors[0] = shadeScaleInvariant(rayDir, grad, c_raycastParams.isoColor1); else vColors[0] = shadeIsosurface(rayDir, grad, c_raycastParams.isoColor1); fDist = lengthSq(pp-rayPosTx); } // passed second iso surface? if ((val>=c_raycastParams.isoValues.y) != (oldVal>=c_raycastParams.isoValues.y)) { float factor = (val>=c_raycastParams.isoValues.y) ? 1.0f : 0.0f; float3 pp = binarySearch<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos - factor * step), w2t(pos - (1.0f - factor) * step), c_raycastParams.gridSpacing, c_raycastParams.isoValues.y, c_raycastParams.measureScale1); // sort hits vColors[1] = vColors[0]; int index = (lengthSq(pp-rayPosTx) <= fDist) ? 0 : 1; float3 grad = getGradient<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, pp, c_raycastParams.gridSpacing); if (shadeSI) vColors[index] = shadeScaleInvariant(rayDir, grad, c_raycastParams.isoColor2); else vColors[index] = shadeIsosurface(rayDir, grad, c_raycastParams.isoColor2); if(shadeSI) vColors[index] = shadeScaleInvariant(rayDir, grad, c_raycastParams.isoColor2); else vColors[index] = shadeIsosurface(rayDir, grad, c_raycastParams.isoColor2); } // blend both potential hits "behind" sum += (1.0f - sum.w) * vColors[0]; sum += (1.0f - sum.w) * vColors[1]; oldVal = val; } template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, bool shadeSI> __device__ inline void iso2Raycast( int2 brickMinScreen, int2 brickSizeScreen, int2 renderTargetOffset, float3 boxMin, float3 boxMax, float3 world2texOffset, float3 world2texScale ) { const float opacityThreshold = 0.999f; uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; // translate from brick's 2D screen space bbox to global if ((x >= brickSizeScreen.x) || (y >= brickSizeScreen.y)) return; x += brickMinScreen.x; y += brickMinScreen.y; // calculate eye ray in world space float3 rayPos = getRayPos(c_raycastParams.viewInv); float3 rayDir = getRayDir(c_raycastParams.viewInv, x, y); // translate into viewport (-> side-by-side stereo!) 
x += renderTargetOffset.x; y += renderTargetOffset.y; // find intersections with box float tnear, tfar; if (!intersectBox(rayPos, rayDir, boxMin, boxMax, &tnear, &tfar)) return; tnear = fmaxf(tnear, 0.0f); // clamp to near plane // current position and step increment in world space float3 pos = rayPos + rayDir * tnear; float3 step = rayDir * c_raycastParams.stepSizeWorld; float depthLinear = -transformPos(c_raycastParams.view, pos).z; float depthStepLinear = -transformDir(c_raycastParams.view, step).z; // read depth buffer float depthMax; surf2Dread(&depthMax, g_surfDepth, x * sizeof(float), y); float depthMaxLinear = depthToLinear(depthMax); // restrict depthMaxLinear to exit point depth, so we can use it as stop criterion depthMaxLinear = min(depthMaxLinear, -transformPos(c_raycastParams.view, rayPos + rayDir * tfar).z); // early-out z test if(depthLinear >= depthMaxLinear) return; // get initial color from render target uchar4 colorStart; surf2Dread(&colorStart, g_surfTarget, x * 4, y); float4 sum = rgbaUCharToFloat(colorStart); // get value at entry point float oldVal = getMeasure<measureSource, F, C>(c_raycastParams.measure1, g_texVolume1, w2t(pos), c_raycastParams.gridSpacing, c_raycastParams.measureScale1); // march along ray from front to back while((depthLinear + depthStepLinear) < depthMaxLinear && sum.w < opacityThreshold) { // go to end of current segment pos += step; depthLinear += depthStepLinear; iso2Step<measureSource, F, C, shadeSI>(sum, oldVal, world2texOffset, world2texScale, w2t(rayPos), pos, step, rayDir); } // if we didn't hit the alpha threshold, do final (smaller) step to z buffer hit (or brick exit point) if(sum.w < opacityThreshold) { step *= (depthMaxLinear - depthLinear) / depthStepLinear; pos += step; iso2Step<measureSource, F, C, shadeSI>(sum, oldVal, world2texOffset, world2texScale, w2t(rayPos), pos, step, rayDir); } // write output color surf2Dwrite(rgbaFloatToUChar(sum), g_surfTarget, x * 4, y); } template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, eColorMode CM> __global__ void iso2Kernel( int2 brickMinScreen, int2 brickSizeScreen, int2 renderTargetOffset, float3 boxMin, float3 boxMax, float3 world2texOffset, float3 world2texScale ) { iso2Raycast<measureSource, F, C, false>(brickMinScreen, brickSizeScreen, renderTargetOffset, boxMin, boxMax, world2texOffset, world2texScale); } template <eMeasureSource measureSource, eTextureFilterMode F, eMeasureComputeMode C, eColorMode CM> __global__ void iso2SiKernel( int2 brickMinScreen, int2 brickSizeScreen, int2 renderTargetOffset, float3 boxMin, float3 boxMax, float3 world2texOffset, float3 world2texScale ) { iso2Raycast<measureSource, F, C, true>(brickMinScreen, brickSizeScreen, renderTargetOffset, boxMin, boxMax, world2texOffset, world2texScale); } #endif void raycasterKernelIso2(RaycasterKernelParams& params) { #ifdef RAYCASTER_ENABLE_ISO2 // color mode isn't used -> directly call RAYCASTER_COMPUTE_SWITCH switch(params.filterMode) { #ifdef RAYCASTER_ENABLE_LINEAR case TEXTURE_FILTER_LINEAR : RAYCASTER_COMPUTE_SWITCH_RT(iso2Kernel, TEXTURE_FILTER_LINEAR, COLOR_MODE_UI); break; #endif #ifdef RAYCASTER_ENABLE_CUBIC case TEXTURE_FILTER_CUBIC : RAYCASTER_COMPUTE_SWITCH_RT(iso2Kernel, TEXTURE_FILTER_CUBIC, COLOR_MODE_UI); break; #endif } #endif } void raycasterKernelIso2Si(RaycasterKernelParams& params) { #ifdef RAYCASTER_ENABLE_ISO2 // color mode isn't used -> directly call RAYCASTER_COMPUTE_SWITCH switch(params.filterMode) { #ifdef RAYCASTER_ENABLE_LINEAR case TEXTURE_FILTER_LINEAR : 
RAYCASTER_COMPUTE_SWITCH_RT(iso2SiKernel, TEXTURE_FILTER_LINEAR, COLOR_MODE_UI); break; #endif #ifdef RAYCASTER_ENABLE_CUBIC case TEXTURE_FILTER_CUBIC : RAYCASTER_COMPUTE_SWITCH_RT(iso2SiKernel, TEXTURE_FILTER_CUBIC, COLOR_MODE_UI); break; #endif } #endif }
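/*
 * iso2Step()/iso2Raycast() above accumulate colour with standard front-to-back
 * "over" compositing: sum += (1 - sum.w) * src, stopping once sum.w exceeds
 * 0.999.  A scalar host sketch of that blend; the RGBA struct is illustrative
 * and the colours returned by the shading helpers are assumed to be
 * premultiplied by their alpha.
 */
struct RGBA { float r, g, b, a; };

static void composite_front_to_back(RGBA& sum, const RGBA& src)
{
    float w = 1.0f - sum.a;      // transparency still available along the ray
    sum.r += w * src.r;
    sum.g += w * src.g;
    sum.b += w * src.b;
    sum.a += w * src.a;          // caller may stop marching once sum.a > 0.999f
}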
a66eff27bbb1bdb096a5eaf2ba68ada38a032c48.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "image-gpu.hh" #include "stb_image.h" #include <iostream> #include <fstream> #include <cstdio> __global__ void to_gray_gpu(unsigned char* gray_data, unsigned char* data, int width, int height, int nb_channels); __global__ void padd_image_gpu(unsigned char* padded_gray_data, unsigned char* gray_data, int width, int height, int padded_width, int padded_height); ImageGPU::ImageGPU(const char* path) { unsigned char* stbi_data = stbi_load(path, &width, &height, &nb_channels, 0); if (!stbi_data) { std::cout << "FAILURE to load the image: " << path << '\n'; return; } patch_size = 16; int size = width * height * nb_channels; // compute padded width / height padded_width = width + patch_size - width % patch_size; padded_height = height + patch_size - height % patch_size; // make allocations hipMallocManaged(&data, sizeof(unsigned char) * size); cudaCheckError(); hipMallocManaged(&gray_data, sizeof(unsigned char) * width * height); cudaCheckError(); hipMallocManaged(&padded_gray_data, sizeof(unsigned char) * padded_width * padded_height); cudaCheckError(); // copy the data to GPU hipMemcpy(data, stbi_data, sizeof(unsigned char) * size, hipMemcpyHostToDevice); cudaCheckError(); // free the stbi data stbi_image_free(stbi_data); } ImageGPU::~ImageGPU() { hipFree(data); hipFree(gray_data); hipFree(padded_gray_data); } void ImageGPU::to_gray() { int nb_blocks_x = 50; int nb_blocks_y = 50; dim3 blocks_(nb_blocks_x, nb_blocks_y); dim3 threads_((height + nb_blocks_x) / nb_blocks_x, (width + nb_blocks_y) / nb_blocks_y); hipLaunchKernelGGL(( to_gray_gpu), dim3(blocks_), dim3(threads_), 0, 0, gray_data, data, width, height, nb_channels); cudaCheckError(); hipDeviceSynchronize(); cudaCheckError(); } void ImageGPU::padd_image() { int nb_blocks_x = 50; int nb_blocks_y = 50; dim3 blocks_(nb_blocks_x, nb_blocks_y); dim3 threads_((height + nb_blocks_x) / nb_blocks_x, (width + nb_blocks_y) / nb_blocks_y); hipLaunchKernelGGL(( padd_image_gpu), dim3(blocks_), dim3(threads_), 0, 0, padded_gray_data, gray_data, width, height, padded_width, padded_height); cudaCheckError(); hipDeviceSynchronize(); cudaCheckError(); } __global__ void compute_blocks_device(int window_size, unsigned char* blocks_device, unsigned char* padded_gray_data, int p_size, int nb_tiles_x, int padded_width, int padded_height, int patch_size) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x >= padded_width || y >= padded_height) return; int i = x + y * padded_width; int new_index = (i % patch_size) + p_size * ((i / patch_size) % nb_tiles_x) + patch_size * ((i / (nb_tiles_x * patch_size)) % patch_size) + p_size * nb_tiles_x * (i / (p_size * nb_tiles_x)); blocks_device[new_index] = padded_gray_data[i]; } BlocksGPU ImageGPU::to_blocks(int window_size) const { int nb_blocks = padded_width / patch_size * padded_height / patch_size; // allocation of blocks_device unsigned char* blocks_device; int size = nb_blocks * patch_size * patch_size; hipMallocManaged(&blocks_device, sizeof(unsigned char) * size); cudaCheckError(); int p_size = patch_size * patch_size; int nb_tiles_x = padded_width / patch_size; dim3 threads_(patch_size, patch_size); dim3 blocks_(padded_width / patch_size, padded_height / patch_size); hipLaunchKernelGGL(( compute_blocks_device), dim3(blocks_), dim3(threads_), 0, 0, window_size, blocks_device, padded_gray_data, p_size, nb_tiles_x, padded_width, padded_height, patch_size); return 
BlocksGPU(blocks_device, nb_blocks, patch_size, window_size); } void ImageGPU::save_gray_ppm(const char* path) const { std::ofstream ofs(path, std::ios_base::out | std::ios_base::binary); ofs << "P6" << std::endl << width << ' ' << height << std::endl << "255" << std::endl; for (int j = 0; j < height; ++j) for (int i = 0; i < width; ++i) ofs << (char) gray_data[j * width + i] << (char) gray_data[j * width + i] << (char) gray_data[j * width + i]; ofs.close(); } void ImageGPU::save_padded_gray_ppm(const char* path) const { std::ofstream ofs(path, std::ios_base::out | std::ios_base::binary); ofs << "P6" << std::endl << padded_width << ' ' << padded_height << std::endl << "255" << std::endl; for (int j = 0; j < padded_height; ++j) for (int i = 0; i < padded_width; ++i) ofs << (char) padded_gray_data[j * padded_width + i] << (char) padded_gray_data[j * padded_width + i] << (char) padded_gray_data[j * padded_width + i]; ofs.close(); } // ------------- // GPU functions // ------------- __global__ void to_gray_gpu(unsigned char* gray_data, unsigned char* data, int width, int height, int nb_channels) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < height && j < width) { // get r / g / b float r = (float) data[i * width * nb_channels + j * nb_channels]; float g = (float) data[i * width * nb_channels + j * nb_channels + 1]; float b = (float) data[i * width * nb_channels + j * nb_channels + 2]; // to gray float pixel_intensity = r * 0.2989 + g * 0.5870 + b * 0.1140; gray_data[i * width + j] = (unsigned char) pixel_intensity; } } __global__ void padd_image_gpu(unsigned char* padded_gray_data, unsigned char* gray_data, int width, int height, int padded_width, int padded_height) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < padded_height && j < padded_width) { if (i < height && j < width) { padded_gray_data[i * padded_width + j] = gray_data[i * width + j]; } else if (i < height && j >= width) { padded_gray_data[i * padded_width + j] = gray_data[i * width + width - 1]; } else if (i >= height && j < width) { padded_gray_data[i * padded_width + j] = gray_data[(height - 1) * width + j]; } else { padded_gray_data[i * padded_width + j] = gray_data[(height - 1) * width + width - 1]; } } }
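The ImageGPU implementation above checks every runtime call with a cudaCheckError() macro whose definition lives outside this file (presumably in image-gpu.hh). A minimal sketch of such a macro, assuming it only needs to report the last runtime error and abort, could look like the following; the exact name and behaviour of the project's macro are not shown in this listing, so this is an illustration rather than the real definition.

// Hypothetical error-checking macro in the spirit of the cudaCheckError()
// calls above; the project's own definition is assumed to live in image-gpu.hh.
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define cudaCheckError()                                                    \
    do {                                                                    \
        hipError_t err_ = hipGetLastError();                                \
        if (err_ != hipSuccess) {                                           \
            std::fprintf(stderr, "GPU error '%s' at %s:%d\n",               \
                         hipGetErrorString(err_), __FILE__, __LINE__);      \
            std::exit(EXIT_FAILURE);                                        \
        }                                                                   \
    } while (0)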
a66eff27bbb1bdb096a5eaf2ba68ada38a032c48.cu
#include "image-gpu.hh" #include "stb_image.h" #include <iostream> #include <fstream> #include <cstdio> __global__ void to_gray_gpu(unsigned char* gray_data, unsigned char* data, int width, int height, int nb_channels); __global__ void padd_image_gpu(unsigned char* padded_gray_data, unsigned char* gray_data, int width, int height, int padded_width, int padded_height); ImageGPU::ImageGPU(const char* path) { unsigned char* stbi_data = stbi_load(path, &width, &height, &nb_channels, 0); if (!stbi_data) { std::cout << "FAILURE to load the image: " << path << '\n'; return; } patch_size = 16; int size = width * height * nb_channels; // compute padded width / height padded_width = width + patch_size - width % patch_size; padded_height = height + patch_size - height % patch_size; // make allocations cudaMallocManaged(&data, sizeof(unsigned char) * size); cudaCheckError(); cudaMallocManaged(&gray_data, sizeof(unsigned char) * width * height); cudaCheckError(); cudaMallocManaged(&padded_gray_data, sizeof(unsigned char) * padded_width * padded_height); cudaCheckError(); // copy the data to GPU cudaMemcpy(data, stbi_data, sizeof(unsigned char) * size, cudaMemcpyHostToDevice); cudaCheckError(); // free the stbi data stbi_image_free(stbi_data); } ImageGPU::~ImageGPU() { cudaFree(data); cudaFree(gray_data); cudaFree(padded_gray_data); } void ImageGPU::to_gray() { int nb_blocks_x = 50; int nb_blocks_y = 50; dim3 blocks_(nb_blocks_x, nb_blocks_y); dim3 threads_((height + nb_blocks_x) / nb_blocks_x, (width + nb_blocks_y) / nb_blocks_y); to_gray_gpu<<<blocks_, threads_>>>(gray_data, data, width, height, nb_channels); cudaCheckError(); cudaDeviceSynchronize(); cudaCheckError(); } void ImageGPU::padd_image() { int nb_blocks_x = 50; int nb_blocks_y = 50; dim3 blocks_(nb_blocks_x, nb_blocks_y); dim3 threads_((height + nb_blocks_x) / nb_blocks_x, (width + nb_blocks_y) / nb_blocks_y); padd_image_gpu<<<blocks_, threads_>>>(padded_gray_data, gray_data, width, height, padded_width, padded_height); cudaCheckError(); cudaDeviceSynchronize(); cudaCheckError(); } __global__ void compute_blocks_device(int window_size, unsigned char* blocks_device, unsigned char* padded_gray_data, int p_size, int nb_tiles_x, int padded_width, int padded_height, int patch_size) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x >= padded_width || y >= padded_height) return; int i = x + y * padded_width; int new_index = (i % patch_size) + p_size * ((i / patch_size) % nb_tiles_x) + patch_size * ((i / (nb_tiles_x * patch_size)) % patch_size) + p_size * nb_tiles_x * (i / (p_size * nb_tiles_x)); blocks_device[new_index] = padded_gray_data[i]; } BlocksGPU ImageGPU::to_blocks(int window_size) const { int nb_blocks = padded_width / patch_size * padded_height / patch_size; // allocation of blocks_device unsigned char* blocks_device; int size = nb_blocks * patch_size * patch_size; cudaMallocManaged(&blocks_device, sizeof(unsigned char) * size); cudaCheckError(); int p_size = patch_size * patch_size; int nb_tiles_x = padded_width / patch_size; dim3 threads_(patch_size, patch_size); dim3 blocks_(padded_width / patch_size, padded_height / patch_size); compute_blocks_device<<<blocks_, threads_>>>(window_size, blocks_device, padded_gray_data, p_size, nb_tiles_x, padded_width, padded_height, patch_size); return BlocksGPU(blocks_device, nb_blocks, patch_size, window_size); } void ImageGPU::save_gray_ppm(const char* path) const { std::ofstream ofs(path, std::ios_base::out | std::ios_base::binary); ofs << "P6" 
<< std::endl << width << ' ' << height << std::endl << "255" << std::endl; for (int j = 0; j < height; ++j) for (int i = 0; i < width; ++i) ofs << (char) gray_data[j * width + i] << (char) gray_data[j * width + i] << (char) gray_data[j * width + i]; ofs.close(); } void ImageGPU::save_padded_gray_ppm(const char* path) const { std::ofstream ofs(path, std::ios_base::out | std::ios_base::binary); ofs << "P6" << std::endl << padded_width << ' ' << padded_height << std::endl << "255" << std::endl; for (int j = 0; j < padded_height; ++j) for (int i = 0; i < padded_width; ++i) ofs << (char) padded_gray_data[j * padded_width + i] << (char) padded_gray_data[j * padded_width + i] << (char) padded_gray_data[j * padded_width + i]; ofs.close(); } // ------------- // GPU functions // ------------- __global__ void to_gray_gpu(unsigned char* gray_data, unsigned char* data, int width, int height, int nb_channels) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < height && j < width) { // get r / g / b float r = (float) data[i * width * nb_channels + j * nb_channels]; float g = (float) data[i * width * nb_channels + j * nb_channels + 1]; float b = (float) data[i * width * nb_channels + j * nb_channels + 2]; // to gray float pixel_intensity = r * 0.2989 + g * 0.5870 + b * 0.1140; gray_data[i * width + j] = (unsigned char) pixel_intensity; } } __global__ void padd_image_gpu(unsigned char* padded_gray_data, unsigned char* gray_data, int width, int height, int padded_width, int padded_height) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < padded_height && j < padded_width) { if (i < height && j < width) { padded_gray_data[i * padded_width + j] = gray_data[i * width + j]; } else if (i < height && j >= width) { padded_gray_data[i * padded_width + j] = gray_data[i * width + width - 1]; } else if (i >= height && j < width) { padded_gray_data[i * padded_width + j] = gray_data[(height - 1) * width + j]; } else { padded_gray_data[i * padded_width + j] = gray_data[(height - 1) * width + width - 1]; } } }
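to_gray() and padd_image() above fix the grid at 50x50 blocks and derive the block size from the image dimensions, so a large enough image (roughly 1550x1550 and up) would push the block past CUDA's 1024-threads-per-block limit. A more conventional arrangement, sketched below purely as an illustration, fixes the block size and computes the grid with a ceiling division; the index mapping (x over rows, y over columns) follows the kernels above.

// Illustrative launch configuration: fixed 16x16 block, grid sized by
// ceiling division so every pixel is covered for any image size.
dim3 threads_(16, 16);
dim3 blocks_((height + threads_.x - 1) / threads_.x,
             (width  + threads_.y - 1) / threads_.y);
to_gray_gpu<<<blocks_, threads_>>>(gray_data, data, width, height, nb_channels);
cudaCheckError();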
f853d9296fc73867e5dcbdd5fe67b3aca0e52c3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Author: Dongwei Shi //Created: 06/15/2016 //Description: this program is for template matching with cuda. The program is expected to template match several template simutaneously #include <stdio.h> #include <iostream> #include <math.h> #include <vector> #include <unistd.h> #include <string> #include <opencv2/opencv.hpp> #include <opencv2/nonfree/features2d.hpp> #include </usr/local/cuda-8.0/include/cuda.h> #include </usr/local/cuda-8.0/include/hipfft.h> #include </usr/local/cuda-8.0/include/hipfft.h> #define KERNEL_WIDTH 17 #define KERNEL_RADIUS (KERNEL_WIDTH/2) #define TILE_WIDTH (33-KERNEL_WIDTH) #define BLK_SIZE (TILE_WIDTH+KERNEL_WIDTH-1) #define TMP_NUM 48 #define ACCURATE_MODE KERNEL_WIDTH #define SPEED_MODE 1 #define RECORD 0 #define CROP_PARAM 2.2 using namespace std; using namespace cv; //global image and templates Mat img, gray_img, prev_img; Mat templs[TMP_NUM]; Mat img_vec[TMP_NUM]; Point kpt_vec[TMP_NUM]; Point ext_vec[TMP_NUM]; vector<Point2f > corners; int dis[TMP_NUM]; //deviceKernel for storing the templates __constant__ float deviceKernel[TMP_NUM*KERNEL_WIDTH*KERNEL_WIDTH]; /////////////////////////////////////////////////////////////////// /* conv2d * Description: This funtion is CUDA kernel. Where perform the 2D convolution of the images and templates. * Using CV_TM_CCOEFF_NORMED method for template matching. Simutaneously perform 2D convolution * on several images with specific templates. * Input: A -- the input data of images * x_size -- the image width * y_size -- the image height * template_num -- the total templates need to be matched. * Output: B -- the convolution results of the images. * * */ /////////////////////////////////////////////////////////////////// __global__ void conv2d(float* A, float* B, const int x_size, const int y_size, const int template_num) { //allocated shared memory for storing the image __shared__ float Nds[BLK_SIZE][BLK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int x_out = bx*TILE_WIDTH + tx; int y_out = by*TILE_WIDTH + ty; int x_in = x_out - KERNEL_RADIUS; int y_in = y_out - KERNEL_RADIUS; float res = 0.0; float templ_res = 0.0; float img_res = 0.0; //copy the image to the shared memeory if((x_in>=0) && (x_in<x_size) && (y_in>=0) && (y_in<y_size) && (bz>=0) && (bz<template_num) ) { Nds[ty][tx] = A[bz*x_size*y_size + y_in*x_size + x_in]; } else { Nds[ty][tx] = 0.0; } __syncthreads(); //perform convolution below using CV_TM_CCOEFF_NORMED method for template matching if( (tx<TILE_WIDTH) && (ty<TILE_WIDTH) && (x_out<x_size) && (y_out<y_size) && (bz>=0) && (bz<template_num)) { res = 0.0; templ_res = 0.0; img_res = 0.0; for( int idx_y=0; idx_y<KERNEL_WIDTH; idx_y++ ) { for( int idx_x=0; idx_x<SPEED_MODE; idx_x++ ) { templ_res += pow(deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x],2); img_res += pow(Nds[ty+idx_y][tx+idx_x],2); res += Nds[ty+idx_y][tx+idx_x] * deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x]; } } //copy the result into the output data __syncthreads(); if((x_out<x_size) && (y_out<y_size) && (bz<template_num)) { B[bz*x_size*y_size + y_out*x_size + x_out] = res/sqrt(templ_res*img_res); } __syncthreads(); } } /////////////////////////////////////////////////////////////////// /* cuda_tp_img * Description: This function use for preparation step for the * cuda kernel. It is allocate several memory space * on both GPU and CPU. 
It also be used to select the * peak value of the convolution results * Input: templates number -- the total number of templates that need to * be matched. * Output: 0 -- success, -1 -- failure * * */ /////////////////////////////////////////////////////////////////// int cuda_tp_img(int template_num) { //get size of templates and images. int x_size = img_vec[0].cols; int y_size = img_vec[0].rows; int tmp_x_size = KERNEL_WIDTH;//templs[0].cols; int tmp_y_size = KERNEL_WIDTH;//templs[0].rows; int img_size = x_size * y_size; int tmpl_size = tmp_x_size * tmp_y_size; //allocate a space to store the image intensity float* host_img = (float*) malloc(sizeof(float)*img_size*template_num); float* host_templ = (float*) malloc(sizeof(float)*tmpl_size*template_num); float* gpu_out = (float*) malloc(sizeof(float)*img_size*template_num); float* device_img_input; float* device_img_output; //copy the intensity value from image for(int img_idx=0; img_idx<template_num; img_idx++) { for(int y=0; y<y_size; y++) { for(int x=0; x<x_size; x++) { Scalar intensity = img_vec[img_idx].at<uchar>(y,x); host_img[y*x_size+x + img_idx*img_size] = intensity.val[0]; } } } //copy the intensity value from templates for(int tmpl_idx=0; tmpl_idx<template_num; tmpl_idx++) { for(int y=0; y<tmp_y_size; y++) { for(int x=0; x<tmp_x_size; x++) { Scalar intensity = templs[tmpl_idx].at<uchar>(y,x); host_templ[y*tmp_x_size+x+tmpl_idx*tmpl_size] = intensity.val[0]; } } } //allocate memory in cuda global memory hipMalloc( (void**)&device_img_input, img_size*sizeof(float)*template_num ); hipMalloc( (void**)&device_img_output, img_size*sizeof(float)*template_num ); hipMemcpy( device_img_input, host_img, img_size*sizeof(float)*template_num, hipMemcpyHostToDevice); hipMemcpyToSymbol( deviceKernel, host_templ, tmpl_size*sizeof(float)*template_num); //assign blocks and threads dim3 Dimblock(BLK_SIZE, BLK_SIZE, 1); dim3 DimGrid(((TILE_WIDTH+x_size)-1/TILE_WIDTH), ((TILE_WIDTH+y_size)-1/TILE_WIDTH),template_num); //calling the convolution gpu function hipLaunchKernelGGL(( conv2d) , dim3(DimGrid), dim3(Dimblock) , 0, 0, device_img_input, device_img_output, x_size, y_size, template_num); hipDeviceSynchronize(); hipMemcpy( gpu_out, device_img_output, img_size*sizeof(float)*template_num, hipMemcpyDeviceToHost); //Selecting peak value of each image's convolution result and label out on the image. 
float res = 0; int y_pos; for(int idx=0; idx<template_num; idx++) { y_pos = 0; res = 0; for(int y=0; y<y_size; y++) { for(int x=0; x<x_size; x++) { if(gpu_out[idx*img_size+y*x_size+x]>res) { res = gpu_out[idx*img_size+y*x_size+x]; y_pos = y; } } } ext_vec[idx].x = kpt_vec[idx].x; ext_vec[idx].y = (img.rows/CROP_PARAM)+dis[idx]+y_pos; rectangle(img, Point(kpt_vec[idx].x-KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos-KERNEL_RADIUS), Point(kpt_vec[idx].x+KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos+KERNEL_RADIUS), Scalar(0,255,0 ), 1, 4); line(img,kpt_vec[idx],Point(kpt_vec[idx].x,(img.rows/CROP_PARAM)+dis[idx]+y_pos),Scalar(0,0,255),1,8,0); } //Free the allocated memory before hipFree(device_img_input); hipFree(device_img_output); free(host_img); free(host_templ); free(gpu_out); return 0; } ///////////////////////////////////////////////////////////////////////////////////// int main(int argc, char*argv[]) { //declear varible here int template_num; int start = 0; vector<Point2f > pred_vec; vector<Point2f > ref_pred_vec; Mat status; Mat ref_status; Mat err; Mat ref_err; //VideoWriter video("reflection_matching.avi", CV_FOURCC('M','J','P','G'), 10, Size(800, 600),true); char filename[256]; while(fscanf(stdin, "%s", filename)!=EOF) { template_num = TMP_NUM; img = imread(filename, -1); if(!img.data) { cout << "Problem loading image !!!" << endl; return -1; } //convert the image to gray scale in order to only have one pointer cvtColor(img, gray_img, CV_BGR2GRAY); //cropping the image Mat hf_img = gray_img(Rect(0,0,gray_img.cols,gray_img.rows/CROP_PARAM)); Mat mask; bool useHarrisDetector = false; goodFeaturesToTrack(hf_img, corners, TMP_NUM, 0.01, 20.0, mask, 3, useHarrisDetector, 0.04); //imshow("hf_img", hf_img); //waitKey(0); if(corners.size() == 0) { cout << "bad frame" << endl; continue; } Point kpt; for(int temp_generate_idx = 0; temp_generate_idx<template_num; temp_generate_idx++) { kpt = corners[temp_generate_idx]; //get the predict distance dis[temp_generate_idx] = gray_img.rows/CROP_PARAM-kpt.y; //boundary check for the images if( kpt.x < KERNEL_RADIUS) kpt.x = KERNEL_RADIUS; if( kpt.x > (img.cols-KERNEL_WIDTH) ) kpt.x = img.cols-KERNEL_WIDTH; if( kpt.y < KERNEL_RADIUS) kpt.y = KERNEL_RADIUS; if( kpt.y > ((img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH) ) kpt.y = (img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH; //label the original feature point of the image rectangle(img, Point(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS), Point(kpt.x+KERNEL_RADIUS,kpt.y+KERNEL_RADIUS), Scalar(255,0,0 ), 1, 4); Mat curr_tmpl = hf_img(Rect(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS,KERNEL_WIDTH,KERNEL_WIDTH)); //flip the template in order to find the reflections flip(curr_tmpl,templs[temp_generate_idx],0); /* imshow("img", img); waitKey(0); printf("%d:%d\n", temp_generate_idx,dis[temp_generate_idx]); */ //cropping the image img_vec[temp_generate_idx] = gray_img(Rect(kpt.x-KERNEL_RADIUS,gray_img.rows/CROP_PARAM+dis[temp_generate_idx],KERNEL_WIDTH,gray_img.rows-(gray_img.rows/CROP_PARAM+dis[temp_generate_idx]))); /* imshow("temp_img",img_vec[temp_generate_idx]); waitKey(0); */ kpt_vec[temp_generate_idx] = kpt; } cuda_tp_img(template_num); if( start == 0 ) { start = 1; prev_img = img; continue; } /////**optical flow track starts here**///// calcOpticalFlowPyrLK(prev_img, img, corners, pred_vec, status, err); //calcOpticalFlowPyrLK(prev_img, img, ref_corners, ref_pred_vec, ref_status, ref_err); prev_img = img; //video.write(img); //line(img, Point(0,img.rows/CROP_PARAM), 
Point(img.cols,img.rows/CROP_PARAM), Scalar(110,220,0)); imshow("img", img); waitKey(1); } }
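The conv2d kernel above is described as CV_TM_CCOEFF_NORMED, but the score it accumulates, sum(I*T) / sqrt(sum(T^2) * sum(I^2)) over the window, corresponds to OpenCV's CV_TM_CCORR_NORMED (no mean subtraction), and with SPEED_MODE the inner loop only visits one column per row of the window. A small host-side reference of the full-window score, useful for spot-checking a single match position, might look like the sketch below; it is a standalone illustration, not part of the original program.

// Host reference for the normalized cross-correlation score at one (x, y)
// position; img and tmpl are row-major grayscale buffers, k is KERNEL_WIDTH.
float ccorr_normed_at(const float* img, int img_w,
                      const float* tmpl, int k, int x, int y)
{
    float num = 0.f, t2 = 0.f, i2 = 0.f;
    for (int dy = 0; dy < k; ++dy)
        for (int dx = 0; dx < k; ++dx) {
            float iv = img[(y + dy) * img_w + (x + dx)];
            float tv = tmpl[dy * k + dx];
            num += iv * tv;
            t2  += tv * tv;
            i2  += iv * iv;
        }
    return num / sqrtf(t2 * i2);
}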
f853d9296fc73867e5dcbdd5fe67b3aca0e52c3a.cu
//Author: Dongwei Shi //Created: 06/15/2016 //Description: this program is for template matching with cuda. The program is expected to template match several template simutaneously #include <stdio.h> #include <iostream> #include <math.h> #include <vector> #include <unistd.h> #include <string> #include <opencv2/opencv.hpp> #include <opencv2/nonfree/features2d.hpp> #include </usr/local/cuda-8.0/include/cuda.h> #include </usr/local/cuda-8.0/include/cufft.h> #include </usr/local/cuda-8.0/include/cufft.h> #define KERNEL_WIDTH 17 #define KERNEL_RADIUS (KERNEL_WIDTH/2) #define TILE_WIDTH (33-KERNEL_WIDTH) #define BLK_SIZE (TILE_WIDTH+KERNEL_WIDTH-1) #define TMP_NUM 48 #define ACCURATE_MODE KERNEL_WIDTH #define SPEED_MODE 1 #define RECORD 0 #define CROP_PARAM 2.2 using namespace std; using namespace cv; //global image and templates Mat img, gray_img, prev_img; Mat templs[TMP_NUM]; Mat img_vec[TMP_NUM]; Point kpt_vec[TMP_NUM]; Point ext_vec[TMP_NUM]; vector<Point2f > corners; int dis[TMP_NUM]; //deviceKernel for storing the templates __constant__ float deviceKernel[TMP_NUM*KERNEL_WIDTH*KERNEL_WIDTH]; /////////////////////////////////////////////////////////////////// /* conv2d * Description: This funtion is CUDA kernel. Where perform the 2D convolution of the images and templates. * Using CV_TM_CCOEFF_NORMED method for template matching. Simutaneously perform 2D convolution * on several images with specific templates. * Input: A -- the input data of images * x_size -- the image width * y_size -- the image height * template_num -- the total templates need to be matched. * Output: B -- the convolution results of the images. * * */ /////////////////////////////////////////////////////////////////// __global__ void conv2d(float* A, float* B, const int x_size, const int y_size, const int template_num) { //allocated shared memory for storing the image __shared__ float Nds[BLK_SIZE][BLK_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int x_out = bx*TILE_WIDTH + tx; int y_out = by*TILE_WIDTH + ty; int x_in = x_out - KERNEL_RADIUS; int y_in = y_out - KERNEL_RADIUS; float res = 0.0; float templ_res = 0.0; float img_res = 0.0; //copy the image to the shared memeory if((x_in>=0) && (x_in<x_size) && (y_in>=0) && (y_in<y_size) && (bz>=0) && (bz<template_num) ) { Nds[ty][tx] = A[bz*x_size*y_size + y_in*x_size + x_in]; } else { Nds[ty][tx] = 0.0; } __syncthreads(); //perform convolution below using CV_TM_CCOEFF_NORMED method for template matching if( (tx<TILE_WIDTH) && (ty<TILE_WIDTH) && (x_out<x_size) && (y_out<y_size) && (bz>=0) && (bz<template_num)) { res = 0.0; templ_res = 0.0; img_res = 0.0; for( int idx_y=0; idx_y<KERNEL_WIDTH; idx_y++ ) { for( int idx_x=0; idx_x<SPEED_MODE; idx_x++ ) { templ_res += pow(deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x],2); img_res += pow(Nds[ty+idx_y][tx+idx_x],2); res += Nds[ty+idx_y][tx+idx_x] * deviceKernel[bz*KERNEL_WIDTH*KERNEL_WIDTH+idx_y*KERNEL_WIDTH+idx_x]; } } //copy the result into the output data __syncthreads(); if((x_out<x_size) && (y_out<y_size) && (bz<template_num)) { B[bz*x_size*y_size + y_out*x_size + x_out] = res/sqrt(templ_res*img_res); } __syncthreads(); } } /////////////////////////////////////////////////////////////////// /* cuda_tp_img * Description: This function use for preparation step for the * cuda kernel. It is allocate several memory space * on both GPU and CPU. 
It also be used to select the * peak value of the convolution results * Input: templates number -- the total number of templates that need to * be matched. * Output: 0 -- success, -1 -- failure * * */ /////////////////////////////////////////////////////////////////// int cuda_tp_img(int template_num) { //get size of templates and images. int x_size = img_vec[0].cols; int y_size = img_vec[0].rows; int tmp_x_size = KERNEL_WIDTH;//templs[0].cols; int tmp_y_size = KERNEL_WIDTH;//templs[0].rows; int img_size = x_size * y_size; int tmpl_size = tmp_x_size * tmp_y_size; //allocate a space to store the image intensity float* host_img = (float*) malloc(sizeof(float)*img_size*template_num); float* host_templ = (float*) malloc(sizeof(float)*tmpl_size*template_num); float* gpu_out = (float*) malloc(sizeof(float)*img_size*template_num); float* device_img_input; float* device_img_output; //copy the intensity value from image for(int img_idx=0; img_idx<template_num; img_idx++) { for(int y=0; y<y_size; y++) { for(int x=0; x<x_size; x++) { Scalar intensity = img_vec[img_idx].at<uchar>(y,x); host_img[y*x_size+x + img_idx*img_size] = intensity.val[0]; } } } //copy the intensity value from templates for(int tmpl_idx=0; tmpl_idx<template_num; tmpl_idx++) { for(int y=0; y<tmp_y_size; y++) { for(int x=0; x<tmp_x_size; x++) { Scalar intensity = templs[tmpl_idx].at<uchar>(y,x); host_templ[y*tmp_x_size+x+tmpl_idx*tmpl_size] = intensity.val[0]; } } } //allocate memory in cuda global memory cudaMalloc( (void**)&device_img_input, img_size*sizeof(float)*template_num ); cudaMalloc( (void**)&device_img_output, img_size*sizeof(float)*template_num ); cudaMemcpy( device_img_input, host_img, img_size*sizeof(float)*template_num, cudaMemcpyHostToDevice); cudaMemcpyToSymbol( deviceKernel, host_templ, tmpl_size*sizeof(float)*template_num); //assign blocks and threads dim3 Dimblock(BLK_SIZE, BLK_SIZE, 1); dim3 DimGrid(((TILE_WIDTH+x_size)-1/TILE_WIDTH), ((TILE_WIDTH+y_size)-1/TILE_WIDTH),template_num); //calling the convolution gpu function conv2d <<< DimGrid, Dimblock >>>( device_img_input, device_img_output, x_size, y_size, template_num); cudaDeviceSynchronize(); cudaMemcpy( gpu_out, device_img_output, img_size*sizeof(float)*template_num, cudaMemcpyDeviceToHost); //Selecting peak value of each image's convolution result and label out on the image. 
float res = 0; int y_pos; for(int idx=0; idx<template_num; idx++) { y_pos = 0; res = 0; for(int y=0; y<y_size; y++) { for(int x=0; x<x_size; x++) { if(gpu_out[idx*img_size+y*x_size+x]>res) { res = gpu_out[idx*img_size+y*x_size+x]; y_pos = y; } } } ext_vec[idx].x = kpt_vec[idx].x; ext_vec[idx].y = (img.rows/CROP_PARAM)+dis[idx]+y_pos; rectangle(img, Point(kpt_vec[idx].x-KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos-KERNEL_RADIUS), Point(kpt_vec[idx].x+KERNEL_RADIUS,(img.rows/CROP_PARAM)+dis[idx]+y_pos+KERNEL_RADIUS), Scalar(0,255,0 ), 1, 4); line(img,kpt_vec[idx],Point(kpt_vec[idx].x,(img.rows/CROP_PARAM)+dis[idx]+y_pos),Scalar(0,0,255),1,8,0); } //Free the allocated memory before cudaFree(device_img_input); cudaFree(device_img_output); free(host_img); free(host_templ); free(gpu_out); return 0; } ///////////////////////////////////////////////////////////////////////////////////// int main(int argc, char*argv[]) { //declear varible here int template_num; int start = 0; vector<Point2f > pred_vec; vector<Point2f > ref_pred_vec; Mat status; Mat ref_status; Mat err; Mat ref_err; //VideoWriter video("reflection_matching.avi", CV_FOURCC('M','J','P','G'), 10, Size(800, 600),true); char filename[256]; while(fscanf(stdin, "%s", filename)!=EOF) { template_num = TMP_NUM; img = imread(filename, -1); if(!img.data) { cout << "Problem loading image !!!" << endl; return -1; } //convert the image to gray scale in order to only have one pointer cvtColor(img, gray_img, CV_BGR2GRAY); //cropping the image Mat hf_img = gray_img(Rect(0,0,gray_img.cols,gray_img.rows/CROP_PARAM)); Mat mask; bool useHarrisDetector = false; goodFeaturesToTrack(hf_img, corners, TMP_NUM, 0.01, 20.0, mask, 3, useHarrisDetector, 0.04); //imshow("hf_img", hf_img); //waitKey(0); if(corners.size() == 0) { cout << "bad frame" << endl; continue; } Point kpt; for(int temp_generate_idx = 0; temp_generate_idx<template_num; temp_generate_idx++) { kpt = corners[temp_generate_idx]; //get the predict distance dis[temp_generate_idx] = gray_img.rows/CROP_PARAM-kpt.y; //boundary check for the images if( kpt.x < KERNEL_RADIUS) kpt.x = KERNEL_RADIUS; if( kpt.x > (img.cols-KERNEL_WIDTH) ) kpt.x = img.cols-KERNEL_WIDTH; if( kpt.y < KERNEL_RADIUS) kpt.y = KERNEL_RADIUS; if( kpt.y > ((img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH) ) kpt.y = (img.rows/CROP_PARAM+dis[temp_generate_idx])-KERNEL_WIDTH; //label the original feature point of the image rectangle(img, Point(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS), Point(kpt.x+KERNEL_RADIUS,kpt.y+KERNEL_RADIUS), Scalar(255,0,0 ), 1, 4); Mat curr_tmpl = hf_img(Rect(kpt.x-KERNEL_RADIUS,kpt.y-KERNEL_RADIUS,KERNEL_WIDTH,KERNEL_WIDTH)); //flip the template in order to find the reflections flip(curr_tmpl,templs[temp_generate_idx],0); /* imshow("img", img); waitKey(0); printf("%d:%d\n", temp_generate_idx,dis[temp_generate_idx]); */ //cropping the image img_vec[temp_generate_idx] = gray_img(Rect(kpt.x-KERNEL_RADIUS,gray_img.rows/CROP_PARAM+dis[temp_generate_idx],KERNEL_WIDTH,gray_img.rows-(gray_img.rows/CROP_PARAM+dis[temp_generate_idx]))); /* imshow("temp_img",img_vec[temp_generate_idx]); waitKey(0); */ kpt_vec[temp_generate_idx] = kpt; } cuda_tp_img(template_num); if( start == 0 ) { start = 1; prev_img = img; continue; } /////**optical flow track starts here**///// calcOpticalFlowPyrLK(prev_img, img, corners, pred_vec, status, err); //calcOpticalFlowPyrLK(prev_img, img, ref_corners, ref_pred_vec, ref_status, ref_err); prev_img = img; //video.write(img); //line(img, Point(0,img.rows/CROP_PARAM), 
Point(img.cols,img.rows/CROP_PARAM), Scalar(110,220,0)); imshow("img", img); waitKey(1); } }
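In cuda_tp_img above, the grid expression ((TILE_WIDTH+x_size)-1/TILE_WIDTH) parses as TILE_WIDTH + x_size - (1/TILE_WIDTH), and since 1/TILE_WIDTH is integer zero it launches x_size + TILE_WIDTH blocks per row rather than ceil(x_size/TILE_WIDTH); the bounds checks in conv2d keep the output correct, but most blocks do no useful work. Below is a sketch of the ceiling-division form the expression appears to intend (an assumption about the intent, not the original code).

// Presumed intent: one block per TILE_WIDTH x TILE_WIDTH output tile, rounded up.
dim3 Dimblock(BLK_SIZE, BLK_SIZE, 1);
dim3 DimGrid((x_size + TILE_WIDTH - 1) / TILE_WIDTH,
             (y_size + TILE_WIDTH - 1) / TILE_WIDTH,
             template_num);
conv2d<<<DimGrid, Dimblock>>>(device_img_input, device_img_output,
                              x_size, y_size, template_num);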
12537bbc9a88847782f650c93e95c7661f9003df.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mex.h"

__global__ void norm_elements(float* pIn, float* pOut, int N)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < N)
    {
        pOut[idx] += pIn[idx]*pIn[idx];
    }
}

void mexFunction(int outArraySize, mxArray *pOutArray[], int inArraySize, const mxArray *pInArray[])
{
    int i, m, n;
    //pointers to matlab input array and output array
    double *data1, *data2;
    //pointers of host data with single precision
    float *data1f, *data2f;
    //pointers of GPU data
    float *data1f_gpu, *data2f_gpu;

    m = mxGetM(pInArray[0]);
    n = mxGetN(pInArray[0]);

    //--create array for output-------------------------------------------
    pOutArray[0] = mxCreateDoubleMatrix(m,n,mxREAL);

    //--create array data on the GPU----------------------------------------
    hipMalloc((void **)&data1f_gpu,sizeof(float)*m*n);
    hipMalloc((void **)&data2f_gpu,sizeof(float)*m*n);
    //the kernel accumulates with +=, so the output buffer must start at zero
    hipMemset(data2f_gpu, 0, sizeof(float)*m*n);

    //--convert the matlab double input to single precision on the host------
    data1 = mxGetPr(pInArray[0]);
    data1f = (float *)mxMalloc(sizeof(float)*m*n);
    for (i = 0; i < m*n; i++)
    {
        data1f[i] = (float)data1[i];
    }
    hipMemcpy(data1f_gpu, data1f, sizeof(float)*m*n, hipMemcpyHostToDevice);

    //---configure GPU thread------------------------------------------------
    dim3 dimBlock(128);
    dim3 dimGrid((m*n + dimBlock.x - 1)/dimBlock.x); //round up so every element is covered

    //---call GPU function------------------------------------------------
    //data1f_gpu: input data; data2f_gpu: output data
    hipLaunchKernelGGL(( norm_elements), dim3(dimGrid), dim3(dimBlock), 0, 0, data1f_gpu, data2f_gpu, n*m);

    //--allocate host buffer for the squared elements------------------------
    data2f = (float *)mxMalloc(sizeof(float)*m*n);

    //---copy result back to host---------------------------------------------
    hipMemcpy(data2f,data2f_gpu,sizeof(float)*n*m, hipMemcpyDeviceToHost);

    //---sum the squares and take the square root (Frobenius norm)------------
    data2 = mxGetPr(pOutArray[0]);
    float tmp = 0;
    for (i = 0; i < m*n; i++)
    {
        tmp += data2f[i];
    }
    *data2 = sqrt((double)tmp);

    mxFree(data1f);
    mxFree(data2f);
    hipFree(data1f_gpu);
    hipFree(data2f_gpu);
}
12537bbc9a88847782f650c93e95c7661f9003df.cu
#include "cuda.h"
#include "mex.h"

__global__ void norm_elements(float* pIn, float* pOut, int N)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(idx < N)
    {
        pOut[idx] += pIn[idx]*pIn[idx];
    }
}

void mexFunction(int outArraySize, mxArray *pOutArray[], int inArraySize, const mxArray *pInArray[])
{
    int i, m, n;
    //pointers to matlab input array and output array
    double *data1, *data2;
    //pointers of host data with single precision
    float *data1f, *data2f;
    //pointers of GPU data
    float *data1f_gpu, *data2f_gpu;

    m = mxGetM(pInArray[0]);
    n = mxGetN(pInArray[0]);

    //--create array for output-------------------------------------------
    pOutArray[0] = mxCreateDoubleMatrix(m,n,mxREAL);

    //--create array data on the GPU----------------------------------------
    cudaMalloc((void **)&data1f_gpu,sizeof(float)*m*n);
    cudaMalloc((void **)&data2f_gpu,sizeof(float)*m*n);
    //the kernel accumulates with +=, so the output buffer must start at zero
    cudaMemset(data2f_gpu, 0, sizeof(float)*m*n);

    //--convert the matlab double input to single precision on the host------
    data1 = mxGetPr(pInArray[0]);
    data1f = (float *)mxMalloc(sizeof(float)*m*n);
    for (i = 0; i < m*n; i++)
    {
        data1f[i] = (float)data1[i];
    }
    cudaMemcpy(data1f_gpu, data1f, sizeof(float)*m*n, cudaMemcpyHostToDevice);

    //---configure GPU thread------------------------------------------------
    dim3 dimBlock(128);
    dim3 dimGrid((m*n + dimBlock.x - 1)/dimBlock.x); //round up so every element is covered

    //---call GPU function------------------------------------------------
    //data1f_gpu: input data; data2f_gpu: output data
    norm_elements<<<dimGrid, dimBlock>>>(data1f_gpu, data2f_gpu, n*m);

    //--allocate host buffer for the squared elements------------------------
    data2f = (float *)mxMalloc(sizeof(float)*m*n);

    //---copy result back to host---------------------------------------------
    cudaMemcpy(data2f,data2f_gpu,sizeof(float)*n*m, cudaMemcpyDeviceToHost);

    //---sum the squares and take the square root (Frobenius norm)------------
    data2 = mxGetPr(pOutArray[0]);
    float tmp = 0;
    for (i = 0; i < m*n; i++)
    {
        tmp += data2f[i];
    }
    *data2 = sqrt((double)tmp);

    mxFree(data1f);
    mxFree(data2f);
    cudaFree(data1f_gpu);
    cudaFree(data2f_gpu);
}
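The host code above copies every squared element back and sums them serially. Purely as an illustration (Thrust is not used by the original MEX file), the same sum can be taken on the device with thrust::reduce, so only a single scalar has to come back to the host; the names reuse the variables from the listing above.

// Illustrative device-side reduction of the squared elements with Thrust.
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>

float sum_sq = thrust::reduce(thrust::device_pointer_cast(data2f_gpu),
                              thrust::device_pointer_cast(data2f_gpu) + m * n,
                              0.0f, thrust::plus<float>());
*data2 = sqrt((double)sum_sq);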
f936cb6ae8e6fb927270e54a736bfc1e0caccf91.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ float fitness_function(float x[])
{
    float y,yp;
    float res=0;
    float y1=1+(x[0]-1)/4;
    float yn=1+(x[NUM_OF_DIMENSIONS-1]-1)/4;
    res+=pow(sin(phi*y1),2)+pow(yn-1,2);
    for(int i=0;i<NUM_OF_DIMENSIONS-1;i++)
    {
        y=1+(x[i]-1)/4;
        yp=1+(x[i+1]-1)/4;
        res+=pow(y-1,2)*(1+10*pow(sin(phi*yp),2));
    }
    return res;
}

__global__ void kernelUpdatePBest(float *positions,float *pBests,float *gBest)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    if(i>=NUM_OF_PARTICLES*NUM_OF_DIMENSIONS||i%NUM_OF_DIMENSIONS!=0)
        return;

    float tempParticle1[NUM_OF_DIMENSIONS];
    float tempParticle2[NUM_OF_DIMENSIONS];
    for(int j=0;j<NUM_OF_DIMENSIONS;j++)
    {
        tempParticle1[j]=positions[i+j];
        tempParticle2[j]=pBests[i+j];
    }
    if(fitness_function(tempParticle1)<fitness_function(tempParticle2))
    {
        for(int j=0;j<NUM_OF_DIMENSIONS;j++)
            pBests[i+j]=tempParticle1[j];
        if(fitness_function(tempParticle1)<fitness_function(gBest))
        {
            for(int j=0;j<NUM_OF_DIMENSIONS;j++)
                atomicExch(gBest+j,tempParticle1[j]);
        }
    }
}
f936cb6ae8e6fb927270e54a736bfc1e0caccf91.cu
#include "includes.h"

__device__ float fitness_function(float x[])
{
    float y,yp;
    float res=0;
    float y1=1+(x[0]-1)/4;
    float yn=1+(x[NUM_OF_DIMENSIONS-1]-1)/4;
    res+=pow(sin(phi*y1),2)+pow(yn-1,2);
    for(int i=0;i<NUM_OF_DIMENSIONS-1;i++)
    {
        y=1+(x[i]-1)/4;
        yp=1+(x[i+1]-1)/4;
        res+=pow(y-1,2)*(1+10*pow(sin(phi*yp),2));
    }
    return res;
}

__global__ void kernelUpdatePBest(float *positions,float *pBests,float *gBest)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    if(i>=NUM_OF_PARTICLES*NUM_OF_DIMENSIONS||i%NUM_OF_DIMENSIONS!=0)
        return;

    float tempParticle1[NUM_OF_DIMENSIONS];
    float tempParticle2[NUM_OF_DIMENSIONS];
    for(int j=0;j<NUM_OF_DIMENSIONS;j++)
    {
        tempParticle1[j]=positions[i+j];
        tempParticle2[j]=pBests[i+j];
    }
    if(fitness_function(tempParticle1)<fitness_function(tempParticle2))
    {
        for(int j=0;j<NUM_OF_DIMENSIONS;j++)
            pBests[i+j]=tempParticle1[j];
        if(fitness_function(tempParticle1)<fitness_function(gBest))
        {
            for(int j=0;j<NUM_OF_DIMENSIONS;j++)
                atomicExch(gBest+j,tempParticle1[j]);
        }
    }
}
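kernelUpdatePBest above evaluates fitness_function(tempParticle1) up to three times per particle. Below is a sketch of the same comparison with the two fitness values cached, written as a drop-in fragment for the end of the kernel; the per-component atomicExch update of gBest, and the race it implies, are left exactly as in the original.

    // Cache the fitness evaluations instead of recomputing them.
    float f_new   = fitness_function(tempParticle1);
    float f_pbest = fitness_function(tempParticle2);
    if (f_new < f_pbest)
    {
        for (int j = 0; j < NUM_OF_DIMENSIONS; j++)
            pBests[i + j] = tempParticle1[j];
        if (f_new < fitness_function(gBest))
        {
            for (int j = 0; j < NUM_OF_DIMENSIONS; j++)
                atomicExch(gBest + j, tempParticle1[j]);
        }
    }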
1994654428c9404e7229b35a5c2a2dc2f2d8d8a2.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
1994654428c9404e7229b35a5c2a2dc2f2d8d8a2.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 64>; using WarpShape = cutlass::gemm::GemmShape<32, 32, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, true>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
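The Convolution instantiation above uses cutlass::layout::TensorNCxHWx<32>, an int8 layout that stores the tensor as [N, C/32, H, W, 32] with 32 channels interleaved innermost. An illustrative linear-offset computation for that blocked layout is sketched below; it is written from the layout's description, not taken from cutlass itself.

// Sketch of the NC/32HW32 offset: channels are grouped into blocks of 32,
// and the 32 channels of a block are contiguous for each (h, w) position.
inline int nchw32_offset(int n, int c, int h, int w, int C, int H, int W)
{
    int cb = c / 32, ci = c % 32;
    return (((n * (C / 32) + cb) * H + h) * W + w) * 32 + ci;
}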
f6175ede8afee88e7fdfb6eccc80d0eb802df3d0.hip
// !!! This is a file automatically generated by hipify!!! //Deprecated //all functions are moved to parallalShrinker #include "shrink_parallel.cuh" #include <hip/hip_runtime.h> #include <stdio.h> #include <cmath> #include <ctime> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> void checkResult(float* hostRef, float* gpuRef, const int N) { double epsilon = 1.0E-8; bool match = true; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = false; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); } template <int D> __device__ Cell<D> Comp(Cell<D>& ca, Cell<D>& cb) { for (int i = 2; i < D; ++i) { if (ca[i] != cb[i]) return cb; } if (ca[0] <= cb[0] && ca[1] <= cb[1]) return ca; return cb; } template <int D> __global__ void ProcessBo(int j_max, Cell<D>* bo_low, Cell<D>* bo_high) { const int j = threadIdx.x; if (j <= j_max) bo_high[j] = Comp(bo_low[2 * j], bo_low[2 * j + 1]); } template <int D> __global__ void ProcessBl(int j_max, Cell<D>* bl_low, Cell<D>* bl_high, Cell<D>* bo) { const int j = threadIdx.x; if (j <= j_max) { if (j == 0) { bl_low[j] = bo[j]; return; } if (j % 2 == 1) { bl_low[j] = bl_high[(j - 1) / 2]; return; } bl_low[j] = Comp(bl_high[j / 2 - 1], bo[j]); } } int process3(std::vector<Cell<3>>& cells, std::vector<KeyCell<3>>& key_cells) { const int l = log2(cells.size()); const auto dev = 0; hipSetDevice(dev); const int cell_num = cells.size(); const auto n_bytes = l * cell_num * sizeof(Cell<3>); Cell<3>* h_bo = static_cast<Cell<3>*>(malloc(n_bytes)); Cell<3>* h_bl = static_cast<Cell<3>*>(malloc(n_bytes)); for (int i = 0; i < cells.size(); ++i) { h_bo[i] = cells[i]; } Cell<3> *d_bo, *d_bl; hipMalloc(static_cast<Cell<3>**>(&d_bo), n_bytes); hipMalloc(static_cast<Cell<3>**>(&d_bl), n_bytes); hipMemcpy(d_bo, h_bo, n_bytes, hipMemcpyHostToDevice); hipMemcpy(d_bl, h_bl, n_bytes, hipMemcpyHostToDevice); dim3 block(32 > cell_num ? cell_num : 32); dim3 grid((cell_num + block.x - 1) / block.x); for (int i = 1; i <= l; ++i) { std::cout << "pow result:" << pow(2, l - i) << std::endl; hipLaunchKernelGGL(( ProcessBo) , dim3(grid), dim3(block), 0, 0, int(pow(2, l - i)), d_bo + (i - 1) * cell_num, d_bo + i * cell_num); hipDeviceSynchronize(); } for (int i = l; i >= 0; i--) { std::cout << "pow result bl:" << pow(2, l - i) << std::endl; hipLaunchKernelGGL(( ProcessBl), dim3(grid), dim3(block), 0, 0, pow(2, l - i), d_bl + i * cell_num, d_bl + (i + 1) * cell_num, d_bo + i * cell_num); hipDeviceSynchronize(); } // sumArraysOnGPU <<< grid, block >>>(d_Bo, d_Bl); printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x); // hipMemcpy(gpu_ref, d_c, n_bytes, hipMemcpyDeviceToHost); hipFree(d_bo); hipFree(d_bl); free(h_bo); free(h_bl); std::cout << "this is parallel" << std::endl; hipDeviceReset(); thrust::host_vector<int> H(4); // initialize individual elements H[0] = 14; H[1] = 20; H[2] = 38; H[3] = 46; thrust::device_vector<int> D = H; int sum = reduce(D.begin(), D.end(), static_cast<int>(0), thrust::plus<int>()); std::cout << "sum:" << sum << std::endl; std::cout << D.size() << ", " << D[0] << std::endl; thrust::copy(D.begin(), D.end(), std::ostream_iterator<int>(std::cout, "\n")); return 0; }
f6175ede8afee88e7fdfb6eccc80d0eb802df3d0.cu
//Deprecated //all functions are moved to parallalShrinker #include "shrink_parallel.cuh" #include <cuda_runtime.h> #include <stdio.h> #include <cmath> #include <ctime> #include <device_launch_parameters.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> void checkResult(float* hostRef, float* gpuRef, const int N) { double epsilon = 1.0E-8; bool match = true; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = false; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); } template <int D> __device__ Cell<D> Comp(Cell<D>& ca, Cell<D>& cb) { for (int i = 2; i < D; ++i) { if (ca[i] != cb[i]) return cb; } if (ca[0] <= cb[0] && ca[1] <= cb[1]) return ca; return cb; } template <int D> __global__ void ProcessBo(int j_max, Cell<D>* bo_low, Cell<D>* bo_high) { const int j = threadIdx.x; if (j <= j_max) bo_high[j] = Comp(bo_low[2 * j], bo_low[2 * j + 1]); } template <int D> __global__ void ProcessBl(int j_max, Cell<D>* bl_low, Cell<D>* bl_high, Cell<D>* bo) { const int j = threadIdx.x; if (j <= j_max) { if (j == 0) { bl_low[j] = bo[j]; return; } if (j % 2 == 1) { bl_low[j] = bl_high[(j - 1) / 2]; return; } bl_low[j] = Comp(bl_high[j / 2 - 1], bo[j]); } } int process3(std::vector<Cell<3>>& cells, std::vector<KeyCell<3>>& key_cells) { const int l = log2(cells.size()); const auto dev = 0; cudaSetDevice(dev); const int cell_num = cells.size(); const auto n_bytes = l * cell_num * sizeof(Cell<3>); Cell<3>* h_bo = static_cast<Cell<3>*>(malloc(n_bytes)); Cell<3>* h_bl = static_cast<Cell<3>*>(malloc(n_bytes)); for (int i = 0; i < cells.size(); ++i) { h_bo[i] = cells[i]; } Cell<3> *d_bo, *d_bl; cudaMalloc(static_cast<Cell<3>**>(&d_bo), n_bytes); cudaMalloc(static_cast<Cell<3>**>(&d_bl), n_bytes); cudaMemcpy(d_bo, h_bo, n_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_bl, h_bl, n_bytes, cudaMemcpyHostToDevice); dim3 block(32 > cell_num ? cell_num : 32); dim3 grid((cell_num + block.x - 1) / block.x); for (int i = 1; i <= l; ++i) { std::cout << "pow result:" << pow(2, l - i) << std::endl; ProcessBo <<<grid, block>>>(int(pow(2, l - i)), d_bo + (i - 1) * cell_num, d_bo + i * cell_num); cudaDeviceSynchronize(); } for (int i = l; i >= 0; i--) { std::cout << "pow result bl:" << pow(2, l - i) << std::endl; ProcessBl<<<grid, block>>>(pow(2, l - i), d_bl + i * cell_num, d_bl + (i + 1) * cell_num, d_bo + i * cell_num); cudaDeviceSynchronize(); } // sumArraysOnGPU <<< grid, block >>>(d_Bo, d_Bl); printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x); // cudaMemcpy(gpu_ref, d_c, n_bytes, cudaMemcpyDeviceToHost); cudaFree(d_bo); cudaFree(d_bl); free(h_bo); free(h_bl); std::cout << "this is parallel" << std::endl; cudaDeviceReset(); thrust::host_vector<int> H(4); // initialize individual elements H[0] = 14; H[1] = 20; H[2] = 38; H[3] = 46; thrust::device_vector<int> D = H; int sum = reduce(D.begin(), D.end(), static_cast<int>(0), thrust::plus<int>()); std::cout << "sum:" << sum << std::endl; std::cout << D.size() << ", " << D[0] << std::endl; thrust::copy(D.begin(), D.end(), std::ostream_iterator<int>(std::cout, "\n")); return 0; }
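ProcessBo and ProcessBl above derive j from threadIdx.x alone, while process3 launches them on a multi-block grid, so every block walks the same j range. The file is marked deprecated, but the usual fix would be the global thread index; the fragment below shows ProcessBo rewritten that way as an assumption about the intent (it relies on the Cell and Comp definitions from the file above).

template <int D>
__global__ void ProcessBo(int j_max, Cell<D>* bo_low, Cell<D>* bo_high)
{
    // Global index so each block handles a distinct slice of j values.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j <= j_max)
        bo_high[j] = Comp(bo_low[2 * j], bo_low[2 * j + 1]);
}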
673da1d246675bb5f845f77feada91484bd0c35a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sop.h" __device__ __constant__ FLOAT sigma_rep_mat[3][3] = { {0.0, 0.0, 0.0}, {0.0, 3.8, 5.4}, {0.0, 5.4, 7.0} }; int main(int argc, char* argv[]) { if (argc < 2) { cerr << "Usage: " << argv[0] << " < input_file >" << endl; exit(-1); } time_t tm0 = time(0); // wall time at this point cout << "CURRENT TIME IS: " << ctime(&tm0); if (getcwd(pathname, MAXPATHLEN) == NULL) { cerr << "PROBLEM GETTING PATH" << endl; } else { cout << "CURRENT WORKING DIRECTORY: " << pathname << endl; } //Allocates certain arrays and initializes some variables alloc_arrays(); //Read input file read_input(argv[1]); //Clock ticks at this point clock_t ck0 = clock(); //Perform commands (simulation) ex_cmds(); // time stats time_t tm1 = time(0); clock_t ck1 = clock(); cout << "+-------------------+" << endl; cout << "| Simulation Stats: |" << endl; cout << "+-------------------+" << endl; cout << "Wall Time : " << difftime(tm1, tm0) << " sec" << endl; cout << "Total Computation Time : " << float(ck1 - ck0) / CLOCKS_PER_SEC << " sec" << endl; cout << "Computation Rate : " << float(ck1 - ck0) / CLOCKS_PER_SEC / nstep << " sec / timestep" << endl; cout << "CURRENT TIME IS : " << ctime(&tm1); return 0; } //Execute the commands specified by the input file. This will include reading //in the necessary values, running the simulation, etc. void ex_cmds() { for (int i = 1; i <= ncmd; i++) { //Read data if (!strcmp(cmd[i], "load")) { load(i); } //Set parameters else if (!strcmp(cmd[i], "set")) { set_params(i); } //Run simulation else if (!strcmp(cmd[i], "run")) { simulation_ctrl(); } //TODO: Figure out what to do here or if it should just skip. else { }; } }//end ex_cmds() //Run the simulation. Will transfer control over to either underdamped_ctrl() //or overdamped_ctrl() void simulation_ctrl() { switch (sim_type) { case 1: underdamped_ctrl(); break; case 2: overdamped_ctrl(); break; default: cerr << "UNRECOGNIZED SIM_TYPE!" << endl; exit(-1); } }//end simulation_ctrl() //Run the underdamped simulation void underdamped_ctrl() { char oline[2048]; FLOAT istep = 1.0; int iup = 1; int inlup = 1; ofstream out(ufname, ios::out | ios::app); static int first_time = 1; //TODO: Check if this is necessary when everything is done on the GPU FLOAT3* incr = new FLOAT3[nbead]; //If this is the start of a new simulation, zero the velocity and force arrays if ((!restart) && first_time) { // zero out the velocities and forces for (int i = 0; i < nbead; i++) { vel[i].x = 0.0; vel[i].y = 0.0; vel[i].z = 0.0; force[i].x = 0.0; force[i].y = 0.0; force[i].z = 0.0; }//end for }//end if //The vel and force GPU arrays will be zeroed because of the previous section //of code. 
If it is removed, the vel and force arrays will need to be //zeroed when the simulation is not starting from a restart state alloc_GPU_arrays(); alloc_cudpp(); print_sim_params(); #ifdef USE_CURAND //NOTE: CURAND setup does not currently support restarting setup_rng(1234, 0); #endif //If using the neighbor list, update the neighbor and pair lists if (neighborlist == 1) { update_neighbor_list(); update_pair_list(); } //Cell list is not yet implemented else if (celllist == 1) { cout << "Cell list not implemented" << endl; exit(-1); // update_cell_list(); update_pair_list(); }//end else if celllist == 1 //Set the energy terms to be evaluated set_potential(); // set_forces(); //The forces to be used are now hard-coded to allow streams //This can be modified when different combinations are used //If restarting, load the old coordinates and velocities and set istep to the //correct value if (restart) { load_coords(cfname, unccfname); load_vels(vfname); istep = istep_restart + 1.0; }//end if //If the RNG should be restarted, do so. //TODO: Implement this for the GPU-based RNG if (rgen_restart) { generator.restart(); }//end if //If this is the first time the simulation has been run, evaluate the energy //and forces if (first_time) { //If it is the first time, the data in the host arrays will be up to date, //so no data will need to be transfered from the device energy_eval(); force_eval(); }//end if // ??? if (binsave) { // ??? if ((first_time) && (!rgen_restart)) { record_traj(binfname, uncbinfname); } //Iterate through the time steps while (istep <= nstep) { //Compute pair separation list if ((inlup % nnlup) == 0) { if (neighborlist == 1) { update_neighbor_list(); } else if (celllist == 1) { cout << "Cell list not implemented" << endl; exit(-1); //update_cell_list(); }//end if //Output progress every 100,000 steps if (!((int) istep % 100000)) fprintf(stdout, "(%.0lf) neighbor list: (%d/%d)\n", istep, nnl_att, nnl_rep); inlup = 0; }//end if inlup % nnlup == 0 inlup++; if (neighborlist == 1 || celllist == 1) { update_pair_list(); // fprintf(stdout, "(%.0lf) pair list: (%d/%d)\n", istep, nil_att, // nil_rep); }//end if underdamped_iteration(); //Evaluate the energy of the structure and output all relevant data //every nup time steps if (!(iup % nup)) { // updates //Copy all of the data that will be needed for energy evaluation and //logging from the device to the host. One more transfer to update //the increment array will take place in the calculate_observables() //function if sim_type is 2. 
cutilSafeCall(hipMemcpy(pos, dev_pos, pos_size, hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(unc_pos, dev_unc_pos, unc_pos_size, hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(idx_pair_list_att, dev_idx_pair_list_att, nil_att * sizeof (ushort2), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(idx_pair_list_rep, dev_idx_pair_list_rep, nil_rep * sizeof (ushort2), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(pl_lj_nat_pdb_dist, dev_pl_lj_nat_pdb_dist, nil_att * sizeof (PDB_FLOAT), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(vel, dev_vel, vel_size, hipMemcpyDeviceToHost)); energy_eval(); calculate_observables(incr); sprintf(oline, "%.0lf %f %f %f %f %f %f %f %f %f %d %f", istep, T, kinT, e_bnd, e_ang_ss, e_vdw_rr_att, e_vdw_rr_rep, e_vdw_rr, rna_etot, Q, contct_nat, rgsq); out << oline << endl; iup = 0; record_traj(binfname, uncbinfname); save_coords(cfname, unccfname); save_vels(vfname); generator.save_state(); } istep += 1.0; iup++; } out.close(); } if (first_time) first_time = 0; delete [] incr; return; }//end underdamped_ctrl() //TODO: Parallelize this. Currently will not work! void overdamped_ctrl() { using namespace std; char oline[2048]; FLOAT istep = 1.0; int iup = 1; ofstream out(ufname, ios::out | ios::app); static int first_time = 1; FLOAT3* incr = new FLOAT3[nbead]; //If this is the start of a simulation, zero the velocity and force arrays if ((!restart) && first_time) { // zero out the velocities and forces for (int i = 0; i < nbead; i++) { vel[i].x = 0.0; vel[i].y = 0.0; vel[i].z = 0.0; force[i].x = 0.0; force[i].y = 0.0; force[i].z = 0.0; }//end for }//end if print_sim_params(); if (neighborlist == 1) { update_neighbor_list(); update_pair_list(); } else if (celllist == 1) { cout << "Cell list not yet implemented." << endl; exit(-1); // update_cell_list(); update_pair_list(); } set_potential(); // set_forces(); if (restart) { load_coords(cfname, unccfname); // load_vels(vfname); istep = istep_restart + 1.0; } if (rgen_restart) { generator.restart(); } if (first_time) { energy_eval(); force_eval(); } if (binsave) { if ((first_time) && (!rgen_restart)) { record_traj(binfname, uncbinfname); } while (istep <= nstep) { // compute pair separation list if ((inlup % nnlup) == 0) { if (neighborlist == 1) { update_neighbor_list(); } else if (celllist == 1) { cout << "Cell list not yet implemented" << endl; exit(-1); update_cell_list(); } // fprintf(stderr, "(%.0lf) neighbor list: (%d/%d)\n", istep, nnl_att, nnl_rep); inlup = 0; } inlup++; if (neighborlist == 1 || celllist == 1) { update_pair_list(); // fprintf(stderr, "(%.0lf) pair list: (%d/%d)\n", istep, nil_att, nil_rep); } overdamped_iteration(incr); if (!(iup % nup)) { // updates energy_eval(); calculate_observables(incr); sprintf(oline, "%.0lf %f %f %f %f %f %f %f %d %f", istep, T, kinT, e_bnd, e_ang_ss, e_vdw_rr, rna_etot, Q, contct_nat, rgsq); out << oline << endl; iup = 0; record_traj(binfname, uncbinfname); save_coords(cfname, unccfname); save_vels(vfname); generator.save_state(); } istep += 1.0; iup++; } out.close(); } if (first_time) first_time = 0; delete [] incr; return; }//end overdamped_ctrl() //Kernel to perform the necessary calculations for each iteration when using //an underdamped simulation __global__ void underdamped_iteration_kernel(FLOAT3 *dev_incr, FLOAT3 *dev_vel, float3 *dev_force, FLOAT3 *dev_pos, FLOAT3 *dev_unc_pos, int nbead, FLOAT a1, FLOAT a2, FLOAT boxl) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nbead) { // compute position increments dev_incr[i].x = a1 * 
dev_vel[i].x + a2 * dev_force[i].x; dev_incr[i].y = a1 * dev_vel[i].y + a2 * dev_force[i].y; dev_incr[i].z = a1 * dev_vel[i].z + a2 * dev_force[i].z; // update bead positions dev_pos[i].x += dev_incr[i].x; dev_pos[i].y += dev_incr[i].y; dev_pos[i].z += dev_incr[i].z; dev_pos[i].x -= boxl * rintf(dev_pos[i].x / boxl); dev_pos[i].y -= boxl * rintf(dev_pos[i].y / boxl); dev_pos[i].z -= boxl * rintf(dev_pos[i].z / boxl); dev_unc_pos[i].x += dev_incr[i].x; dev_unc_pos[i].y += dev_incr[i].y; dev_unc_pos[i].z += dev_incr[i].z; }//end if i < nbead }//end underdamped_iteration_kernel //Kernel to update the velocities of the beads __global__ void update_velocities_kernel(FLOAT3 * dev_vel, FLOAT3 *dev_incr, float3 *dev_force, int nbead, FLOAT a3, FLOAT a4) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nbead) { // compute velocity increments dev_vel[i].x = a3 * dev_incr[i].x + a4 * dev_force[i].x; dev_vel[i].y = a3 * dev_incr[i].y + a4 * dev_force[i].y; dev_vel[i].z = a3 * dev_incr[i].z + a4 * dev_force[i].z; }//end if i < nbead }//end update_velocities_kernel //Perform the necessary calculations for the underdamped iteration //TODO: Inline this in underdamped_ctrl() ? void underdamped_iteration() { static const FLOAT eps = 1.0e-5; dim3 threads(BLOCK_DIM, 1, 1); dim3 grid((int) ceil((nbead + 1.0) / (float) threads.x), 1, 1); hipLaunchKernelGGL(( underdamped_iteration_kernel) , dim3(grid), dim3(threads), 0, 0, dev_incr, dev_vel, dev_force, dev_pos, dev_unc_pos, nbead, a1, a2, boxl); // force_update force_eval(); if (T < eps) return; // don't update velocities for steepest descent // update_velocities hipLaunchKernelGGL(( update_velocities_kernel) , dim3(grid), dim3(threads), 0, 0, dev_vel, dev_incr, dev_force, nbead, a3, a4); }//end underdamped_iteration //TODO: Parallelize. Currently will not work! void overdamped_iteration(FLOAT3* incr) { using namespace std; for (int i = 0; i < nbead; i++) { // compute position increments incr[i].x = a5 * force[i].x; incr[i].y = a5 * force[i].y; incr[i].z = a5 * force[i].z; // update bead positions unc_pos[i].x += incr[i].x; unc_pos[i].y += incr[i].y; unc_pos[i].z += incr[i].z; pos[i].x += incr[i].x; pos[i].y += incr[i].y; pos[i].z += incr[i].z; pos[i].x -= boxl * rnd(pos[i].x / boxl); pos[i].y -= boxl * rnd(pos[i].y / boxl); pos[i].z -= boxl * rnd(pos[i].z / boxl); } // force_update force_eval(); } //Arrays that are referenced in this function are copied from the device to the //host in the underdamped_ctrl function (and will be done in the overdamped_ctrl //function once it is implemented) *EXCEPT* increment, which is only needed if //sim_type == 2. If this is the case, it will be copied to the host in this //function //TODO: Parallelize? void calculate_observables(FLOAT3* increment) { using namespace std; FLOAT dx, dy, dz, d; FLOAT sumvsq; int ibead, jbead; PDB_FLOAT r_ij; // chi, contct_nat, contct_tot, Q contct_nat = 0; for (int i = 0; i < ncon_att; i++) { //idx_bead_lj_nat is static. 
It never is updated/changed during a simulation ibead = GET_IDX(idx_bead_lj_nat[i].x) - 1; jbead = GET_IDX(idx_bead_lj_nat[i].y) - 1; r_ij = lj_nat_pdb_dist[i]; dx = unc_pos[ibead].x - unc_pos[jbead].x; dy = unc_pos[ibead].y - unc_pos[jbead].y; dz = unc_pos[ibead].z - unc_pos[jbead].z; dx -= boxl * rnd(dx / boxl); dy -= boxl * rnd(dy / boxl); dz -= boxl * rnd(dz / boxl); d = sqrt(dx * dx + dy * dy + dz * dz); if (d / r_ij < 1.25) { contct_nat++; }//end if d / r_ij < 1.25 }//end for Q = FLOAT(contct_nat) / ncon_att; // rgsq rgsq = 0.0; for (int i = 0; i < nbead - 1; i++) { for (int j = i + 1; j < nbead; j++) { dx = unc_pos[i].x - unc_pos[j].x; dy = unc_pos[i].y - unc_pos[j].y; dz = unc_pos[i].z - unc_pos[j].z; dx -= boxl * rnd(dx / boxl); dy -= boxl * rnd(dy / boxl); dz -= boxl * rnd(dz / boxl); rgsq += (dx * dx + dy * dy + dz * dz); }//end for j }//end for i rgsq /= FLOAT(nbead * nbead); // kinT if (sim_type == 1) { sumvsq = 0.0; for (int i = 0; i < nbead; i++) { sumvsq += vel[i].x * vel[i].x + vel[i].y * vel[i].y + vel[i].z * vel[i].z; }//end for i kinT = sumvsq / (3.0 * FLOAT(nbead)); }//end fi sim_type == 1 else if (sim_type == 2) { cutilSafeCall(hipMemcpy(increment, dev_incr, incr_size, hipMemcpyDeviceToHost)); sumvsq = 0.0; for (int i = 0; i < nbead; i++) { sumvsq += increment[i].x * increment[i].x + increment[i].y * increment[i].y + increment[i].z * increment[i].z; }//end for i sumvsq *= zeta / (2.0 * h); kinT = sumvsq / (3.0 * FLOAT(nbead)); }//end if sim_type == 2 else { } }//end calculate_observables //Output the parameters for this simulation void print_sim_params() { using namespace std; char oline[2048]; cout << endl; sprintf(oline, "+------------------------+"); cout << oline << endl; sprintf(oline, "| Simulation Parameters: |"); cout << oline << endl; sprintf(oline, "+------------------------+"); cout << oline << endl; if (sim_type == 1) { sprintf(oline, "Simulation Type : %s", "Underdamped"); cout << oline << endl; } else if (sim_type == 2) { sprintf(oline, "Simulation Type : %s", "Overdamped"); cout << oline << endl; } else { cerr << "UNRECOGNIZED SIMULATION TYPE!" << endl; exit(-1); } sprintf(oline, "Simulation Temperature : %.3f", T); cout << oline << endl; sprintf(oline, "Start Time Step : %.0lf", istep_restart); cout << oline << endl; sprintf(oline, "Final Time Step : %.0lf", nstep); cout << oline << endl; sprintf(oline, "Output Frequency : %d", nup); cout << oline << endl; sprintf(oline, "Friction Coefficient : %.0e", zeta); cout << oline << endl; sprintf(oline, "PBC Box Length : %.1f", boxl); cout << oline << endl; if (neighborlist == 1) { sprintf(oline, "Long-range Cutoff Type : %s", "Neighbor List"); cout << oline << endl; sprintf(oline, "Neighbor List Update Frequency : %d", nnlup); cout << oline << endl; } else if (celllist == 1) { sprintf(oline, "Long-range Cutoff Type : %s", "Cell List"); cout << oline << endl; sprintf(oline, "Cell List Update Frequency : %d", nnlup); cout << oline << endl; sprintf(oline, "Number of Cells Each Dimension : %.0lf", ncell); cout << oline << endl; } else { sprintf(oline, "Long-range Cutoff Type : %s", "None"); cout << oline << endl; } cout << endl; }//end print_sim_params //Kernel to determine which of the interactions should be added to the //attractive neighbor list. Each attractive interaction will be iterated //through and its corresponding entry in dev_is_neighbor_list_att to 0 if it //should be included in the neighbor list and 1 if it is not. 
//NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. __global__ void update_neighbor_list_att_kernel( unsigned int *dev_is_neighbor_list_att, FLOAT boxl, int ncon_att, PDB_FLOAT *dev_lj_nat_pdb_dist, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_bead_lj_nat) { ushort2 idx_bead_lj_nat; FLOAT3 d; FLOAT d2; unsigned int ibead, jbead; PDB_FLOAT lj_nat_pdb_dist; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ncon_att) { idx_bead_lj_nat = dev_idx_bead_lj_nat[i]; lj_nat_pdb_dist = dev_lj_nat_pdb_dist[i]; ibead = GET_IDX(idx_bead_lj_nat.x); jbead = GET_IDX(idx_bead_lj_nat.y); FLOAT3 ipos = dev_unc_pos[ibead - 1]; FLOAT3 jpos = dev_unc_pos[jbead - 1]; d.x = jpos.x - ipos.x; d.y = jpos.y - ipos.y; d.z = jpos.z - ipos.z; //If using doubles, use double-precision rounding. Else use single- //precision rounding. #ifdef SOP_FP_DOUBLE d.x -= boxl * rint(d.x / boxl); d.y -= boxl * rint(d.y / boxl); d.z -= boxl * rint(d.z / boxl); #else d.x -= boxl * rintf(d.x / boxl); d.y -= boxl * rintf(d.y / boxl); d.z -= boxl * rintf(d.z / boxl); #endif d2 = d.x * d.x + d.y * d.y + d.z * d.z; rcut = 3.2 * lj_nat_pdb_dist; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_neighbor_list_att[i] = 0; //include else ... = 1? May cut down on memory allocation time before each call }//end if d2 else { dev_is_neighbor_list_att[i] = 1; } }//end if i }//end update_neighbor_list_att_kernel //Kernel to determine which of the interactions should be added to the //repulsive neighbor list. Each repulsive interaction will be iterated //through and its corresponding entry in dev_is_neighbor_list_rep to 0 if it //should be included in the neighbor list and 1 if it is not. //NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. __global__ void update_neighbor_list_rep_kernel( unsigned int *dev_is_neighbor_list_rep, FLOAT boxl, int xsize, int ysize, int ncon_rep, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_bead_lj_non_nat) { ushort2 idx_bead_lj_non_nat; FLOAT3 d; FLOAT d2; unsigned int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; //TODO: Clean the nested if's up if (i <= xsize && j <= ysize) { unsigned int idx = j * xsize + i; if (idx < ncon_rep) { idx_bead_lj_non_nat = dev_idx_bead_lj_non_nat[idx]; ibead = GET_IDX(idx_bead_lj_non_nat.x) - 1; jbead = GET_IDX(idx_bead_lj_non_nat.y) - 1; itype = GET_TYPE(idx_bead_lj_non_nat.x); jtype = GET_TYPE(idx_bead_lj_non_nat.y); FLOAT3 ipos = dev_unc_pos[ibead]; FLOAT3 jpos = dev_unc_pos[jbead]; d.x = jpos.x - ipos.x; d.y = jpos.y - ipos.y; d.z = jpos.z - ipos.z; //If using doubles, use double-precision rounding. Else, use single- //precision rounding. 
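    //The subtractions below apply the minimum-image convention: rint(d/boxl)
    //picks the nearest periodic image, so each displacement component is mapped
    //into roughly [-boxl/2, +boxl/2] before the squared distance is formed.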
#ifdef SOP_FP_DOUBLE d.x -= boxl * rint(d.x / boxl); d.y -= boxl * rint(d.y / boxl); d.z -= boxl * rint(d.z / boxl); #else d.x -= boxl * rintf(d.x / boxl); d.y -= boxl * rintf(d.y / boxl); d.z -= boxl * rintf(d.z / boxl); #endif d2 = d.x * d.x + d.y * d.y + d.z * d.z; rcut = 3.2 * sigma_rep_mat[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_neighbor_list_rep[idx] = 0; }//end if d2 else dev_is_neighbor_list_rep[idx] = 1; }//end if idx }//end if i }//end update_neighbor_list_rep_kernel //Kernel to determine which of the interactions should be added to the //attractive pair list. Each attractive interaction will be iterated //through and its corresponding entry in dev_is_pair_list_att to 0 if it //should be included in the pair list and 1 if it is not. //NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. __global__ void update_pair_list_att_kernel(unsigned int *dev_is_pair_list_att, FLOAT boxl, int nnl_att, PDB_FLOAT *dev_nl_lj_nat_pdb_dist, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_neighbor_list_att) { FLOAT3 d; FLOAT d2; unsigned int ibead, jbead; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nnl_att) { ibead = GET_IDX(dev_idx_neighbor_list_att[i].x) - 1; jbead = GET_IDX(dev_idx_neighbor_list_att[i].y) - 1; d.x = dev_unc_pos[jbead].x - dev_unc_pos[ibead].x; d.y = dev_unc_pos[jbead].y - dev_unc_pos[ibead].y; d.z = dev_unc_pos[jbead].z - dev_unc_pos[ibead].z; d.x -= boxl * rintf(d.x / boxl); d.y -= boxl * rintf(d.y / boxl); d.z -= boxl * rintf(d.z / boxl); d2 = d.x * d.x + d.y * d.y + d.z * d.z; rcut = 2.5 * dev_nl_lj_nat_pdb_dist[i]; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_pair_list_att[i] = 0; }//end if d2 < rcut2 else { dev_is_pair_list_att[i] = 1; } }//end if i ... }//end update_pair_list_att_kernel //Kernel to determine which of the interactions should be added to the //repulsive pair list. Each repulsive interaction will be iterated //through and its corresponding entry in dev_is_pair_list_rep to 0 if it //should be included in the pair list and 1 if it is not. //NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. __global__ void update_pair_list_rep_kernel(unsigned int *dev_is_pair_list_rep, FLOAT boxl, int nnl_rep, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_neighbor_list_rep) { FLOAT dx, dy, dz; FLOAT d2; unsigned int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nnl_rep) { ibead = GET_IDX(dev_idx_neighbor_list_rep[i].x) - 1; jbead = GET_IDX(dev_idx_neighbor_list_rep[i].y) - 1; itype = GET_TYPE(dev_idx_neighbor_list_rep[i].x); jtype = GET_TYPE(dev_idx_neighbor_list_rep[i].y); dx = dev_unc_pos[jbead].x - dev_unc_pos[ibead].x; dy = dev_unc_pos[jbead].y - dev_unc_pos[ibead].y; dz = dev_unc_pos[jbead].z - dev_unc_pos[ibead].z; dx -= (boxl) * rintf(dx / boxl); dy -= (boxl) * rintf(dy / boxl); dz -= (boxl) * rintf(dz / boxl); d2 = dx * dx + dy * dy + dz*dz; rcut = 2.5 * sigma_rep_mat[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_pair_list_rep[i] = 0; } else dev_is_pair_list_rep[i] = 1; }//end if i... 
}// end update_pair_list_rep_kernel //If USE_GPU_NL_PL is defined, use the GPU for all of the neighbor list //calculations #ifdef USE_GPU_NL_PL //Update the neighbor list. This function involves using kernels to denote //which interactions should be added to the neighbor list and then uses CUDPP //sort and scan functionality to transfer the interactions to the neighbor list. void update_neighbor_list() { nnl_att = 0; nnl_rep = 0; //<editor-fold defaultstate="collapsed" desc="Execution parameters"> // setup execution parameters dim3 threads_att(BLOCK_DIM, 1, 1); dim3 grid_att((int) ceil(((float) ncon_att) / (float) threads_att.x), 1, 1); int blocksx, blocksy, gridsx, gridsy; if (ncon_rep / BLOCK_DIM <= GRID_DIM) { blocksx = BLOCK_DIM; blocksy = 1; gridsx = (int) ceil(((float) ncon_rep) / (float) BLOCK_DIM); gridsy = 1; } else if (ncon_rep / BLOCK_DIM > GRID_DIM) { blocksx = 32; blocksy = 16; gridsx = (int) ceil(sqrt(ncon_rep) / blocksx + 1.0); gridsy = (int) ceil(sqrt(ncon_rep) / blocksy + 1.0); } dim3 threads_rep(blocksx, blocksy, 1); dim3 grid_rep(gridsx, gridsy, 1); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Update kernels"> //Call the kernels that determine which interactions should be added to the //attractive and repulsive neighbor lists. The entries of the //dev_idx_bead_lj_nat represent the attractive interactions and correspond to //the entries of the dev_is_list_att array which will denote whether or not a //given interaction should be added to the neighbor list. Similarly, the //dev_idx_bead_lj_non_nat array represents the repulsive interactions and //correspond to the entries of the dev_is_list_rep array which will denote //whether or not a given interaction should be added to the neighbor list hipLaunchKernelGGL(( update_neighbor_list_att_kernel) , dim3(grid_att), dim3(threads_att) , 0, 0, dev_is_list_att, boxl, ncon_att, dev_lj_nat_pdb_dist, dev_unc_pos, dev_idx_bead_lj_nat); hipLaunchKernelGGL(( update_neighbor_list_rep_kernel) , dim3(grid_rep), dim3(threads_rep) , 0, 0, dev_is_list_rep, boxl, blocksx*gridsx, blocksy*gridsy, ncon_rep, dev_unc_pos, dev_idx_bead_lj_non_nat); hipDeviceSynchronize(); cutilCheckMsg("update_neighbor_list_rep_kernel failed"); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Att code"> //The following code uses CUDPP to create the neighbor list for the attractive //interactions and calculate how many attractive entries there are in //the neighbor list. //Obtain a copy of dev_is_list_att for use with pdb. This is //necessary because both the pdb array and the idx array must be sorted in //the same manner. When the is_list_att array is sorted the first time, //the order is lost. Obtaining a copy allows the pdb array to be sorted //in an identical way to the idx array, insuring that the corresponding //values are in identical positions in the arrays. cutilSafeCall(hipMemcpy(dev_is_nl_2, dev_is_list_att, is_list_att_size, hipMemcpyDeviceToDevice)); //Copy the default values of idx_bead_lj_nat to idx_neighbor_list_att. The //idx_bead_lj_nat array must be kept in its initial order and the //idx_neighbor_list array must be identical to the idx_bead_lj_nat array //before the sort and scan algorithm is used. cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_att, dev_idx_bead_lj_nat, idx_bead_lj_nat_size, hipMemcpyDeviceToDevice)); //Sort the idx_neighbor_list_att array based on the information in //the is_list_att array. 
The entries that are in the neighbor list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_att, dev_idx_neighbor_list_att, 1, ncon_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 1\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Copy the default values of lj_nat_pdb_dist to nl_lj_nat_pdb_dist. The //jl_nat_pdb_dist array must be kept in its initial order and the //nl_lj_nat_pdb_dist array must be identical to the lj_nat_pdb_dist array //before the sort and scan algorithm is used. cutilSafeCall(hipMemcpy(dev_nl_lj_nat_pdb_dist, dev_lj_nat_pdb_dist, lj_nat_pdb_dist_size, hipMemcpyDeviceToDevice)); //Sort the lj_nat_pdb_dist array based on the information in the copy //of is_list_att array. The entries corresponding to the interactions in the //pair list will be in the first portion of the array and those that are not //will be in the last portion result = cudppSort(sort_plan, dev_is_nl_2, dev_nl_lj_nat_pdb_dist, 1, ncon_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 2\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_att array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the neighbor list. The is_list_att array will be untouched //and the result of the scan will be stored in dev_is_nl_scan_att result = cudppScan(scan_plan, dev_is_nl_scan_att, dev_is_list_att, ncon_att); if (CUDPP_SUCCESS != result) { printf("Error scanning att\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Temporary storage for the result of the scan unsigned int *num; num = new unsigned int[1]; //Copy the last entry of dev_is_nl_scan_att, corresponding to the total sum //of 1's in is_list_att to the host variable "num" cutilSafeCall(hipMemcpy(num, &(dev_is_nl_scan_att[ncon_att - 1]), sizeof (unsigned int), hipMemcpyDeviceToHost)); err = hipGetLastError(); cutilSafeCall(err); //The total number of attractive entries in the neighbor list is equal to //the total number of attractive interactions (ncon_att) minus the number //of attractive entries NOT in the neighbor list (num) nnl_att = ncon_att - *num; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Rep code"> //The following code uses CUDPP to create the neighbor list for the repulsive //interactions and calculate how many repulsive entries there are in //the neighbor list. //The CUDPP algorithms fail with arrays larger than about 32 million entries. //As a workaround, if the number of entries is greater than 32 million, the //array can be partitioned into two arrays and each array sorted and scanned //individually and recombined afterwards //If there are less than 32 million entries, no partitioning is necessary if (ncon_rep <= NCON_REP_CUTOFF) { //Copy the default values of idx_bead_lj_non_nat to idx_neighbor_list_rep. //The idx_bead_lj_non_nat array must be kept in its initial order and the //idx_neighbor_list array must be identical to the idx_bead_lj_non_nat array //before the sort and scan algorithm is used. cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_rep, dev_idx_bead_lj_non_nat, idx_bead_lj_non_nat_size, hipMemcpyDeviceToDevice)); err = hipGetLastError(); cutilSafeCall(err); //Sort the idx_neighbor_list_rep array based on the information in //the is_list_rep array. 
The entries that are in the neighbor list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_rep, dev_idx_neighbor_list_rep, 1, ncon_rep); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_rep array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the neighbor list. The is_list_rep array will be //untouched and the result of the scan will be stored in dev_is_nl_scan_rep result = cudppScan(scan_plan, dev_is_nl_scan_rep, dev_is_list_rep, ncon_rep); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } hipDeviceSynchronize(); err = hipGetLastError(); cutilSafeCall(err); //Copy the last entry of dev_is_nl_scan_rep, corresponding to the total sum //of 1's in is_list_rep to the host variable "num" cutilSafeCall(hipMemcpy(num, &(dev_is_nl_scan_rep[ncon_rep - 1]), sizeof (unsigned int), hipMemcpyDeviceToHost)); //The total number of repulsive entries in the neighbor list is equal to //the total number of repulsive interactions (ncon_rep) minus the number //of repulsive entries NOT in the neighbor list (num) nnl_rep = ncon_rep - *num; //The temporary variable num is no longer needed, so it can be freed. free(num); }//end if //If there are over 32 million entries, the first 32 million entries will be //sorted as usual, then the remaining entries will be sorted in separate //arrays. The entries that are members of the neighbor list are then //copied back to the original list. The result is that the repulsive //neighbor list ends up sorted exactly as it would be if CUDPP could handle //arrays larger than 32 million entries. else { //Copy first NCON_REP_CUTOFF elements to idx_nl_rep. cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_rep, dev_idx_bead_lj_non_nat, sizeof (ushort2) * NCON_REP_CUTOFF, hipMemcpyDeviceToDevice)); //Calculate the number or entries that will be in the temporary array. 
This //is simply the total number of repulsive interactions (ncon_rep) minus the //cutoff value (currently 32 million) int numTmp = ncon_rep - NCON_REP_CUTOFF; //Create temporary arrays //idx_rep_temp will hold the entries at and above the 32 millionth index //in the original idx list ushort2* idx_rep_tmp; cutilSafeCall(hipMalloc((void**) &idx_rep_tmp, sizeof (ushort2) * numTmp)); //is_nl_rep_tmp will hold the entries at and above the 32 millionth index //in the original is_list unsigned int* is_nl_rep_tmp; cutilSafeCall(hipMalloc((void**) &is_nl_rep_tmp, sizeof (unsigned int) * numTmp)); //Copy last ncon_rep - NCON_REP_CUTOFF elements to temporary arrays cutilSafeCall(hipMemcpy(idx_rep_tmp, &(dev_idx_bead_lj_non_nat[NCON_REP_CUTOFF]), sizeof (ushort2) * numTmp, hipMemcpyDeviceToDevice)); cutilSafeCall(hipMemcpy(is_nl_rep_tmp, &(dev_is_list_rep[NCON_REP_CUTOFF]), sizeof (unsigned int) * numTmp, hipMemcpyDeviceToDevice)); //Sort first NCON_REP_CUTOFF elements of original array err = hipGetLastError(); cutilSafeCall(err); result = cudppSort(sort_plan, dev_is_list_rep, dev_idx_neighbor_list_rep, 1, NCON_REP_CUTOFF); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Scan first NCON_REP_CUTOFF elements to determine how many entries would be //in is_nl_rep result = cudppScan(scan_plan, dev_is_nl_scan_rep, dev_is_list_rep, NCON_REP_CUTOFF); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Copy the 32million - 1st entry of dev_is_nl_scan_rep to the host. This //corresponds to the number of 1's in the array, or the number of entries //that are NOT in the pair list cutilSafeCall(hipMemcpy(num, &(dev_is_nl_scan_rep[NCON_REP_CUTOFF - 1]), sizeof (unsigned int), hipMemcpyDeviceToHost)); err = hipGetLastError(); cutilSafeCall(err); //The number of entries in the neighbor list (to be stored in num) is equal //to the total number of values sorted (NCON_REP_CUTOFF) minus the number //of entries NOT in the neighbor list (num) *num = NCON_REP_CUTOFF - *num; //Sort elements of temp array result = cudppSort(sort_plan, is_nl_rep_tmp, idx_rep_tmp, 1, numTmp); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Scan elements of temp array to determine how many will be copied back to //the original array result = cudppScan(scan_plan, dev_is_nl_scan_rep, is_nl_rep_tmp, numTmp); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //num2 is a temporary variable to store the number of entries in the //temporary array that are NOT in the neighbor list unsigned int* num2; num2 = new unsigned int[1]; //Copy the last entry in dev_is_nl_scan_rep, corresponding to the number //of entires in the temporary array that are NOT in the neighbor list, to //the host //std::cout << "numTmp: " << numTmp << std::endl; cutilSafeCall(hipMemcpy(num2, &(dev_is_nl_scan_rep[numTmp - 1]), sizeof (unsigned int), hipMemcpyDeviceToHost)); //The number of entries in the neighbor list (to be stored in num2) that are //in the temporary array is equal to the total number of values sorted //in the temporary array (numTmp) minus the number of entries NOT in the //neighbor list (num2) *num2 = numTmp - *num2; //Copy num_is_temp valid entries to original array starting at the num'th //entry 
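    //In other words, after the two sorts the entries [0, *num) of
    //dev_idx_neighbor_list_rep already hold the surviving interactions of the
    //first chunk, and the *num2 survivors of the temporary chunk are appended
    //immediately after them, leaving the combined list contiguous in [0, nnl_rep).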
cutilSafeCall(hipMemcpy(&(dev_idx_neighbor_list_rep[(*num)]), idx_rep_tmp, sizeof (ushort2) * (*num2), hipMemcpyDeviceToDevice)); //The total number of entries in the repulsive neighbor list (nnl_rep) is //equal to the number of entries in the original list (num) plus the number //of entries in the temporary list (num2) nnl_rep = *num + *num2; //Free temp arrays free(num); free(num2); cutilSafeCall(hipFree(idx_rep_tmp)); cutilSafeCall(hipFree(is_nl_rep_tmp)); } //</editor-fold> if (nnl_rep == 0) { cerr << "Neighbor List is EMPTY!!" << endl; exit(-1); } }//end update_neighbor_list //If the GPU is not to be used for all neighbor list calculations, //USE_GPU_NL_PL_NAIVE can be defined to use the "naive" GPU approach or nothing //can be defined to use a CPU-only neighbor list calculation #else #ifdef USE_GPU_NL_PL_NAIVE //Update the neighbor list WITHOUT using CUDPP. This uses parallel kernels to //determine which interactions should be added to the neighbor list and then //adds them to the neighbor list sequentially on the CPU. This is included for //timing and comparison purposes only. void update_neighbor_list() { using namespace std; nnl_att = 0; nnl_rep = 0; //<editor-fold defaultstate="collapsed" desc="Execution parameters"> // setup execution parameters dim3 threads_att(BLOCK_DIM, 1, 1); dim3 grid_att((int) ceil(((float) ncon_att) / (float) threads_att.x), 1, 1); int blocksx, blocksy, gridsx, gridsy; if (ncon_rep / BLOCK_DIM <= GRID_DIM) { blocksx = BLOCK_DIM; blocksy = 1; gridsx = (int) ceil(((float) ncon_rep) / (float) BLOCK_DIM); gridsy = 1; } else if (ncon_rep / BLOCK_DIM > GRID_DIM) { blocksx = 32; blocksy = 16; gridsx = (int) ceil(sqrt(ncon_rep) / blocksx + 1.0); gridsy = (int) ceil(sqrt(ncon_rep) / blocksy + 1.0); } dim3 threads_rep(blocksx, blocksy, 1); dim3 grid_rep(gridsx, gridsy, 1); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Update kernels"> hipLaunchKernelGGL(( update_neighbor_list_att_kernel) , dim3(grid_att), dim3(threads_att) , 0, 0, dev_is_list_att, boxl, ncon_att, dev_lj_nat_pdb_dist, dev_unc_pos, dev_idx_bead_lj_nat); hipLaunchKernelGGL(( update_neighbor_list_rep_kernel) , dim3(grid_rep), dim3(threads_rep) , 0, 0, dev_is_list_rep, boxl, blocksx*gridsx, blocksy*gridsy, ncon_rep, dev_unc_pos, dev_idx_bead_lj_non_nat); hipDeviceSynchronize(); cutilCheckMsg("update_neighbor_list_rep_kernel failed"); //</editor-fold> //Copy needed arrays to the host cutilSafeCall(hipMemcpy(is_list_att, dev_is_list_att, is_list_att_size, hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(is_list_rep, dev_is_list_rep, is_list_rep_size, hipMemcpyDeviceToHost)); // should be native distance for (int i=0; i<ncon_att; i++) { if (is_list_att[i] == 0) { // add to interaction neighbor list idx_neighbor_list_att[nnl_att] = idx_bead_lj_nat[i]; nl_lj_nat_pdb_dist[nnl_att] = lj_nat_pdb_dist[i]; nnl_att++; } } for (int i=0; i<ncon_rep; i++) { if (is_list_rep[i] == 0) { // add to interaction neighbor list idx_neighbor_list_rep[nnl_rep] = idx_bead_lj_non_nat[i]; nnl_rep++; } } //Copy updated values back to the GPU cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_att, idx_neighbor_list_att, /*idx_neighbor_list_att_size**/ nnl_att * sizeof(ushort2), hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_nl_lj_nat_pdb_dist, nl_lj_nat_pdb_dist, /*nl_lj_nat_pdb_dist_size**/ nnl_att * sizeof(PDB_FLOAT), hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_rep, idx_neighbor_list_rep, /*idx_neighbor_list_rep_size**/ nnl_rep * sizeof(ushort2), hipMemcpyHostToDevice)); if (nnl_rep == 
0) { cerr << "Neighbor List is EMPTY!!" << endl; exit(-1); } } #else //Update the neighbor list using ONLY the CPU. This is included for timing //and comparison purposes only. void update_neighbor_list() { FLOAT dx, dy, dz; FLOAT d2; int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; nnl_att = 0; nnl_rep = 0; //Copy the needed data to the CPU from the GPU. The unc_pos array will need //to be copied, but the other arrays that are read from in NL calculations //are static/global arrays cutilSafeCall(hipMemcpy(unc_pos, dev_unc_pos, unc_pos_size, hipMemcpyDeviceToHost)); for (int i=0; i<ncon_att; i++) { ibead = GET_IDX(idx_bead_lj_nat[i].x) - 1; jbead = GET_IDX(idx_bead_lj_nat[i].y) - 1; itype = GET_TYPE(idx_bead_lj_nat[i].x); jtype = GET_TYPE(idx_bead_lj_nat[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 3.2*lj_nat_pdb_dist[i]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to neighbor list // ibead_neighbor_list_att[nnl_att] = ibead + 1; // jbead_neighbor_list_att[nnl_att] = jbead + 1; // itype_neighbor_list_att[nnl_att] = itype; // jtype_neighbor_list_att[nnl_att] = jtype; idx_neighbor_list_att[nnl_att] = idx_bead_lj_nat[i]; nl_lj_nat_pdb_dist[nnl_att] = lj_nat_pdb_dist[i]; // nl_lj_nat_pdb_dist2[nnl_att] = lj_nat_pdb_dist2[i]; // nl_lj_nat_pdb_dist6[nnl_att] = lj_nat_pdb_dist6[i]; // nl_lj_nat_pdb_dist12[nnl_att] = lj_nat_pdb_dist12[i]; nnl_att++; } } for (int i=0; i<ncon_rep; i++) { ibead = GET_IDX(idx_bead_lj_non_nat[i].x) - 1; jbead = GET_IDX(idx_bead_lj_non_nat[i].y) - 1; itype = GET_TYPE(idx_bead_lj_non_nat[i].x); jtype = GET_TYPE(idx_bead_lj_non_nat[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 3.2*sigma_rep[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to neighbor list // ibead_neighbor_list_rep[nnl_rep] = ibead + 1; // jbead_neighbor_list_rep[nnl_rep] = jbead + 1; // itype_neighbor_list_rep[nnl_rep] = itype; // jtype_neighbor_list_rep[nnl_rep] = jtype; idx_neighbor_list_rep[nnl_rep] = idx_bead_lj_non_nat[i]; nnl_rep++; } } //Write the modified arrays back to the GPU cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_att, idx_neighbor_list_att, /*idx_neighbor_list_att_size**/ nnl_att * sizeof(ushort2), hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_nl_lj_nat_pdb_dist, nl_lj_nat_pdb_dist, /*nl_lj_nat_pdb_dist_size**/ nnl_att * sizeof(PDB_FLOAT), hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_rep, idx_neighbor_list_rep, /*idx_neighbor_list_rep_size**/ nnl_rep * sizeof(ushort2), hipMemcpyHostToDevice)); } #endif #endif #ifdef USE_GPU_NL_PL //Update the pair list. This function involves using kernels to denote //which interactions should be added to the pair list and then uses CUDPP //sort and scan functionality to transfer the interactions to the pair list. 
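//Illustrative example with hypothetical numbers: given flags is_list = {0,1,0,0,1}
//(0 = keep, 1 = drop), sorting the index and pdb-distance arrays with the flags as
//keys moves the three kept entries to the front, and a sum scan of the flags ends
//in 2, so the compacted list length is 5 - 2 = 3. The calls below apply this same
//pattern with CUDPP sort and scan.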
void update_pair_list() { nil_att = 0; nil_rep = 0; //<editor-fold defaultstate="collapsed" desc="Execution params"> // setup execution parameters dim3 threads_att(BLOCK_DIM, 1, 1); dim3 grid_att((int) ceil(((float) nnl_att) / (float) threads_att.x), 1, 1); dim3 threads_rep(BLOCK_DIM, 1, 1); dim3 grid_rep((int) ceil(((float) nnl_rep) / (float) threads_rep.x), 1, 1); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Kernels"> //Call the kernels that determine which interactions should be added to the //attractive and repulsive neighbor lists. The entries of the //dev_idx_neighbor_list_att represent the attractive interactions in the //neigbhor list andcorrespond to the entries of the dev_is_list_att array //which will denote whether or not a given interaction should be added to the //pair list. Similarly, the dev_idx_neighbor_list_rep array represents the //repulsive interactions in the neighbor list and correspond to the entries of //the dev_is_list_rep array which will denote whether or not a given //interaction should be added to the pair list hipLaunchKernelGGL(( update_pair_list_att_kernel) , dim3(grid_att), dim3(threads_att) , 0, 0, dev_is_list_att, boxl, nnl_att, dev_nl_lj_nat_pdb_dist, dev_unc_pos, dev_idx_neighbor_list_att); hipLaunchKernelGGL(( update_pair_list_rep_kernel) , dim3(grid_rep), dim3(threads_rep), 0, 0, dev_is_list_rep, boxl, nnl_rep, dev_unc_pos, dev_idx_neighbor_list_rep); hipDeviceSynchronize(); cutilCheckMsg("Kernel execution failed"); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Att code"> //The following code uses CUDPP to create the pair list for the attractive //interactions and calculate how many attractive entries there are in //the pair list. //Obtain a copy of dev_is_list_att for use with pdb. This is //necessary because both the pdb array and the idx array must be sorted in //the same manner. When the is_list_att array is sorted the first time, //the order is lost. Obtaining a copy allows the pdb array to be sorted //in an identical way to the idx array, insuring that the corresponding //values are in identical positions in the arrays. cutilSafeCall(hipMemcpy(dev_is_nl_2, dev_is_list_att, is_list_att_size, hipMemcpyDeviceToDevice)); //Re-use the space allocated for the neighbor list for the pair list. The //entries of the neighbor list will still be in the first nnl_att entries //and the entries of the pair list will be in the first nil_att entries. dev_idx_pair_list_att = dev_idx_neighbor_list_att; //Sort the idx_pair_list_att array based on the information in //the is_list_att array. The entries that are in the pair list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_att, dev_idx_pair_list_att, 1, nnl_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 1\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Re-use the space allocated for dev_nl_lj_nat_pdb_dist for the //dev_pl_lj_nat_pdb_dist array. The entries of the neighbor list will still //be in the first nnl_att entries and the entries of the pair list will be in //the first nil_att entries. dev_pl_lj_nat_pdb_dist = dev_nl_lj_nat_pdb_dist; //Sort the dev_pl_lj_nat_pdb_dist array based on the information in the copy //of is_list_att array. 
The entries corresponding to the interactions in the //pair list will be in the first portion of the array and those that are not //will be in the last portion result = cudppSort(sort_plan, dev_is_nl_2, dev_pl_lj_nat_pdb_dist, 1, nnl_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 2\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_att array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the neighbor list. The is_list_att array will be untouched //and the result of the scan will be stored in dev_is_nl_scan_att result = cudppScan(scan_plan, dev_is_nl_scan_att, dev_is_list_att, nnl_att); if (CUDPP_SUCCESS != result) { printf("Error scanning att\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Temporary storage for the result of the scan unsigned int *num; num = new unsigned int[1]; //Copy the last entry of dev_is_nl_scan_att, corresponding to the total sum //of 1's in is_list_att to the host variable "num" cutilSafeCall(hipMemcpy(num, &(dev_is_nl_scan_att[nnl_att - 1]), sizeof (unsigned int), hipMemcpyDeviceToHost)); err = hipGetLastError(); cutilSafeCall(err); //The total number of attractive entries in the neighbor list is equal to //the total number of attractive interactions (ncon_att) minus the number //of attractive entries NOT in the neighbor list (num) nil_att = nnl_att - *num; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Rep code"> //The following code uses CUDPP to create the pair list for the repulsive //interactions and calculate how many repulsive entries there are in //the pair list. //Reuse the neighbor list array for the pair list dev_idx_pair_list_rep = dev_idx_neighbor_list_rep; //Sort the idx_pair_list_rep array based on the information in //the is_list_rep array. The entries that are in the pair list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_rep, dev_idx_pair_list_rep, 1, nnl_rep); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = hipGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_rep array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the pair list. The is_list_rep array will be //untouched and the result of the scan will be stored in dev_is_nl_scan_rep result = cudppScan(scan_plan, dev_is_nl_scan_rep, dev_is_list_rep, nnl_rep); err = hipGetLastError(); cutilSafeCall(err); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } //Copy the last entry of dev_is_nl_scan_rep, corresponding to the total sum //of 1's in is_list_rep to the host variable "num" cutilSafeCall(hipMemcpy(num, &(dev_is_nl_scan_rep[nnl_rep - 1]), sizeof (unsigned int), hipMemcpyDeviceToHost)); //The total number of repulsive entries in the pair list is equal to //the total number of repulsive interactions in the neighbor list(nnl_rep) //minus the number of repulsive entries NOT in the pair list (num) nil_rep = nnl_rep - *num; //</editor-fold> free(num); }//end update_pair_list #else #ifdef USE_GPU_NL_PL_NAIVE //Update the pair list WITHOUT using CUDPP. This uses parallel kernels to //determine which interactions should be added to the pair list and then //adds them to the pair list sequentially on the CPU. 
//This is included for timing and comparison purposes only.
void update_pair_list() {
  using namespace std;

  nil_att = 0;
  nil_rep = 0;

  //<editor-fold defaultstate="collapsed" desc="Execution params">
  // setup execution parameters
  dim3 threads_att(BLOCK_DIM, 1, 1);
  dim3 grid_att((int) ceil(((float) nnl_att) / (float) threads_att.x), 1, 1);

  dim3 threads_rep(BLOCK_DIM, 1, 1);
  dim3 grid_rep((int) ceil(((float) nnl_rep) / (float) threads_rep.x), 1, 1);
  //</editor-fold>

  //<editor-fold defaultstate="collapsed" desc="Kernels">
  hipLaunchKernelGGL(( update_pair_list_att_kernel) , dim3(grid_att), dim3(threads_att) , 0, 0, dev_is_list_att,
    boxl, nnl_att, dev_nl_lj_nat_pdb_dist, dev_unc_pos, dev_idx_neighbor_list_att);
  hipLaunchKernelGGL(( update_pair_list_rep_kernel) , dim3(grid_rep), dim3(threads_rep), 0, 0, dev_is_list_rep,
    boxl, nnl_rep, dev_unc_pos, dev_idx_neighbor_list_rep);
  hipDeviceSynchronize();
  cutilCheckMsg("Kernel execution failed");
  //</editor-fold>

  //Copy needed values to the CPU
  cutilSafeCall(hipMemcpy(is_list_att, dev_is_list_att, nnl_att * sizeof(unsigned int) /*is_list_att_size**/, hipMemcpyDeviceToHost));
  //Might still be up to date from neighbor list update
//  cutilSafeCall(hipMemcpy(idx_neighbor_list_att, dev_idx_neighbor_list_att, idx_neighbor_list_att_size, hipMemcpyDeviceToHost));
//  cutilSafeCall(hipMemcpy(idx_pair_list_att, dev_idx_pair_list_att, idx_pair_list_att_size, hipMemcpyDeviceToHost));
  //Might still be up to date from neighbor list update
//  cutilSafeCall(hipMemcpy(nl_lj_nat_pdb_dist, dev_nl_lj_nat_pdb_dist, nl_lj_nat_pdb_dist_size, hipMemcpyDeviceToHost));
//  cutilSafeCall(hipMemcpy(pl_lj_nat_pdb_dist, dev_pl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist_size, hipMemcpyDeviceToHost));
  cutilSafeCall(hipMemcpy(is_list_rep, dev_is_list_rep, nnl_rep * sizeof(unsigned int) /*is_list_rep_size**/, hipMemcpyDeviceToHost));
  //Might still be up to date from neighbor list update
//  cutilSafeCall(hipMemcpy(idx_neighbor_list_rep, dev_idx_neighbor_list_rep, idx_neighbor_list_rep_size, hipMemcpyDeviceToHost));

  // should be native distance
  for (int i=0; i<nnl_att; i++) {
    if (is_list_att[i] == 0) {
      // add to interaction pair list
//      idx_pair_list_att[nil_att].x = idx_neighbor_list_att[i].x;
//      idx_pair_list_att[nil_att].y = idx_neighbor_list_att[i].y;
//      idx_pair_list_att[nil_att].z = idx_neighbor_list_att[i].z;
//      idx_pair_list_att[nil_att].w = idx_neighbor_list_att[i].w;
      idx_pair_list_att[nil_att] = idx_neighbor_list_att[i];
      pl_lj_nat_pdb_dist[nil_att] = nl_lj_nat_pdb_dist[i];
      nil_att++;
    }
  }

  for (int i=0; i<nnl_rep; i++) {
    if (is_list_rep[i] == 0) {
      // add to interaction pair list
//      idx_pair_list_rep[nil_rep].x = idx_neighbor_list_rep[i].x;
//      idx_pair_list_rep[nil_rep].y = idx_neighbor_list_rep[i].y;
//      idx_pair_list_rep[nil_rep].z = idx_neighbor_list_rep[i].z;
//      idx_pair_list_rep[nil_rep].w = idx_neighbor_list_rep[i].w;
      idx_pair_list_rep[nil_rep] = idx_neighbor_list_rep[i];
      nil_rep++;
    }
  }

  //Copy updated values back to the GPU
  cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, hipMemcpyHostToDevice));
//  cutilSafeCall(hipMemcpy(dev_idx_pair_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, hipMemcpyHostToDevice));
//  cutilSafeCall(hipMemcpy(dev_pl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att * sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/, hipMemcpyHostToDevice));
  cutilSafeCall(hipMemcpy(dev_nl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att *
sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/, hipMemcpyHostToDevice)); // cutilSafeCall(hipMemcpy(dev_idx_pair_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, hipMemcpyHostToDevice)); dev_idx_pair_list_att = dev_idx_neighbor_list_att; dev_pl_lj_nat_pdb_dist = dev_nl_lj_nat_pdb_dist; dev_idx_pair_list_rep = dev_idx_neighbor_list_rep; } #else //Update the pair list using ONLY the CPU. This is included for timing //and comparison purposes only. void update_pair_list() { using namespace std; // declare host variables FLOAT dx, dy, dz; FLOAT d2; unsigned int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; nil_att = 0; nil_rep = 0; //Copy needed arrays to the CPU from the GPU cutilSafeCall(hipMemcpy(idx_neighbor_list_att, dev_idx_neighbor_list_att, /*idx_neighbor_list_att_size**/ nnl_att * sizeof(ushort2), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(unc_pos, dev_unc_pos, unc_pos_size, hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(nl_lj_nat_pdb_dist, dev_nl_lj_nat_pdb_dist, /*nl_lj_nat_pdb_dist_size**/ nnl_att * sizeof(PDB_FLOAT), hipMemcpyDeviceToHost)); cutilSafeCall(hipMemcpy(idx_neighbor_list_rep, dev_idx_neighbor_list_rep, /*idx_neighbor_list_rep_size**/ nnl_rep * sizeof(ushort2), hipMemcpyDeviceToHost)); // should be native distance for (int i=0; i<nnl_att; i++) { // ibead = ibead_neighbor_list_att[i] - 1; // jbead = jbead_neighbor_list_att[i] - 1; // itype = itype_neighbor_list_att[i]; // jtype = jtype_neighbor_list_att[i]; ibead = GET_IDX(idx_neighbor_list_att[i].x) - 1; jbead = GET_IDX(idx_neighbor_list_att[i].y) - 1; itype = GET_TYPE(idx_neighbor_list_att[i].x); jtype = GET_TYPE(idx_neighbor_list_att[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 2.5*nl_lj_nat_pdb_dist[i]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to interaction pair list // ibead_pair_list_att[nil_att] = ibead + 1; // jbead_pair_list_att[nil_att] = jbead + 1; // itype_pair_list_att[nil_att] = itype; // jtype_pair_list_att[nil_att] = jtype; idx_pair_list_att[nil_att] = idx_neighbor_list_att[i]; pl_lj_nat_pdb_dist[nil_att] = nl_lj_nat_pdb_dist[i]; // pl_lj_nat_pdb_dist2[nil_att] = nl_lj_nat_pdb_dist2[i]; // pl_lj_nat_pdb_dist6[nil_att] = nl_lj_nat_pdb_dist6[i]; // pl_lj_nat_pdb_dist12[nil_att] = nl_lj_nat_pdb_dist12[i]; nil_att++; } } for (int i=0; i<nnl_rep; i++) { // ibead = ibead_neighbor_list_rep[i] - 1; // jbead = jbead_neighbor_list_rep[i] - 1; // itype = itype_neighbor_list_rep[i]; // jtype = jtype_neighbor_list_rep[i]; ibead = GET_IDX(idx_neighbor_list_rep[i].x) - 1; jbead = GET_IDX(idx_neighbor_list_rep[i].y) - 1; itype = GET_TYPE(idx_neighbor_list_rep[i].x); jtype = GET_TYPE(idx_neighbor_list_rep[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 2.5*sigma_rep[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to interaction pair list // ibead_pair_list_rep[nil_rep] = ibead + 1; // jbead_pair_list_rep[nil_rep] = jbead + 1; // itype_pair_list_rep[nil_rep] = itype; // jtype_pair_list_rep[nil_rep] = jtype; idx_pair_list_rep[nil_rep] = 
idx_neighbor_list_rep[i]; nil_rep++; } } //Copy updated values back to the GPU cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, hipMemcpyHostToDevice)); // cutilSafeCall(hipMemcpy(dev_idx_pair_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, hipMemcpyHostToDevice)); // cutilSafeCall(hipMemcpy(dev_pl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att * sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/, hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_nl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att * sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/, hipMemcpyHostToDevice)); // cutilSafeCall(hipMemcpy(dev_idx_pair_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(dev_idx_neighbor_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, hipMemcpyHostToDevice)); dev_idx_pair_list_att = dev_idx_neighbor_list_att; dev_pl_lj_nat_pdb_dist = dev_nl_lj_nat_pdb_dist; dev_idx_pair_list_rep = dev_idx_neighbor_list_rep; } #endif #endif
673da1d246675bb5f845f77feada91484bd0c35a.cu
#include "sop.h" __device__ __constant__ FLOAT sigma_rep_mat[3][3] = { {0.0, 0.0, 0.0}, {0.0, 3.8, 5.4}, {0.0, 5.4, 7.0} }; int main(int argc, char* argv[]) { if (argc < 2) { cerr << "Usage: " << argv[0] << " < input_file >" << endl; exit(-1); } time_t tm0 = time(0); // wall time at this point cout << "CURRENT TIME IS: " << ctime(&tm0); if (getcwd(pathname, MAXPATHLEN) == NULL) { cerr << "PROBLEM GETTING PATH" << endl; } else { cout << "CURRENT WORKING DIRECTORY: " << pathname << endl; } //Allocates certain arrays and initializes some variables alloc_arrays(); //Read input file read_input(argv[1]); //Clock ticks at this point clock_t ck0 = clock(); //Perform commands (simulation) ex_cmds(); // time stats time_t tm1 = time(0); clock_t ck1 = clock(); cout << "+-------------------+" << endl; cout << "| Simulation Stats: |" << endl; cout << "+-------------------+" << endl; cout << "Wall Time : " << difftime(tm1, tm0) << " sec" << endl; cout << "Total Computation Time : " << float(ck1 - ck0) / CLOCKS_PER_SEC << " sec" << endl; cout << "Computation Rate : " << float(ck1 - ck0) / CLOCKS_PER_SEC / nstep << " sec / timestep" << endl; cout << "CURRENT TIME IS : " << ctime(&tm1); return 0; } //Execute the commands specified by the input file. This will include reading //in the necessary values, running the simulation, etc. void ex_cmds() { for (int i = 1; i <= ncmd; i++) { //Read data if (!strcmp(cmd[i], "load")) { load(i); } //Set parameters else if (!strcmp(cmd[i], "set")) { set_params(i); } //Run simulation else if (!strcmp(cmd[i], "run")) { simulation_ctrl(); } //TODO: Figure out what to do here or if it should just skip. else { }; } }//end ex_cmds() //Run the simulation. Will transfer control over to either underdamped_ctrl() //or overdamped_ctrl() void simulation_ctrl() { switch (sim_type) { case 1: underdamped_ctrl(); break; case 2: overdamped_ctrl(); break; default: cerr << "UNRECOGNIZED SIM_TYPE!" << endl; exit(-1); } }//end simulation_ctrl() //Run the underdamped simulation void underdamped_ctrl() { char oline[2048]; FLOAT istep = 1.0; int iup = 1; int inlup = 1; ofstream out(ufname, ios::out | ios::app); static int first_time = 1; //TODO: Check if this is necessary when everything is done on the GPU FLOAT3* incr = new FLOAT3[nbead]; //If this is the start of a new simulation, zero the velocity and force arrays if ((!restart) && first_time) { // zero out the velocities and forces for (int i = 0; i < nbead; i++) { vel[i].x = 0.0; vel[i].y = 0.0; vel[i].z = 0.0; force[i].x = 0.0; force[i].y = 0.0; force[i].z = 0.0; }//end for }//end if //The vel and force GPU arrays will be zeroed because of the previous section //of code. 
If it is removed, the vel and force arrays will need to be //zeroed when the simulation is not starting from a restart state alloc_GPU_arrays(); alloc_cudpp(); print_sim_params(); #ifdef USE_CURAND //NOTE: CURAND setup does not currently support restarting setup_rng(1234, 0); #endif //If using the neighbor list, update the neighbor and pair lists if (neighborlist == 1) { update_neighbor_list(); update_pair_list(); } //Cell list is not yet implemented else if (celllist == 1) { cout << "Cell list not implemented" << endl; exit(-1); // update_cell_list(); update_pair_list(); }//end else if celllist == 1 //Set the energy terms to be evaluated set_potential(); // set_forces(); //The forces to be used are now hard-coded to allow streams //This can be modified when different combinations are used //If restarting, load the old coordinates and velocities and set istep to the //correct value if (restart) { load_coords(cfname, unccfname); load_vels(vfname); istep = istep_restart + 1.0; }//end if //If the RNG should be restarted, do so. //TODO: Implement this for the GPU-based RNG if (rgen_restart) { generator.restart(); }//end if //If this is the first time the simulation has been run, evaluate the energy //and forces if (first_time) { //If it is the first time, the data in the host arrays will be up to date, //so no data will need to be transfered from the device energy_eval(); force_eval(); }//end if // ??? if (binsave) { // ??? if ((first_time) && (!rgen_restart)) { record_traj(binfname, uncbinfname); } //Iterate through the time steps while (istep <= nstep) { //Compute pair separation list if ((inlup % nnlup) == 0) { if (neighborlist == 1) { update_neighbor_list(); } else if (celllist == 1) { cout << "Cell list not implemented" << endl; exit(-1); //update_cell_list(); }//end if //Output progress every 100,000 steps if (!((int) istep % 100000)) fprintf(stdout, "(%.0lf) neighbor list: (%d/%d)\n", istep, nnl_att, nnl_rep); inlup = 0; }//end if inlup % nnlup == 0 inlup++; if (neighborlist == 1 || celllist == 1) { update_pair_list(); // fprintf(stdout, "(%.0lf) pair list: (%d/%d)\n", istep, nil_att, // nil_rep); }//end if underdamped_iteration(); //Evaluate the energy of the structure and output all relevant data //every nup time steps if (!(iup % nup)) { // updates //Copy all of the data that will be needed for energy evaluation and //logging from the device to the host. One more transfer to update //the increment array will take place in the calculate_observables() //function if sim_type is 2. 
cutilSafeCall(cudaMemcpy(pos, dev_pos, pos_size, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(unc_pos, dev_unc_pos, unc_pos_size, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(idx_pair_list_att, dev_idx_pair_list_att, nil_att * sizeof (ushort2), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(idx_pair_list_rep, dev_idx_pair_list_rep, nil_rep * sizeof (ushort2), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(pl_lj_nat_pdb_dist, dev_pl_lj_nat_pdb_dist, nil_att * sizeof (PDB_FLOAT), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(vel, dev_vel, vel_size, cudaMemcpyDeviceToHost)); energy_eval(); calculate_observables(incr); sprintf(oline, "%.0lf %f %f %f %f %f %f %f %f %f %d %f", istep, T, kinT, e_bnd, e_ang_ss, e_vdw_rr_att, e_vdw_rr_rep, e_vdw_rr, rna_etot, Q, contct_nat, rgsq); out << oline << endl; iup = 0; record_traj(binfname, uncbinfname); save_coords(cfname, unccfname); save_vels(vfname); generator.save_state(); } istep += 1.0; iup++; } out.close(); } if (first_time) first_time = 0; delete [] incr; return; }//end underdamped_ctrl() //TODO: Parallelize this. Currently will not work! void overdamped_ctrl() { using namespace std; char oline[2048]; FLOAT istep = 1.0; int iup = 1; ofstream out(ufname, ios::out | ios::app); static int first_time = 1; FLOAT3* incr = new FLOAT3[nbead]; //If this is the start of a simulation, zero the velocity and force arrays if ((!restart) && first_time) { // zero out the velocities and forces for (int i = 0; i < nbead; i++) { vel[i].x = 0.0; vel[i].y = 0.0; vel[i].z = 0.0; force[i].x = 0.0; force[i].y = 0.0; force[i].z = 0.0; }//end for }//end if print_sim_params(); if (neighborlist == 1) { update_neighbor_list(); update_pair_list(); } else if (celllist == 1) { cout << "Cell list not yet implemented." 
<< endl; exit(-1); // update_cell_list(); update_pair_list(); } set_potential(); // set_forces(); if (restart) { load_coords(cfname, unccfname); // load_vels(vfname); istep = istep_restart + 1.0; } if (rgen_restart) { generator.restart(); } if (first_time) { energy_eval(); force_eval(); } if (binsave) { if ((first_time) && (!rgen_restart)) { record_traj(binfname, uncbinfname); } while (istep <= nstep) { // compute pair separation list if ((inlup % nnlup) == 0) { if (neighborlist == 1) { update_neighbor_list(); } else if (celllist == 1) { cout << "Cell list not yet implemented" << endl; exit(-1); update_cell_list(); } // fprintf(stderr, "(%.0lf) neighbor list: (%d/%d)\n", istep, nnl_att, nnl_rep); inlup = 0; } inlup++; if (neighborlist == 1 || celllist == 1) { update_pair_list(); // fprintf(stderr, "(%.0lf) pair list: (%d/%d)\n", istep, nil_att, nil_rep); } overdamped_iteration(incr); if (!(iup % nup)) { // updates energy_eval(); calculate_observables(incr); sprintf(oline, "%.0lf %f %f %f %f %f %f %f %d %f", istep, T, kinT, e_bnd, e_ang_ss, e_vdw_rr, rna_etot, Q, contct_nat, rgsq); out << oline << endl; iup = 0; record_traj(binfname, uncbinfname); save_coords(cfname, unccfname); save_vels(vfname); generator.save_state(); } istep += 1.0; iup++; } out.close(); } if (first_time) first_time = 0; delete [] incr; return; }//end overdamped_ctrl() //Kernel to perform the necessary calculations for each iteration when using //an underdamped simulation __global__ void underdamped_iteration_kernel(FLOAT3 *dev_incr, FLOAT3 *dev_vel, float3 *dev_force, FLOAT3 *dev_pos, FLOAT3 *dev_unc_pos, int nbead, FLOAT a1, FLOAT a2, FLOAT boxl) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nbead) { // compute position increments dev_incr[i].x = a1 * dev_vel[i].x + a2 * dev_force[i].x; dev_incr[i].y = a1 * dev_vel[i].y + a2 * dev_force[i].y; dev_incr[i].z = a1 * dev_vel[i].z + a2 * dev_force[i].z; // update bead positions dev_pos[i].x += dev_incr[i].x; dev_pos[i].y += dev_incr[i].y; dev_pos[i].z += dev_incr[i].z; dev_pos[i].x -= boxl * rintf(dev_pos[i].x / boxl); dev_pos[i].y -= boxl * rintf(dev_pos[i].y / boxl); dev_pos[i].z -= boxl * rintf(dev_pos[i].z / boxl); dev_unc_pos[i].x += dev_incr[i].x; dev_unc_pos[i].y += dev_incr[i].y; dev_unc_pos[i].z += dev_incr[i].z; }//end if i < nbead }//end underdamped_iteration_kernel //Kernel to update the velocities of the beads __global__ void update_velocities_kernel(FLOAT3 * dev_vel, FLOAT3 *dev_incr, float3 *dev_force, int nbead, FLOAT a3, FLOAT a4) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nbead) { // compute velocity increments dev_vel[i].x = a3 * dev_incr[i].x + a4 * dev_force[i].x; dev_vel[i].y = a3 * dev_incr[i].y + a4 * dev_force[i].y; dev_vel[i].z = a3 * dev_incr[i].z + a4 * dev_force[i].z; }//end if i < nbead }//end update_velocities_kernel //Perform the necessary calculations for the underdamped iteration //TODO: Inline this in underdamped_ctrl() ? void underdamped_iteration() { static const FLOAT eps = 1.0e-5; dim3 threads(BLOCK_DIM, 1, 1); dim3 grid((int) ceil((nbead + 1.0) / (float) threads.x), 1, 1); underdamped_iteration_kernel <<<grid, threads>>>(dev_incr, dev_vel, dev_force, dev_pos, dev_unc_pos, nbead, a1, a2, boxl); // force_update force_eval(); if (T < eps) return; // don't update velocities for steepest descent // update_velocities update_velocities_kernel <<<grid, threads>>>(dev_vel, dev_incr, dev_force, nbead, a3, a4); }//end underdamped_iteration //TODO: Parallelize. Currently will not work! 
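//The loop below performs the overdamped position update, incr = a5 * force and
//pos/unc_pos += incr, followed by the periodic wrap. It presumably "will not
//work" in the GPU build because the current positions and forces live in the
//dev_* arrays, while this routine reads and writes only the host copies.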
void overdamped_iteration(FLOAT3* incr) { using namespace std; for (int i = 0; i < nbead; i++) { // compute position increments incr[i].x = a5 * force[i].x; incr[i].y = a5 * force[i].y; incr[i].z = a5 * force[i].z; // update bead positions unc_pos[i].x += incr[i].x; unc_pos[i].y += incr[i].y; unc_pos[i].z += incr[i].z; pos[i].x += incr[i].x; pos[i].y += incr[i].y; pos[i].z += incr[i].z; pos[i].x -= boxl * rnd(pos[i].x / boxl); pos[i].y -= boxl * rnd(pos[i].y / boxl); pos[i].z -= boxl * rnd(pos[i].z / boxl); } // force_update force_eval(); } //Arrays that are referenced in this function are copied from the device to the //host in the underdamped_ctrl function (and will be done in the overdamped_ctrl //function once it is implemented) *EXCEPT* increment, which is only needed if //sim_type == 2. If this is the case, it will be copied to the host in this //function //TODO: Parallelize? void calculate_observables(FLOAT3* increment) { using namespace std; FLOAT dx, dy, dz, d; FLOAT sumvsq; int ibead, jbead; PDB_FLOAT r_ij; // chi, contct_nat, contct_tot, Q contct_nat = 0; for (int i = 0; i < ncon_att; i++) { //idx_bead_lj_nat is static. It never is updated/changed during a simulation ibead = GET_IDX(idx_bead_lj_nat[i].x) - 1; jbead = GET_IDX(idx_bead_lj_nat[i].y) - 1; r_ij = lj_nat_pdb_dist[i]; dx = unc_pos[ibead].x - unc_pos[jbead].x; dy = unc_pos[ibead].y - unc_pos[jbead].y; dz = unc_pos[ibead].z - unc_pos[jbead].z; dx -= boxl * rnd(dx / boxl); dy -= boxl * rnd(dy / boxl); dz -= boxl * rnd(dz / boxl); d = sqrt(dx * dx + dy * dy + dz * dz); if (d / r_ij < 1.25) { contct_nat++; }//end if d / r_ij < 1.25 }//end for Q = FLOAT(contct_nat) / ncon_att; // rgsq rgsq = 0.0; for (int i = 0; i < nbead - 1; i++) { for (int j = i + 1; j < nbead; j++) { dx = unc_pos[i].x - unc_pos[j].x; dy = unc_pos[i].y - unc_pos[j].y; dz = unc_pos[i].z - unc_pos[j].z; dx -= boxl * rnd(dx / boxl); dy -= boxl * rnd(dy / boxl); dz -= boxl * rnd(dz / boxl); rgsq += (dx * dx + dy * dy + dz * dz); }//end for j }//end for i rgsq /= FLOAT(nbead * nbead); // kinT if (sim_type == 1) { sumvsq = 0.0; for (int i = 0; i < nbead; i++) { sumvsq += vel[i].x * vel[i].x + vel[i].y * vel[i].y + vel[i].z * vel[i].z; }//end for i kinT = sumvsq / (3.0 * FLOAT(nbead)); }//end fi sim_type == 1 else if (sim_type == 2) { cutilSafeCall(cudaMemcpy(increment, dev_incr, incr_size, cudaMemcpyDeviceToHost)); sumvsq = 0.0; for (int i = 0; i < nbead; i++) { sumvsq += increment[i].x * increment[i].x + increment[i].y * increment[i].y + increment[i].z * increment[i].z; }//end for i sumvsq *= zeta / (2.0 * h); kinT = sumvsq / (3.0 * FLOAT(nbead)); }//end if sim_type == 2 else { } }//end calculate_observables //Output the parameters for this simulation void print_sim_params() { using namespace std; char oline[2048]; cout << endl; sprintf(oline, "+------------------------+"); cout << oline << endl; sprintf(oline, "| Simulation Parameters: |"); cout << oline << endl; sprintf(oline, "+------------------------+"); cout << oline << endl; if (sim_type == 1) { sprintf(oline, "Simulation Type : %s", "Underdamped"); cout << oline << endl; } else if (sim_type == 2) { sprintf(oline, "Simulation Type : %s", "Overdamped"); cout << oline << endl; } else { cerr << "UNRECOGNIZED SIMULATION TYPE!" 
<< endl; exit(-1); } sprintf(oline, "Simulation Temperature : %.3f", T); cout << oline << endl; sprintf(oline, "Start Time Step : %.0lf", istep_restart); cout << oline << endl; sprintf(oline, "Final Time Step : %.0lf", nstep); cout << oline << endl; sprintf(oline, "Output Frequency : %d", nup); cout << oline << endl; sprintf(oline, "Friction Coefficient : %.0e", zeta); cout << oline << endl; sprintf(oline, "PBC Box Length : %.1f", boxl); cout << oline << endl; if (neighborlist == 1) { sprintf(oline, "Long-range Cutoff Type : %s", "Neighbor List"); cout << oline << endl; sprintf(oline, "Neighbor List Update Frequency : %d", nnlup); cout << oline << endl; } else if (celllist == 1) { sprintf(oline, "Long-range Cutoff Type : %s", "Cell List"); cout << oline << endl; sprintf(oline, "Cell List Update Frequency : %d", nnlup); cout << oline << endl; sprintf(oline, "Number of Cells Each Dimension : %.0lf", ncell); cout << oline << endl; } else { sprintf(oline, "Long-range Cutoff Type : %s", "None"); cout << oline << endl; } cout << endl; }//end print_sim_params //Kernel to determine which of the interactions should be added to the //attractive neighbor list. Each attractive interaction will be iterated //through and its corresponding entry in dev_is_neighbor_list_att to 0 if it //should be included in the neighbor list and 1 if it is not. //NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. __global__ void update_neighbor_list_att_kernel( unsigned int *dev_is_neighbor_list_att, FLOAT boxl, int ncon_att, PDB_FLOAT *dev_lj_nat_pdb_dist, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_bead_lj_nat) { ushort2 idx_bead_lj_nat; FLOAT3 d; FLOAT d2; unsigned int ibead, jbead; PDB_FLOAT lj_nat_pdb_dist; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ncon_att) { idx_bead_lj_nat = dev_idx_bead_lj_nat[i]; lj_nat_pdb_dist = dev_lj_nat_pdb_dist[i]; ibead = GET_IDX(idx_bead_lj_nat.x); jbead = GET_IDX(idx_bead_lj_nat.y); FLOAT3 ipos = dev_unc_pos[ibead - 1]; FLOAT3 jpos = dev_unc_pos[jbead - 1]; d.x = jpos.x - ipos.x; d.y = jpos.y - ipos.y; d.z = jpos.z - ipos.z; //If using doubles, use double-precision rounding. Else use single- //precision rounding. #ifdef SOP_FP_DOUBLE d.x -= boxl * rint(d.x / boxl); d.y -= boxl * rint(d.y / boxl); d.z -= boxl * rint(d.z / boxl); #else d.x -= boxl * rintf(d.x / boxl); d.y -= boxl * rintf(d.y / boxl); d.z -= boxl * rintf(d.z / boxl); #endif d2 = d.x * d.x + d.y * d.y + d.z * d.z; rcut = 3.2 * lj_nat_pdb_dist; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_neighbor_list_att[i] = 0; //include else ... = 1? May cut down on memory allocation time before each call }//end if d2 else { dev_is_neighbor_list_att[i] = 1; } }//end if i }//end update_neighbor_list_att_kernel //Kernel to determine which of the interactions should be added to the //repulsive neighbor list. Each repulsive interaction will be iterated //through and its corresponding entry in dev_is_neighbor_list_rep to 0 if it //should be included in the neighbor list and 1 if it is not. //NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. 
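//This kernel is launched with a 2D grid when ncon_rep is large (see
//update_neighbor_list), presumably to stay within the per-dimension grid-size
//limit; the thread coordinates are therefore flattened as idx = j * xsize + i,
//and out-of-range threads are screened out by the idx < ncon_rep check.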
__global__ void update_neighbor_list_rep_kernel( unsigned int *dev_is_neighbor_list_rep, FLOAT boxl, int xsize, int ysize, int ncon_rep, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_bead_lj_non_nat) { ushort2 idx_bead_lj_non_nat; FLOAT3 d; FLOAT d2; unsigned int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; //TODO: Clean the nested if's up if (i <= xsize && j <= ysize) { unsigned int idx = j * xsize + i; if (idx < ncon_rep) { idx_bead_lj_non_nat = dev_idx_bead_lj_non_nat[idx]; ibead = GET_IDX(idx_bead_lj_non_nat.x) - 1; jbead = GET_IDX(idx_bead_lj_non_nat.y) - 1; itype = GET_TYPE(idx_bead_lj_non_nat.x); jtype = GET_TYPE(idx_bead_lj_non_nat.y); FLOAT3 ipos = dev_unc_pos[ibead]; FLOAT3 jpos = dev_unc_pos[jbead]; d.x = jpos.x - ipos.x; d.y = jpos.y - ipos.y; d.z = jpos.z - ipos.z; //If using doubles, use double-precision rounding. Else, use single- //precision rounding. #ifdef SOP_FP_DOUBLE d.x -= boxl * rint(d.x / boxl); d.y -= boxl * rint(d.y / boxl); d.z -= boxl * rint(d.z / boxl); #else d.x -= boxl * rintf(d.x / boxl); d.y -= boxl * rintf(d.y / boxl); d.z -= boxl * rintf(d.z / boxl); #endif d2 = d.x * d.x + d.y * d.y + d.z * d.z; rcut = 3.2 * sigma_rep_mat[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_neighbor_list_rep[idx] = 0; }//end if d2 else dev_is_neighbor_list_rep[idx] = 1; }//end if idx }//end if i }//end update_neighbor_list_rep_kernel //Kernel to determine which of the interactions should be added to the //attractive pair list. Each attractive interaction will be iterated //through and its corresponding entry in dev_is_pair_list_att to 0 if it //should be included in the pair list and 1 if it is not. //NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. __global__ void update_pair_list_att_kernel(unsigned int *dev_is_pair_list_att, FLOAT boxl, int nnl_att, PDB_FLOAT *dev_nl_lj_nat_pdb_dist, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_neighbor_list_att) { FLOAT3 d; FLOAT d2; unsigned int ibead, jbead; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nnl_att) { ibead = GET_IDX(dev_idx_neighbor_list_att[i].x) - 1; jbead = GET_IDX(dev_idx_neighbor_list_att[i].y) - 1; d.x = dev_unc_pos[jbead].x - dev_unc_pos[ibead].x; d.y = dev_unc_pos[jbead].y - dev_unc_pos[ibead].y; d.z = dev_unc_pos[jbead].z - dev_unc_pos[ibead].z; d.x -= boxl * rintf(d.x / boxl); d.y -= boxl * rintf(d.y / boxl); d.z -= boxl * rintf(d.z / boxl); d2 = d.x * d.x + d.y * d.y + d.z * d.z; rcut = 2.5 * dev_nl_lj_nat_pdb_dist[i]; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_pair_list_att[i] = 0; }//end if d2 < rcut2 else { dev_is_pair_list_att[i] = 1; } }//end if i ... }//end update_pair_list_att_kernel //Kernel to determine which of the interactions should be added to the //repulsive pair list. Each repulsive interaction will be iterated //through and its corresponding entry in dev_is_pair_list_rep to 0 if it //should be included in the pair list and 1 if it is not. //NOTE: The number 0 indicates that the interaction SHOULD be in the list and //the number 1 indicates that the interaction should NOT be list. This is //necessary because of the default way that CUDPP sorts data. 
__global__ void update_pair_list_rep_kernel(unsigned int *dev_is_pair_list_rep, FLOAT boxl, int nnl_rep, FLOAT3 *dev_unc_pos, ushort2 *dev_idx_neighbor_list_rep) { FLOAT dx, dy, dz; FLOAT d2; unsigned int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < nnl_rep) { ibead = GET_IDX(dev_idx_neighbor_list_rep[i].x) - 1; jbead = GET_IDX(dev_idx_neighbor_list_rep[i].y) - 1; itype = GET_TYPE(dev_idx_neighbor_list_rep[i].x); jtype = GET_TYPE(dev_idx_neighbor_list_rep[i].y); dx = dev_unc_pos[jbead].x - dev_unc_pos[ibead].x; dy = dev_unc_pos[jbead].y - dev_unc_pos[ibead].y; dz = dev_unc_pos[jbead].z - dev_unc_pos[ibead].z; dx -= (boxl) * rintf(dx / boxl); dy -= (boxl) * rintf(dy / boxl); dz -= (boxl) * rintf(dz / boxl); d2 = dx * dx + dy * dy + dz*dz; rcut = 2.5 * sigma_rep_mat[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { dev_is_pair_list_rep[i] = 0; } else dev_is_pair_list_rep[i] = 1; }//end if i... }// end update_pair_list_rep_kernel //If USE_GPU_NL_PL is defined, use the GPU for all of the neighbor list //calculations #ifdef USE_GPU_NL_PL //Update the neighbor list. This function involves using kernels to denote //which interactions should be added to the neighbor list and then uses CUDPP //sort and scan functionality to transfer the interactions to the neighbor list. void update_neighbor_list() { nnl_att = 0; nnl_rep = 0; //<editor-fold defaultstate="collapsed" desc="Execution parameters"> // setup execution parameters dim3 threads_att(BLOCK_DIM, 1, 1); dim3 grid_att((int) ceil(((float) ncon_att) / (float) threads_att.x), 1, 1); int blocksx, blocksy, gridsx, gridsy; if (ncon_rep / BLOCK_DIM <= GRID_DIM) { blocksx = BLOCK_DIM; blocksy = 1; gridsx = (int) ceil(((float) ncon_rep) / (float) BLOCK_DIM); gridsy = 1; } else if (ncon_rep / BLOCK_DIM > GRID_DIM) { blocksx = 32; blocksy = 16; gridsx = (int) ceil(sqrt(ncon_rep) / blocksx + 1.0); gridsy = (int) ceil(sqrt(ncon_rep) / blocksy + 1.0); } dim3 threads_rep(blocksx, blocksy, 1); dim3 grid_rep(gridsx, gridsy, 1); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Update kernels"> //Call the kernels that determine which interactions should be added to the //attractive and repulsive neighbor lists. The entries of the //dev_idx_bead_lj_nat represent the attractive interactions and correspond to //the entries of the dev_is_list_att array which will denote whether or not a //given interaction should be added to the neighbor list. Similarly, the //dev_idx_bead_lj_non_nat array represents the repulsive interactions and //correspond to the entries of the dev_is_list_rep array which will denote //whether or not a given interaction should be added to the neighbor list update_neighbor_list_att_kernel <<<grid_att, threads_att >>>(dev_is_list_att, boxl, ncon_att, dev_lj_nat_pdb_dist, dev_unc_pos, dev_idx_bead_lj_nat); update_neighbor_list_rep_kernel <<<grid_rep, threads_rep >>>(dev_is_list_rep, boxl, blocksx*gridsx, blocksy*gridsy, ncon_rep, dev_unc_pos, dev_idx_bead_lj_non_nat); cudaThreadSynchronize(); cutilCheckMsg("update_neighbor_list_rep_kernel failed"); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Att code"> //The following code uses CUDPP to create the neighbor list for the attractive //interactions and calculate how many attractive entries there are in //the neighbor list. //Obtain a copy of dev_is_list_att for use with pdb. This is //necessary because both the pdb array and the idx array must be sorted in //the same manner. 
When the is_list_att array is sorted the first time, //the order is lost. Obtaining a copy allows the pdb array to be sorted //in an identical way to the idx array, insuring that the corresponding //values are in identical positions in the arrays. cutilSafeCall(cudaMemcpy(dev_is_nl_2, dev_is_list_att, is_list_att_size, cudaMemcpyDeviceToDevice)); //Copy the default values of idx_bead_lj_nat to idx_neighbor_list_att. The //idx_bead_lj_nat array must be kept in its initial order and the //idx_neighbor_list array must be identical to the idx_bead_lj_nat array //before the sort and scan algorithm is used. cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_att, dev_idx_bead_lj_nat, idx_bead_lj_nat_size, cudaMemcpyDeviceToDevice)); //Sort the idx_neighbor_list_att array based on the information in //the is_list_att array. The entries that are in the neighbor list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_att, dev_idx_neighbor_list_att, 1, ncon_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 1\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Copy the default values of lj_nat_pdb_dist to nl_lj_nat_pdb_dist. The //jl_nat_pdb_dist array must be kept in its initial order and the //nl_lj_nat_pdb_dist array must be identical to the lj_nat_pdb_dist array //before the sort and scan algorithm is used. cutilSafeCall(cudaMemcpy(dev_nl_lj_nat_pdb_dist, dev_lj_nat_pdb_dist, lj_nat_pdb_dist_size, cudaMemcpyDeviceToDevice)); //Sort the lj_nat_pdb_dist array based on the information in the copy //of is_list_att array. The entries corresponding to the interactions in the //pair list will be in the first portion of the array and those that are not //will be in the last portion result = cudppSort(sort_plan, dev_is_nl_2, dev_nl_lj_nat_pdb_dist, 1, ncon_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 2\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_att array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the neighbor list. The is_list_att array will be untouched //and the result of the scan will be stored in dev_is_nl_scan_att result = cudppScan(scan_plan, dev_is_nl_scan_att, dev_is_list_att, ncon_att); if (CUDPP_SUCCESS != result) { printf("Error scanning att\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Temporary storage for the result of the scan unsigned int *num; num = new unsigned int[1]; //Copy the last entry of dev_is_nl_scan_att, corresponding to the total sum //of 1's in is_list_att to the host variable "num" cutilSafeCall(cudaMemcpy(num, &(dev_is_nl_scan_att[ncon_att - 1]), sizeof (unsigned int), cudaMemcpyDeviceToHost)); err = cudaGetLastError(); cutilSafeCall(err); //The total number of attractive entries in the neighbor list is equal to //the total number of attractive interactions (ncon_att) minus the number //of attractive entries NOT in the neighbor list (num) nnl_att = ncon_att - *num; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Rep code"> //The following code uses CUDPP to create the neighbor list for the repulsive //interactions and calculate how many repulsive entries there are in //the neighbor list. //The CUDPP algorithms fail with arrays larger than about 32 million entries. 
//As a workaround, if the number of entries is greater than 32 million, the //array can be partitioned into two arrays and each array sorted and scanned //individually and recombined afterwards //If there are less than 32 million entries, no partitioning is necessary if (ncon_rep <= NCON_REP_CUTOFF) { //Copy the default values of idx_bead_lj_non_nat to idx_neighbor_list_rep. //The idx_bead_lj_non_nat array must be kept in its initial order and the //idx_neighbor_list array must be identical to the idx_bead_lj_non_nat array //before the sort and scan algorithm is used. cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_rep, dev_idx_bead_lj_non_nat, idx_bead_lj_non_nat_size, cudaMemcpyDeviceToDevice)); err = cudaGetLastError(); cutilSafeCall(err); //Sort the idx_neighbor_list_rep array based on the information in //the is_list_rep array. The entries that are in the neighbor list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_rep, dev_idx_neighbor_list_rep, 1, ncon_rep); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_rep array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the neighbor list. The is_list_rep array will be //untouched and the result of the scan will be stored in dev_is_nl_scan_rep result = cudppScan(scan_plan, dev_is_nl_scan_rep, dev_is_list_rep, ncon_rep); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } cudaThreadSynchronize(); err = cudaGetLastError(); cutilSafeCall(err); //Copy the last entry of dev_is_nl_scan_rep, corresponding to the total sum //of 1's in is_list_rep to the host variable "num" cutilSafeCall(cudaMemcpy(num, &(dev_is_nl_scan_rep[ncon_rep - 1]), sizeof (unsigned int), cudaMemcpyDeviceToHost)); //The total number of repulsive entries in the neighbor list is equal to //the total number of repulsive interactions (ncon_rep) minus the number //of repulsive entries NOT in the neighbor list (num) nnl_rep = ncon_rep - *num; //The temporary variable num is no longer needed, so it can be freed. free(num); }//end if //If there are over 32 million entries, the first 32 million entries will be //sorted as usual, then the remaining entries will be sorted in separate //arrays. The entries that are members of the neighbor list are then //copied back to the original list. The result is that the repulsive //neighbor list ends up sorted exactly as it would be if CUDPP could handle //arrays larger than 32 million entries. else { //Copy first NCON_REP_CUTOFF elements to idx_nl_rep. cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_rep, dev_idx_bead_lj_non_nat, sizeof (ushort2) * NCON_REP_CUTOFF, cudaMemcpyDeviceToDevice)); //Calculate the number or entries that will be in the temporary array. 
This //is simply the total number of repulsive interactions (ncon_rep) minus the //cutoff value (currently 32 million) int numTmp = ncon_rep - NCON_REP_CUTOFF; //Create temporary arrays //idx_rep_temp will hold the entries at and above the 32 millionth index //in the original idx list ushort2* idx_rep_tmp; cutilSafeCall(cudaMalloc((void**) &idx_rep_tmp, sizeof (ushort2) * numTmp)); //is_nl_rep_tmp will hold the entries at and above the 32 millionth index //in the original is_list unsigned int* is_nl_rep_tmp; cutilSafeCall(cudaMalloc((void**) &is_nl_rep_tmp, sizeof (unsigned int) * numTmp)); //Copy last ncon_rep - NCON_REP_CUTOFF elements to temporary arrays cutilSafeCall(cudaMemcpy(idx_rep_tmp, &(dev_idx_bead_lj_non_nat[NCON_REP_CUTOFF]), sizeof (ushort2) * numTmp, cudaMemcpyDeviceToDevice)); cutilSafeCall(cudaMemcpy(is_nl_rep_tmp, &(dev_is_list_rep[NCON_REP_CUTOFF]), sizeof (unsigned int) * numTmp, cudaMemcpyDeviceToDevice)); //Sort first NCON_REP_CUTOFF elements of original array err = cudaGetLastError(); cutilSafeCall(err); result = cudppSort(sort_plan, dev_is_list_rep, dev_idx_neighbor_list_rep, 1, NCON_REP_CUTOFF); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Scan first NCON_REP_CUTOFF elements to determine how many entries would be //in is_nl_rep result = cudppScan(scan_plan, dev_is_nl_scan_rep, dev_is_list_rep, NCON_REP_CUTOFF); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Copy the 32million - 1st entry of dev_is_nl_scan_rep to the host. This //corresponds to the number of 1's in the array, or the number of entries //that are NOT in the pair list cutilSafeCall(cudaMemcpy(num, &(dev_is_nl_scan_rep[NCON_REP_CUTOFF - 1]), sizeof (unsigned int), cudaMemcpyDeviceToHost)); err = cudaGetLastError(); cutilSafeCall(err); //The number of entries in the neighbor list (to be stored in num) is equal //to the total number of values sorted (NCON_REP_CUTOFF) minus the number //of entries NOT in the neighbor list (num) *num = NCON_REP_CUTOFF - *num; //Sort elements of temp array result = cudppSort(sort_plan, is_nl_rep_tmp, idx_rep_tmp, 1, numTmp); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Scan elements of temp array to determine how many will be copied back to //the original array result = cudppScan(scan_plan, dev_is_nl_scan_rep, is_nl_rep_tmp, numTmp); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //num2 is a temporary variable to store the number of entries in the //temporary array that are NOT in the neighbor list unsigned int* num2; num2 = new unsigned int[1]; //Copy the last entry in dev_is_nl_scan_rep, corresponding to the number //of entires in the temporary array that are NOT in the neighbor list, to //the host //std::cout << "numTmp: " << numTmp << std::endl; cutilSafeCall(cudaMemcpy(num2, &(dev_is_nl_scan_rep[numTmp - 1]), sizeof (unsigned int), cudaMemcpyDeviceToHost)); //The number of entries in the neighbor list (to be stored in num2) that are //in the temporary array is equal to the total number of values sorted //in the temporary array (numTmp) minus the number of entries NOT in the //neighbor list (num2) *num2 = numTmp - *num2; //Copy num_is_temp valid entries to original array starting at the num'th //entry 
cutilSafeCall(cudaMemcpy(&(dev_idx_neighbor_list_rep[(*num)]), idx_rep_tmp, sizeof (ushort2) * (*num2), cudaMemcpyDeviceToDevice)); //The total number of entries in the repulsive neighbor list (nnl_rep) is //equal to the number of entries in the original list (num) plus the number //of entries in the temporary list (num2) nnl_rep = *num + *num2; //Free temp arrays free(num); free(num2); cutilSafeCall(cudaFree(idx_rep_tmp)); cutilSafeCall(cudaFree(is_nl_rep_tmp)); } //</editor-fold> if (nnl_rep == 0) { cerr << "Neighbor List is EMPTY!!" << endl; exit(-1); } }//end update_neighbor_list //If the GPU is not to be used for all neighbor list calculations, //USE_GPU_NL_PL_NAIVE can be defined to use the "naive" GPU approach or nothing //can be defined to use a CPU-only neighbor list calculation #else #ifdef USE_GPU_NL_PL_NAIVE //Update the neighbor list WITHOUT using CUDPP. This uses parallel kernels to //determine which interactions should be added to the neighbor list and then //adds them to the neighbor list sequentially on the CPU. This is included for //timing and comparison purposes only. void update_neighbor_list() { using namespace std; nnl_att = 0; nnl_rep = 0; //<editor-fold defaultstate="collapsed" desc="Execution parameters"> // setup execution parameters dim3 threads_att(BLOCK_DIM, 1, 1); dim3 grid_att((int) ceil(((float) ncon_att) / (float) threads_att.x), 1, 1); int blocksx, blocksy, gridsx, gridsy; if (ncon_rep / BLOCK_DIM <= GRID_DIM) { blocksx = BLOCK_DIM; blocksy = 1; gridsx = (int) ceil(((float) ncon_rep) / (float) BLOCK_DIM); gridsy = 1; } else if (ncon_rep / BLOCK_DIM > GRID_DIM) { blocksx = 32; blocksy = 16; gridsx = (int) ceil(sqrt(ncon_rep) / blocksx + 1.0); gridsy = (int) ceil(sqrt(ncon_rep) / blocksy + 1.0); } dim3 threads_rep(blocksx, blocksy, 1); dim3 grid_rep(gridsx, gridsy, 1); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Update kernels"> update_neighbor_list_att_kernel <<<grid_att, threads_att >>>(dev_is_list_att, boxl, ncon_att, dev_lj_nat_pdb_dist, dev_unc_pos, dev_idx_bead_lj_nat); update_neighbor_list_rep_kernel <<<grid_rep, threads_rep >>>(dev_is_list_rep, boxl, blocksx*gridsx, blocksy*gridsy, ncon_rep, dev_unc_pos, dev_idx_bead_lj_non_nat); cudaThreadSynchronize(); cutilCheckMsg("update_neighbor_list_rep_kernel failed"); //</editor-fold> //Copy needed arrays to the host cutilSafeCall(cudaMemcpy(is_list_att, dev_is_list_att, is_list_att_size, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(is_list_rep, dev_is_list_rep, is_list_rep_size, cudaMemcpyDeviceToHost)); // should be native distance for (int i=0; i<ncon_att; i++) { if (is_list_att[i] == 0) { // add to interaction neighbor list idx_neighbor_list_att[nnl_att] = idx_bead_lj_nat[i]; nl_lj_nat_pdb_dist[nnl_att] = lj_nat_pdb_dist[i]; nnl_att++; } } for (int i=0; i<ncon_rep; i++) { if (is_list_rep[i] == 0) { // add to interaction neighbor list idx_neighbor_list_rep[nnl_rep] = idx_bead_lj_non_nat[i]; nnl_rep++; } } //Copy updated values back to the GPU cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_att, idx_neighbor_list_att, /*idx_neighbor_list_att_size**/ nnl_att * sizeof(ushort2), cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_nl_lj_nat_pdb_dist, nl_lj_nat_pdb_dist, /*nl_lj_nat_pdb_dist_size**/ nnl_att * sizeof(PDB_FLOAT), cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_rep, idx_neighbor_list_rep, /*idx_neighbor_list_rep_size**/ nnl_rep * sizeof(ushort2), cudaMemcpyHostToDevice)); if (nnl_rep == 0) { cerr << "Neighbor List is EMPTY!!" 
<< endl; exit(-1); } } #else //Update the neighbor list using ONLY the CPU. This is included for timing //and comparison purposes only. void update_neighbor_list() { FLOAT dx, dy, dz; FLOAT d2; int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; nnl_att = 0; nnl_rep = 0; //Copy the needed data to the CPU from the GPU. The unc_pos array will need //to be copied, but the other arrays that are read from in NL calculations //are static/global arrays cutilSafeCall(cudaMemcpy(unc_pos, dev_unc_pos, unc_pos_size, cudaMemcpyDeviceToHost)); for (int i=0; i<ncon_att; i++) { ibead = GET_IDX(idx_bead_lj_nat[i].x) - 1; jbead = GET_IDX(idx_bead_lj_nat[i].y) - 1; itype = GET_TYPE(idx_bead_lj_nat[i].x); jtype = GET_TYPE(idx_bead_lj_nat[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 3.2*lj_nat_pdb_dist[i]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to neighbor list // ibead_neighbor_list_att[nnl_att] = ibead + 1; // jbead_neighbor_list_att[nnl_att] = jbead + 1; // itype_neighbor_list_att[nnl_att] = itype; // jtype_neighbor_list_att[nnl_att] = jtype; idx_neighbor_list_att[nnl_att] = idx_bead_lj_nat[i]; nl_lj_nat_pdb_dist[nnl_att] = lj_nat_pdb_dist[i]; // nl_lj_nat_pdb_dist2[nnl_att] = lj_nat_pdb_dist2[i]; // nl_lj_nat_pdb_dist6[nnl_att] = lj_nat_pdb_dist6[i]; // nl_lj_nat_pdb_dist12[nnl_att] = lj_nat_pdb_dist12[i]; nnl_att++; } } for (int i=0; i<ncon_rep; i++) { ibead = GET_IDX(idx_bead_lj_non_nat[i].x) - 1; jbead = GET_IDX(idx_bead_lj_non_nat[i].y) - 1; itype = GET_TYPE(idx_bead_lj_non_nat[i].x); jtype = GET_TYPE(idx_bead_lj_non_nat[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 3.2*sigma_rep[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to neighbor list // ibead_neighbor_list_rep[nnl_rep] = ibead + 1; // jbead_neighbor_list_rep[nnl_rep] = jbead + 1; // itype_neighbor_list_rep[nnl_rep] = itype; // jtype_neighbor_list_rep[nnl_rep] = jtype; idx_neighbor_list_rep[nnl_rep] = idx_bead_lj_non_nat[i]; nnl_rep++; } } //Write the modified arrays back to the GPU cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_att, idx_neighbor_list_att, /*idx_neighbor_list_att_size**/ nnl_att * sizeof(ushort2), cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_nl_lj_nat_pdb_dist, nl_lj_nat_pdb_dist, /*nl_lj_nat_pdb_dist_size**/ nnl_att * sizeof(PDB_FLOAT), cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_rep, idx_neighbor_list_rep, /*idx_neighbor_list_rep_size**/ nnl_rep * sizeof(ushort2), cudaMemcpyHostToDevice)); } #endif #endif #ifdef USE_GPU_NL_PL //Update the pair list. This function involves using kernels to denote //which interactions should be added to the pair list and then uses CUDPP //sort and scan functionality to transfer the interactions to the pair list. 
void update_pair_list() { nil_att = 0; nil_rep = 0; //<editor-fold defaultstate="collapsed" desc="Execution params"> // setup execution parameters dim3 threads_att(BLOCK_DIM, 1, 1); dim3 grid_att((int) ceil(((float) nnl_att) / (float) threads_att.x), 1, 1); dim3 threads_rep(BLOCK_DIM, 1, 1); dim3 grid_rep((int) ceil(((float) nnl_rep) / (float) threads_rep.x), 1, 1); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Kernels"> //Call the kernels that determine which interactions should be added to the //attractive and repulsive neighbor lists. The entries of the //dev_idx_neighbor_list_att represent the attractive interactions in the //neigbhor list andcorrespond to the entries of the dev_is_list_att array //which will denote whether or not a given interaction should be added to the //pair list. Similarly, the dev_idx_neighbor_list_rep array represents the //repulsive interactions in the neighbor list and correspond to the entries of //the dev_is_list_rep array which will denote whether or not a given //interaction should be added to the pair list update_pair_list_att_kernel <<<grid_att, threads_att >>>(dev_is_list_att, boxl, nnl_att, dev_nl_lj_nat_pdb_dist, dev_unc_pos, dev_idx_neighbor_list_att); update_pair_list_rep_kernel <<<grid_rep, threads_rep>>>(dev_is_list_rep, boxl, nnl_rep, dev_unc_pos, dev_idx_neighbor_list_rep); cudaThreadSynchronize(); cutilCheckMsg("Kernel execution failed"); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Att code"> //The following code uses CUDPP to create the pair list for the attractive //interactions and calculate how many attractive entries there are in //the pair list. //Obtain a copy of dev_is_list_att for use with pdb. This is //necessary because both the pdb array and the idx array must be sorted in //the same manner. When the is_list_att array is sorted the first time, //the order is lost. Obtaining a copy allows the pdb array to be sorted //in an identical way to the idx array, insuring that the corresponding //values are in identical positions in the arrays. cutilSafeCall(cudaMemcpy(dev_is_nl_2, dev_is_list_att, is_list_att_size, cudaMemcpyDeviceToDevice)); //Re-use the space allocated for the neighbor list for the pair list. The //entries of the neighbor list will still be in the first nnl_att entries //and the entries of the pair list will be in the first nil_att entries. dev_idx_pair_list_att = dev_idx_neighbor_list_att; //Sort the idx_pair_list_att array based on the information in //the is_list_att array. The entries that are in the pair list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_att, dev_idx_pair_list_att, 1, nnl_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 1\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Re-use the space allocated for dev_nl_lj_nat_pdb_dist for the //dev_pl_lj_nat_pdb_dist array. The entries of the neighbor list will still //be in the first nnl_att entries and the entries of the pair list will be in //the first nil_att entries. dev_pl_lj_nat_pdb_dist = dev_nl_lj_nat_pdb_dist; //Sort the dev_pl_lj_nat_pdb_dist array based on the information in the copy //of is_list_att array. 
The entries corresponding to the interactions in the //pair list will be in the first portion of the array and those that are not //will be in the last portion result = cudppSort(sort_plan, dev_is_nl_2, dev_pl_lj_nat_pdb_dist, 1, nnl_att); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_att) 2\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_att array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the neighbor list. The is_list_att array will be untouched //and the result of the scan will be stored in dev_is_nl_scan_att result = cudppScan(scan_plan, dev_is_nl_scan_att, dev_is_list_att, nnl_att); if (CUDPP_SUCCESS != result) { printf("Error scanning att\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Temporary storage for the result of the scan unsigned int *num; num = new unsigned int[1]; //Copy the last entry of dev_is_nl_scan_att, corresponding to the total sum //of 1's in is_list_att to the host variable "num" cutilSafeCall(cudaMemcpy(num, &(dev_is_nl_scan_att[nnl_att - 1]), sizeof (unsigned int), cudaMemcpyDeviceToHost)); err = cudaGetLastError(); cutilSafeCall(err); //The total number of attractive entries in the neighbor list is equal to //the total number of attractive interactions (ncon_att) minus the number //of attractive entries NOT in the neighbor list (num) nil_att = nnl_att - *num; //</editor-fold> //<editor-fold defaultstate="collapsed" desc="CUDPP Rep code"> //The following code uses CUDPP to create the pair list for the repulsive //interactions and calculate how many repulsive entries there are in //the pair list. //Reuse the neighbor list array for the pair list dev_idx_pair_list_rep = dev_idx_neighbor_list_rep; //Sort the idx_pair_list_rep array based on the information in //the is_list_rep array. The entries that are in the pair list //will be in the first portion of the array and those that are not will be //in the last portion result = cudppSort(sort_plan, dev_is_list_rep, dev_idx_pair_list_rep, 1, nnl_rep); if (CUDPP_SUCCESS != result) { printf("Error calling cppSort(sort_plan_rep) 1\n"); exit(-1); } err = cudaGetLastError(); cutilSafeCall(err); //Perform the parallel scan of the is_list_rep array, counting the number //of 1's that appear. This number corresponds to the number of interactions //that are NOT in the pair list. The is_list_rep array will be //untouched and the result of the scan will be stored in dev_is_nl_scan_rep result = cudppScan(scan_plan, dev_is_nl_scan_rep, dev_is_list_rep, nnl_rep); err = cudaGetLastError(); cutilSafeCall(err); if (CUDPP_SUCCESS != result) { printf("Error scanning rep\n"); exit(-1); } //Copy the last entry of dev_is_nl_scan_rep, corresponding to the total sum //of 1's in is_list_rep to the host variable "num" cutilSafeCall(cudaMemcpy(num, &(dev_is_nl_scan_rep[nnl_rep - 1]), sizeof (unsigned int), cudaMemcpyDeviceToHost)); //The total number of repulsive entries in the pair list is equal to //the total number of repulsive interactions in the neighbor list(nnl_rep) //minus the number of repulsive entries NOT in the pair list (num) nil_rep = nnl_rep - *num; //</editor-fold> free(num); }//end update_pair_list #else #ifdef USE_GPU_NL_PL_NAIVE //Update the pair list WITHOUT using CUDPP. This uses parallel kernels to //determine which interactions should be added to the pair list and then //adds them to the pair list sequentially on the CPU. 
This is included for //timing and comparison purposes only. void update_pair_list() { using namespace std; nil_att = 0; nil_rep = 0; //<editor-fold defaultstate="collapsed" desc="Execution params"> // setup execution parameters dim3 threads_att(BLOCK_DIM, 1, 1); dim3 grid_att((int) ceil(((float) nnl_att) / (float) threads_att.x), 1, 1); dim3 threads_rep(BLOCK_DIM, 1, 1); dim3 grid_rep((int) ceil(((float) nnl_rep) / (float) threads_rep.x), 1, 1); //</editor-fold> //<editor-fold defaultstate="collapsed" desc="Kernels"> update_pair_list_att_kernel <<<grid_att, threads_att >>>(dev_is_list_att, boxl, nnl_att, dev_nl_lj_nat_pdb_dist, dev_unc_pos, dev_idx_neighbor_list_att); update_pair_list_rep_kernel <<<grid_rep, threads_rep>>>(dev_is_list_rep, boxl, nnl_rep, dev_unc_pos, dev_idx_neighbor_list_rep); cudaThreadSynchronize(); cutilCheckMsg("Kernel execution failed"); //</editor-fold> //Copy needed values to the CPU cutilSafeCall(cudaMemcpy(is_list_att, dev_is_list_att, nnl_att * sizeof(unsigned int) /*is_list_att_size**/, cudaMemcpyDeviceToHost)); //Might still be up to date from neighbor list update // cutilSafeCall(cudaMemcpy(idx_neighbor_list_att, dev_idx_neighbor_list_att, idx_neighbor_list_att_size, cudaMemcpyDeviceToHost)); // cutilSafeCall(cudaMemcpy(idx_pair_list_att, dev_idx_pair_list_att, idx_pair_list_att_size, cudaMemcpyDeviceToHost)); //Might still be up to date from neighbor list update // cutilSafeCall(cudaMemcpy(nl_lj_nat_pdb_dist, dev_nl_lj_nat_pdb_dist, nl_lj_nat_pdb_dist_size, cudaMemcpyDeviceToHost)); // cutilSafeCall(cudaMemcpy(pl_lj_nat_pdb_dist, dev_pl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist_size, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(is_list_rep, dev_is_list_rep, nnl_rep * sizeof(unsigned int) /*is_list_rep_size**/, cudaMemcpyDeviceToHost)); //Might still be up to date from neighbor list update // cutilSafeCall(cudaMemcpy(idx_neighbor_list_rep, dev_idx_neighbor_list_rep, idx_neighbor_list_rep_size, cudaMemcpyDeviceToHost)); // should be native distance for (int i=0; i<nnl_att; i++) { if (is_list_att[i] == 0) { // add to interaction pair list // idx_pair_list_att[nil_att].x = idx_neighbor_list_att[i].x; // idx_pair_list_att[nil_att].y = idx_neighbor_list_att[i].y; // idx_pair_list_att[nil_att].z = idx_neighbor_list_att[i].z; // idx_pair_list_att[nil_att].w = idx_neighbor_list_att[i].w; idx_pair_list_att[nil_att] = idx_neighbor_list_att[i]; pl_lj_nat_pdb_dist[nil_att] = nl_lj_nat_pdb_dist[i]; nil_att++; } } for (int i=0; i<nnl_rep; i++) { if (is_list_rep[i] == 0) { // add to interaction pair list // idx_pair_list_rep[nil_rep].x = idx_neighbor_list_rep[i].x; // idx_pair_list_rep[nil_rep].y = idx_neighbor_list_rep[i].y; // idx_pair_list_rep[nil_rep].z = idx_neighbor_list_rep[i].z; // idx_pair_list_rep[nil_rep].w = idx_neighbor_list_rep[i].w; idx_pair_list_rep[nil_rep] = idx_neighbor_list_rep[i]; nil_rep++; } } //Copy updated values back to the GPU cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, cudaMemcpyHostToDevice)); // cutilSafeCall(cudaMemcpy(dev_idx_pair_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, cudaMemcpyHostToDevice)); // cutilSafeCall(cudaMemcpy(dev_pl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att * sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_nl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att * sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/,
cudaMemcpyHostToDevice)); // cutilSafeCall(cudaMemcpy(dev_idx_pair_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, cudaMemcpyHostToDevice)); dev_idx_pair_list_att = dev_idx_neighbor_list_att; dev_pl_lj_nat_pdb_dist = dev_nl_lj_nat_pdb_dist; dev_idx_pair_list_rep = dev_idx_neighbor_list_rep; } #else //Update the pair list using ONLY the CPU. This is included for timing //and comparison purposes only. void update_pair_list() { using namespace std; // declare host variables FLOAT dx, dy, dz; FLOAT d2; unsigned int ibead, jbead, itype, jtype; FLOAT rcut, rcut2; nil_att = 0; nil_rep = 0; //Copy needed arrays to the CPU from the GPU cutilSafeCall(cudaMemcpy(idx_neighbor_list_att, dev_idx_neighbor_list_att, /*idx_neighbor_list_att_size**/ nnl_att * sizeof(ushort2), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(unc_pos, dev_unc_pos, unc_pos_size, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(nl_lj_nat_pdb_dist, dev_nl_lj_nat_pdb_dist, /*nl_lj_nat_pdb_dist_size**/ nnl_att * sizeof(PDB_FLOAT), cudaMemcpyDeviceToHost)); cutilSafeCall(cudaMemcpy(idx_neighbor_list_rep, dev_idx_neighbor_list_rep, /*idx_neighbor_list_rep_size**/ nnl_rep * sizeof(ushort2), cudaMemcpyDeviceToHost)); // should be native distance for (int i=0; i<nnl_att; i++) { // ibead = ibead_neighbor_list_att[i] - 1; // jbead = jbead_neighbor_list_att[i] - 1; // itype = itype_neighbor_list_att[i]; // jtype = jtype_neighbor_list_att[i]; ibead = GET_IDX(idx_neighbor_list_att[i].x) - 1; jbead = GET_IDX(idx_neighbor_list_att[i].y) - 1; itype = GET_TYPE(idx_neighbor_list_att[i].x); jtype = GET_TYPE(idx_neighbor_list_att[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 2.5*nl_lj_nat_pdb_dist[i]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to interaction pair list // ibead_pair_list_att[nil_att] = ibead + 1; // jbead_pair_list_att[nil_att] = jbead + 1; // itype_pair_list_att[nil_att] = itype; // jtype_pair_list_att[nil_att] = jtype; idx_pair_list_att[nil_att] = idx_neighbor_list_att[i]; pl_lj_nat_pdb_dist[nil_att] = nl_lj_nat_pdb_dist[i]; // pl_lj_nat_pdb_dist2[nil_att] = nl_lj_nat_pdb_dist2[i]; // pl_lj_nat_pdb_dist6[nil_att] = nl_lj_nat_pdb_dist6[i]; // pl_lj_nat_pdb_dist12[nil_att] = nl_lj_nat_pdb_dist12[i]; nil_att++; } } for (int i=0; i<nnl_rep; i++) { // ibead = ibead_neighbor_list_rep[i] - 1; // jbead = jbead_neighbor_list_rep[i] - 1; // itype = itype_neighbor_list_rep[i]; // jtype = jtype_neighbor_list_rep[i]; ibead = GET_IDX(idx_neighbor_list_rep[i].x) - 1; jbead = GET_IDX(idx_neighbor_list_rep[i].y) - 1; itype = GET_TYPE(idx_neighbor_list_rep[i].x); jtype = GET_TYPE(idx_neighbor_list_rep[i].y); dx = unc_pos[jbead].x - unc_pos[ibead].x; dy = unc_pos[jbead].y - unc_pos[ibead].y; dz = unc_pos[jbead].z - unc_pos[ibead].z; dx -= boxl*rnd(dx/boxl); dy -= boxl*rnd(dy/boxl); dz -= boxl*rnd(dz/boxl); d2 = dx*dx+dy*dy+dz*dz; rcut = 2.5*sigma_rep[itype][jtype]; rcut2 = rcut*rcut; if (d2 < rcut2) { // add to interaction pair list // ibead_pair_list_rep[nil_rep] = ibead + 1; // jbead_pair_list_rep[nil_rep] = jbead + 1; // itype_pair_list_rep[nil_rep] = itype; // jtype_pair_list_rep[nil_rep] = jtype; idx_pair_list_rep[nil_rep] = idx_neighbor_list_rep[i]; nil_rep++; } } 
//Copy updated values back to the GPU cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, cudaMemcpyHostToDevice)); // cutilSafeCall(cudaMemcpy(dev_idx_pair_list_att, idx_pair_list_att, nil_att * sizeof(ushort2) /*idx_pair_list_att_size**/, cudaMemcpyHostToDevice)); // cutilSafeCall(cudaMemcpy(dev_pl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att * sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_nl_lj_nat_pdb_dist, pl_lj_nat_pdb_dist, nil_att * sizeof(PDB_FLOAT) /*pl_lj_nat_pdb_dist_size**/, cudaMemcpyHostToDevice)); // cutilSafeCall(cudaMemcpy(dev_idx_pair_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(dev_idx_neighbor_list_rep, idx_pair_list_rep, nil_rep * sizeof(ushort2) /*idx_pair_list_rep_size**/, cudaMemcpyHostToDevice)); dev_idx_pair_list_att = dev_idx_neighbor_list_att; dev_pl_lj_nat_pdb_dist = dev_nl_lj_nat_pdb_dist; dev_idx_pair_list_rep = dev_idx_neighbor_list_rep; } #endif #endif
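/*
 * Editor's illustration (not part of the original sources): the neighbor- and
 * pair-list updates above compact an interaction array in three steps:
 * (1) a kernel flags each interaction 0 = keep / 1 = drop, (2) the interaction
 * array is sorted by that flag so the kept entries move to the front, and
 * (3) a scan over the flags counts how many were dropped, giving the list
 * length. A minimal sketch of the same flag/sort/count idea using Thrust
 * instead of CUDPP; the function and array names here are hypothetical.
 */
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/count.h>

// Returns the number of kept entries; kept interactions (flag == 0) end up at
// the front of 'pairs', mirroring what cudppSort + cudppScan accomplish above.
static int compact_by_flag(thrust::device_vector<unsigned int>& flags,
                           thrust::device_vector<ushort2>& pairs)
{
  // Stable sort keeps the relative order of the kept entries intact.
  thrust::stable_sort_by_key(flags.begin(), flags.end(), pairs.begin());
  // Counting the 1's plays the role of the final element of the CUDPP scan.
  int ndropped = (int)thrust::count(flags.begin(), flags.end(), 1u);
  return (int)pairs.size() - ndropped;
}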
b754a06b5de6cff75891af8a66a5d71e3faf85bf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <memory>

__global__ void square1(float* out, float* in)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    float f = in[index];
    out[index] = f * f;
}

int main()
{
    const int N = 1024;

    std::unique_ptr<float[]> h_in(new float[N]);
    std::unique_ptr<float[]> h_out(new float[N]);
    for(int i = 0; i < N; ++i)
        h_in[i] = i;

    float *d_in = NULL;
    float *d_out = NULL;
    hipMalloc(&d_in, N * sizeof(float));
    hipMalloc(&d_out, N * sizeof(float));

    hipMemcpy(d_in, h_in.get() , N * sizeof(float), hipMemcpyHostToDevice);

    const int NUM_BLOCKS = 4;
    hipLaunchKernelGGL(( square1), dim3(NUM_BLOCKS), dim3(N/NUM_BLOCKS), 0, 0, d_out, d_in);
    hipDeviceSynchronize();

    hipMemcpy(h_out.get(), d_out, N * sizeof(float), hipMemcpyDeviceToHost);

    for(int i = 0; i < N; ++i)
    {
        std::cout << h_out[i];
        if(i && i%4 == 0)
            std::cout << std::endl;
        else
            std::cout << " ";
    }

    hipFree(d_in);
    hipFree(d_out);
    return 0;
}
b754a06b5de6cff75891af8a66a5d71e3faf85bf.cu
#include <iostream>
#include <memory>

__global__ void square1(float* out, float* in)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    float f = in[index];
    out[index] = f * f;
}

int main()
{
    const int N = 1024;

    std::unique_ptr<float[]> h_in(new float[N]);
    std::unique_ptr<float[]> h_out(new float[N]);
    for(int i = 0; i < N; ++i)
        h_in[i] = i;

    float *d_in = NULL;
    float *d_out = NULL;
    cudaMalloc(&d_in, N * sizeof(float));
    cudaMalloc(&d_out, N * sizeof(float));

    cudaMemcpy(d_in, h_in.get() , N * sizeof(float), cudaMemcpyHostToDevice);

    const int NUM_BLOCKS = 4;
    square1<<<NUM_BLOCKS, N/NUM_BLOCKS>>>(d_out, d_in);
    cudaDeviceSynchronize();

    cudaMemcpy(h_out.get(), d_out, N * sizeof(float), cudaMemcpyDeviceToHost);

    for(int i = 0; i < N; ++i)
    {
        std::cout << h_out[i];
        if(i && i%4 == 0)
            std::cout << std::endl;
        else
            std::cout << " ";
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
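/*
 * Editor's note (not from the original file): the launch above uses
 * N/NUM_BLOCKS threads per block and no bounds check inside the kernel, which
 * only works because N = 1024 divides evenly by NUM_BLOCKS = 4. A sketch of
 * the more general pattern, with a hypothetical kernel name, an explicit
 * guard, and a ceiling-divided grid so any n is covered:
 */
__global__ void square1_guarded(float* out, const float* in, int n)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < n) {            // guard against the partial last block
        float f = in[index];
        out[index] = f * f;
    }
}

// Host-side launch (assumed block size of 256):
//   const int threads = 256;
//   const int blocks  = (n + threads - 1) / threads;
//   square1_guarded<<<blocks, threads>>>(d_out, d_in, n);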
b35eeb723a788c15a0903d9cd696fe8fe7577662.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define B0 0.675603595979828813
#define B1 -0.175603595979828813
#define D0 1.35120719195965763
#define D1 -1.70241438391931525

/* function prototypes */
void force(long, long, long, double *, double *, double *, double *,double *, double *, float *, float *, double *, double *);
__global__ void step_type1(long , double, double, double *, double *, double *);
__global__ void step_type2(long , double, double *, double *, double *);

void timestep(long n, long nblock, long nthread, double dt, double *mx, double *my,
              double *magx, double *magy, double *magx_gpu, double *magy_gpu,
              double *r_gpu, double *p_gpu, double *f_gpu, float *sinr_gpu, float *cosr_gpu)
{
  double bb0,bb1,dd0,dd1;

  bb0=B0*dt;
  bb1=B1*dt;
  dd0=D0*dt;
  dd1=D1*dt;

  hipLaunchKernelGGL(( step_type1), dim3(nblock),dim3(nthread), 0, 0, n,bb0,dd0,r_gpu,p_gpu,f_gpu);
  force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);

  hipLaunchKernelGGL(( step_type1), dim3(nblock),dim3(nthread), 0, 0, n,bb1,dd1,r_gpu,p_gpu,f_gpu);
  force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);

  hipLaunchKernelGGL(( step_type1), dim3(nblock),dim3(nthread), 0, 0, n,bb1,dd0,r_gpu,p_gpu,f_gpu);
  force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);

  hipLaunchKernelGGL(( step_type2), dim3(nblock),dim3(nthread), 0, 0, n,bb0,r_gpu,p_gpu,f_gpu);

  return;
}
b35eeb723a788c15a0903d9cd696fe8fe7577662.cu
#include <stdio.h>

#define B0 0.675603595979828813
#define B1 -0.175603595979828813
#define D0 1.35120719195965763
#define D1 -1.70241438391931525

/* function prototypes */
void force(long, long, long, double *, double *, double *, double *,double *, double *, float *, float *, double *, double *);
__global__ void step_type1(long , double, double, double *, double *, double *);
__global__ void step_type2(long , double, double *, double *, double *);

void timestep(long n, long nblock, long nthread, double dt, double *mx, double *my,
              double *magx, double *magy, double *magx_gpu, double *magy_gpu,
              double *r_gpu, double *p_gpu, double *f_gpu, float *sinr_gpu, float *cosr_gpu)
{
  double bb0,bb1,dd0,dd1;

  bb0=B0*dt;
  bb1=B1*dt;
  dd0=D0*dt;
  dd1=D1*dt;

  step_type1<<<nblock,nthread>>>(n,bb0,dd0,r_gpu,p_gpu,f_gpu);
  force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);

  step_type1<<<nblock,nthread>>>(n,bb1,dd1,r_gpu,p_gpu,f_gpu);
  force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);

  step_type1<<<nblock,nthread>>>(n,bb1,dd0,r_gpu,p_gpu,f_gpu);
  force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);

  step_type2<<<nblock,nthread>>>(n,bb0,r_gpu,p_gpu,f_gpu);

  return;
}
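/*
 * Editor's note (not from the original file): the B0/B1/D0/D1 constants used
 * by timestep() are numerically consistent with the classic fourth-order
 * Forest-Ruth / Yoshida symplectic coefficients, i.e. D0 = 1/(2 - 2^(1/3)),
 * D1 = -2^(1/3)/(2 - 2^(1/3)), B0 = D0/2 and B1 = (D0 + D1)/2. The small
 * standalone check below is an editor-added illustration, not part of the
 * simulation code.
 */
#include <stdio.h>
#include <math.h>

int main(void)
{
    double w1 = 1.0 / (2.0 - cbrt(2.0));   /* expected ~  1.35120719195965763 (D0) */
    double w0 = -cbrt(2.0) * w1;           /* expected ~ -1.70241438391931525 (D1) */
    printf("D0=%.18f D1=%.18f\n", w1, w0);
    printf("B0=%.18f B1=%.18f\n", 0.5 * w1, 0.5 * (w0 + w1));
    return 0;
}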
89085bf0eccf7d9209e03b60ed2a4d1318f150fa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "add.h"

/*
  This is the function that each thread will execute on the GPU. The fact
  that it executes on the device is indicated by the __global__ modifier
  in front of the return type of the function. After that, the signature
  of the function isn't special - in particular, the pointers we pass in
  should point to memory on the device, but this is not indicated by the
  function's signature.
*/
__global__ void add(int *a, int *b, int *c) {
  /*
    Each thread knows its identity in the system. This identity is made
    available in code via indices blockIdx and threadIdx. We write
    blockIdx.x because block indices are multidimensional. In this case,
    we have linear arrays of data, so we only need one dimension. If this
    doesn't make sense, don't worry - the important thing is that the
    first step in the function is converting the thread's identity into
    an index into the data.
  */
  int thread_id = blockIdx.x * blockDim.x + threadIdx.x;

  /*
    We make sure that the thread_id isn't too large, and then we assign
    c = a + b using the index we calculated above.

    The big picture is that each thread is responsible for adding one
    element from a and one element from b. Each thread is able to run in
    parallel, so we get speedup.
  */
  if (thread_id < N) {
    c[thread_id] = a[thread_id] + b[thread_id];
  }
}

__global__ void mult(int size,int** a, int** b, int** c) {
  int stride_x = blockDim.x * gridDim.x;
  int stride_y = blockDim.y * gridDim.y;
  int x, y;

  /*for(int id_x = blockIdx.x * blockDim.x + threadIdx.x; id_x < size; id_x += stride_x){
      for(int id_y = blockIdx.y * blockDim.y + threadIdx.y; id_y < size; id_y += stride_y){
          c[id_x][id_y] = a[id_x][id_y] * b[id_y][id_x];
      }
  }*/

  for(int j = (blockIdx.y * blockDim.y + threadIdx.y) * blockDim.x * gridDim.x + (blockIdx.x * blockDim.x + threadIdx.x);
      j < size*size;
      j += stride_x * stride_y){
    x = j/size;
    y = j%size;
    c[x][y] = a[x][y] * b[y][x];
  }
}
89085bf0eccf7d9209e03b60ed2a4d1318f150fa.cu
#include "add.h"

/*
  This is the function that each thread will execute on the GPU. The fact
  that it executes on the device is indicated by the __global__ modifier
  in front of the return type of the function. After that, the signature
  of the function isn't special - in particular, the pointers we pass in
  should point to memory on the device, but this is not indicated by the
  function's signature.
*/
__global__ void add(int *a, int *b, int *c) {
  /*
    Each thread knows its identity in the system. This identity is made
    available in code via indices blockIdx and threadIdx. We write
    blockIdx.x because block indices are multidimensional. In this case,
    we have linear arrays of data, so we only need one dimension. If this
    doesn't make sense, don't worry - the important thing is that the
    first step in the function is converting the thread's identity into
    an index into the data.
  */
  int thread_id = blockIdx.x * blockDim.x + threadIdx.x;

  /*
    We make sure that the thread_id isn't too large, and then we assign
    c = a + b using the index we calculated above.

    The big picture is that each thread is responsible for adding one
    element from a and one element from b. Each thread is able to run in
    parallel, so we get speedup.
  */
  if (thread_id < N) {
    c[thread_id] = a[thread_id] + b[thread_id];
  }
}

__global__ void mult(int size,int** a, int** b, int** c) {
  int stride_x = blockDim.x * gridDim.x;
  int stride_y = blockDim.y * gridDim.y;
  int x, y;

  /*for(int id_x = blockIdx.x * blockDim.x + threadIdx.x; id_x < size; id_x += stride_x){
      for(int id_y = blockIdx.y * blockDim.y + threadIdx.y; id_y < size; id_y += stride_y){
          c[id_x][id_y] = a[id_x][id_y] * b[id_y][id_x];
      }
  }*/

  for(int j = (blockIdx.y * blockDim.y + threadIdx.y) * blockDim.x * gridDim.x + (blockIdx.x * blockDim.x + threadIdx.x);
      j < size*size;
      j += stride_x * stride_y){
    x = j/size;
    y = j%size;
    c[x][y] = a[x][y] * b[y][x];
  }
}
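/*
 * Editor's illustration (not from the original file): mult() takes int**
 * arguments, so each matrix must be passed as a device-resident array of
 * device row pointers. Note also that, despite its name, it computes
 * c[x][y] = a[x][y] * b[y][x] element-wise (b read transposed), not a full
 * matrix product. A hedged sketch of the host-side setup with a hypothetical
 * helper name:
 */
#include <cuda_runtime.h>
#include <stdlib.h>

// Builds a device int** whose entries point at 'size' device rows of 'size' ints.
static int** alloc_device_matrix(int size)
{
    int** h_rows = (int**)malloc(size * sizeof(int*));
    for (int r = 0; r < size; ++r)
        cudaMalloc((void**)&h_rows[r], size * sizeof(int));

    int** d_rows = NULL;
    cudaMalloc((void**)&d_rows, size * sizeof(int*));
    cudaMemcpy(d_rows, h_rows, size * sizeof(int*), cudaMemcpyHostToDevice);
    free(h_rows);   // note: the individual row pointers now live only on the device
    return d_rows;
}

// Example launch over a 2D grid, matching mult()'s flattened grid-stride loop:
//   dim3 threads(16, 16);
//   dim3 blocks((size + 15) / 16, (size + 15) / 16);
//   mult<<<blocks, threads>>>(size, d_a, d_b, d_c);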
f71da1e44c8f4b5ca6c22aa044dcefef60506620.hip
// !!! This is a file automatically generated by hipify!!! /* * * * Created on: 27.6.2011 * Author: Teemu Rantalaiho ([email protected]) * * * Copyright 2011 Teemu Rantalaiho * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * * Compilation instructions: * * nvcc -O4 -arch=<your_arch> -I../ test_sum_rows.cu -o test_sum_rows * * thrust codepath (-DTHRUST) not up to date -- do not use! * */ // 50 million inputs #define NROWS 50 #define NCOLUMNS (1000 * 1000) #define TESTMAXIDX NROWS // 16 keys / indices //#define TEST_IS_POW2 0 #define TEST_SIZE (NROWS * NCOLUMNS) // 10 million inputs #define NRUNS 1000 // Repeat 1000 times => 10 Gigainputs in total #define START_INDEX 0 #define NSTRESS_RUNS NRUNS #ifdef THRUST #define ENABLE_THRUST 1 // Enable thrust-based version also (xform-sort_by_key-reduce_by_key) #else #define ENABLE_THRUST 0 // Disable thrust-based version also (xform-sort_by_key-reduce_by_key) #endif #define USE_MULTIREDUCE_FASTPATH 0 #include "cuda_histogram.h" #if ENABLE_THRUST #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> #endif #include <assert.h> #include <stdio.h> // Always return 1 -> normal histogram - each sample has same weight struct test_xform2 { __host__ __device__ void operator() (float* input, int i, int* result_index, float* results, int nresults) const { *result_index = i % NROWS;/*(i & (TESTMAXIDX - 1))*/ ; *results = input[i]; } }; struct test_sumfun2 { __device__ __host__ float operator() (float res1, float res2) const{ return res1 + res2; } }; static void printres (float* res, int nres, const char* descr) { if (descr) printf("\n%s:\n", descr); printf("vals = [ "); for (int i = 0; i < nres; i++) printf("(%4f), ", res[i]); printf("]\n"); } static void testHistogramParam(float* INPUT, float* hostINPUT, int index_0, int index_1, bool print, bool cpurun, bool stress) { int nIndex = TESTMAXIDX; int srun; int nruns = stress ? NSTRESS_RUNS : 1; test_sumfun2 sumFun; test_xform2 transformFun; //test_indexfun2 indexFun; float* tmpres = (float*)malloc(sizeof(float) * nIndex); float* cpures = stress ? 
(float*)malloc(sizeof(float) * nIndex) : tmpres; float zero = 0; for (srun = 0; srun < nruns; srun++) { { //int* tmpidx = (int*)malloc(sizeof(int) * nIndex); if (print) printf("\nTest reduce_by_key:\n\n"); memset(tmpres, 0, sizeof(float) * nIndex); if (stress) memset(cpures, 0, sizeof(float) * nIndex); if (cpurun || stress) for (int i = index_0; i < index_1; i++) { int index; float tmp; transformFun(hostINPUT, i, &index, &tmp, 1); //index = indexFun(INPUT, i); cpures[index] = sumFun(cpures[index], tmp); //printf("i = %d, out_index = %d, out_val = (%.3f, %.3f) \n",i, index, tmp.real, tmp.imag); } if (print && cpurun) { printres(cpures, nIndex, "CPU results:"); } } if (!cpurun) { #if USE_MULTIREDUCE_FASTPATH callMultiReduce(NCOLUMNS, NROWS, tmpres, INPUT, sumFun, zero); #else callHistogramKernel<histogram_atomic_add, 1>(INPUT, transformFun, /*indexFun,*/ sumFun, index_0, index_1, zero, tmpres, nIndex); #endif } if (stress) { int k; for (k = 0; k < nIndex; k++) { if (tmpres[k] != cpures[k] /*|| tmpres[k].imag != cpures[k].imag*/) { printf("Error detected with index-values: i0 = %d, i1 = %d!\n", index_0, index_1); printres(cpures, nIndex, "CPU results:"); printres(tmpres, nIndex, "GPU results:"); } } } if (print && (!cpurun)) { printres(tmpres, nIndex, "GPU results:"); } int size = index_1 - index_0; index_0 += 1; index_1 -= 1; if (index_0 > index_1 + 1) { int tmp = index_0; index_0 = index_1; index_1 = tmp; } if (index_0 < 0 || index_1 < 0) { index_0 = 0; index_1 = size - 1; } } free(tmpres); if (stress) free(cpures); } #if ENABLE_THRUST // NOTE: Take advantage here of the fact that this is the classical histogram with all values = 1 // And also that we know before hand the number of indices coming out static void testHistogramParamThrust(int* INPUT, int index_0, int index_1, bool print) { test_sumfun2 mysumfun; thrust::equal_to<int> binary_pred; int nIndex = TESTMAXIDX; int N = index_1 - index_0; thrust::device_vector<int> keys_out(nIndex); thrust::device_vector<int> vals_out(nIndex); thrust::device_vector<int> h_vals_out(nIndex); //thrust::device_vector<int> keys(N); thrust::device_ptr<int> keys(INPUT); // Sort the data thrust::sort(keys, keys + N); // And reduce by key - histogram complete thrust::reduce_by_key(keys, keys + N, thrust::make_constant_iterator(1), keys_out.begin(), vals_out.begin(), binary_pred, mysumfun); h_vals_out = vals_out; if (print) { printf("\nThrust results:\n"); printf("vals = [ "); for (int i = 0; i < nIndex; i++) { int tmp = h_vals_out[i]; printf("(%d), ", tmp); } printf("]\n"); } } #endif void printUsage(void) { printf("\n"); printf("Test order independent reduce-by-key / histogram algorithm\n\n"); printf("By default this runs on custom algorithm on the GPU, with lots of equal consecutive keys\n\n"); printf("\tOptions:\n\n"); printf("\t\t--cpu\t\t Run on CPU serially instead of GPU\n"); printf("\t\t--print\t\t Print results of algorithm (check validity)\n"); printf("\t\t--thrust\t Run on GPU but using thrust library\n"); printf("\t\t--load\t Use 32-bit texture data s\n"); printf("\t\t--rnd\t Take uniform random keys s\n"); // printf("\t\t--sharp\t Make peaks sharp\n"); // printf("\t\t--nornd\t Remove random noise from input\n"); } static unsigned int* MyTexture_load(char* filename, int* dataSize) { FILE* file = fopen(filename, "rb"); //texture->dataRGBA8888 = NULL; if (!file) { char* tmp = (char*)malloc(strlen(filename) + 10); if (tmp) { char* ptr = tmp; strcpy(ptr, "../"); ptr += 3; strcpy(ptr, filename); file = fopen(tmp, "rb"); } free(tmp); } // Read if (file) 
{ int npixels = 512 * 512;//texture->width * texture->height; int size = npixels * 4; unsigned int* data = (unsigned int*)malloc(size); *dataSize = npixels; if (data) { int i; for (i = 0; i < npixels; i++) { unsigned int r, g, b; unsigned int raw = 0; unsigned int pixel = 0; int rsize = fread(&raw, 3, 1, file); if (rsize != 1) { printf( "Warning: Unexpected EOF in texture %s at idx %d\n", filename, i); break; } r = (raw & 0x00FF0000) >> 16; g = (raw & 0x0000FF00) >> 8; b = (raw & 0x000000FF) >> 0; pixel = 0xFF000000 | (b << 16) | (g << 8) | (r << 0); data[i] = pixel; } } fclose(file); return data; } return NULL; } static inline float getInput(int i, unsigned int* texData, int dataSize, bool rnd) { if (texData) { static int index = i % dataSize; static int round = 0; unsigned int val = texData[index]; int result = 0; float fres; result += ((val >> 16 ) & 0xFF) + round; result += ((val >> 8 ) & 0xFF) + round; result += ((val >> 0 ) & 0xFF) + round; index++; if (index >= dataSize) { index = 0; round += 7; } fres = (float)result / (float)(1 << 24); return fres; } else { if (!rnd) { return 1.0f; } else { static unsigned int current = 0xf1232345; const unsigned int mult = 1664525; const unsigned int add = 1013904223ul; float fres; current = current * mult + add; fres = (float)current / (float)0xFFFFFFFFU; return fres; } } } static void fillInput(float* input, bool load, bool rnd) { int i; unsigned int* texData = NULL; int dataSize = 0; if (load && !rnd) { texData = MyTexture_load("texture.raw", &dataSize); } for (i = 0; i < TEST_SIZE;) { *input++ = getInput(i++, texData, dataSize, rnd); *input++ = getInput(i++, texData, dataSize, rnd); *input++ = getInput(i++, texData, dataSize, rnd); *input++ = getInput(i++, texData, dataSize, rnd); } if (texData) free(texData); } int main (int argc, char** argv) { int i; int index_0 = START_INDEX; int index_1 = index_0 + TEST_SIZE; bool cpu = false; bool print = false; bool thrust = false; bool stress = false; // bool peaks = false; // bool sharp = false; bool rnd = false; bool load = false; printUsage(); for (i = 0; i < argc; i++) { if (argv[i] && strcmp(argv[i], "--cpu") == 0) cpu = true; if (argv[i] && strcmp(argv[i], "--print") == 0) print = true; if (argv[i] && strcmp(argv[i], "--thrust") == 0) thrust = true; if (argv[i] && strcmp(argv[i], "--stress") == 0) stress = true; // if (argv[i] && strcmp(argv[i], "--peaks") == 0) // peaks = true; if (argv[i] && strcmp(argv[i], "--load") == 0) load = true; // if (argv[i] && strcmp(argv[i], "--sharp") == 0) // sharp = true; if (argv[i] && strcmp(argv[i], "--rnd") == 0) rnd = true; } { // Allocate keys: float* INPUT = NULL; float* hostINPUT = (float*)malloc(sizeof(float) * (TEST_SIZE + 3)); assert(hostINPUT); fillInput(hostINPUT, load, rnd); if (!cpu) { hipMalloc(&INPUT, sizeof(float) * TEST_SIZE); assert(INPUT); hipMemcpy(INPUT, hostINPUT, sizeof(float) * TEST_SIZE, hipMemcpyHostToDevice); } // Create events for timing: hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Now start timer - we run on stream 0 (default stream): hipEventRecord(start, 0); for (i = 0; i < NRUNS; i++) { if (thrust) { #if ENABLE_THRUST testHistogramParamThrust(INPUT, index_0, index_1, print); #else printf("\nTest was compiled without thrust support! 
Find 'ENABLE_THRUST' in source-code!\n\n Exiting...\n"); break; #endif } else { testHistogramParam(INPUT, hostINPUT, index_0, index_1, print, cpu, stress); } print = false; // Run only once all stress-tests if (stress) break; } { float t_ms; hipEventRecord(stop, 0); hipDeviceSynchronize(); hipEventElapsedTime(&t_ms, start, stop); double t = t_ms * 0.001f; double GKps = (((double)TEST_SIZE * (double)NRUNS)) / (t*1.e9); printf("Runtime in loops: %fs, Throughput (Gkeys/s): %3f GK/s \n", t, GKps); } if (INPUT) hipFree(INPUT); if (hostINPUT) free(hostINPUT); hipEventDestroy(start); hipEventDestroy(stop); } return 0; }
f71da1e44c8f4b5ca6c22aa044dcefef60506620.cu
/* * * * Created on: 27.6.2011 * Author: Teemu Rantalaiho ([email protected]) * * * Copyright 2011 Teemu Rantalaiho * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * * Compilation instructions: * * nvcc -O4 -arch=<your_arch> -I../ test_sum_rows.cu -o test_sum_rows * * thrust codepath (-DTHRUST) not up to date -- do not use! * */ // 50 million inputs #define NROWS 50 #define NCOLUMNS (1000 * 1000) #define TESTMAXIDX NROWS // 16 keys / indices //#define TEST_IS_POW2 0 #define TEST_SIZE (NROWS * NCOLUMNS) // 10 million inputs #define NRUNS 1000 // Repeat 1000 times => 10 Gigainputs in total #define START_INDEX 0 #define NSTRESS_RUNS NRUNS #ifdef THRUST #define ENABLE_THRUST 1 // Enable thrust-based version also (xform-sort_by_key-reduce_by_key) #else #define ENABLE_THRUST 0 // Disable thrust-based version also (xform-sort_by_key-reduce_by_key) #endif #define USE_MULTIREDUCE_FASTPATH 0 #include "cuda_histogram.h" #if ENABLE_THRUST #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> #endif #include <assert.h> #include <stdio.h> // Always return 1 -> normal histogram - each sample has same weight struct test_xform2 { __host__ __device__ void operator() (float* input, int i, int* result_index, float* results, int nresults) const { *result_index = i % NROWS;/*(i & (TESTMAXIDX - 1))*/ ; *results = input[i]; } }; struct test_sumfun2 { __device__ __host__ float operator() (float res1, float res2) const{ return res1 + res2; } }; static void printres (float* res, int nres, const char* descr) { if (descr) printf("\n%s:\n", descr); printf("vals = [ "); for (int i = 0; i < nres; i++) printf("(%4f), ", res[i]); printf("]\n"); } static void testHistogramParam(float* INPUT, float* hostINPUT, int index_0, int index_1, bool print, bool cpurun, bool stress) { int nIndex = TESTMAXIDX; int srun; int nruns = stress ? NSTRESS_RUNS : 1; test_sumfun2 sumFun; test_xform2 transformFun; //test_indexfun2 indexFun; float* tmpres = (float*)malloc(sizeof(float) * nIndex); float* cpures = stress ? 
(float*)malloc(sizeof(float) * nIndex) : tmpres; float zero = 0; for (srun = 0; srun < nruns; srun++) { { //int* tmpidx = (int*)malloc(sizeof(int) * nIndex); if (print) printf("\nTest reduce_by_key:\n\n"); memset(tmpres, 0, sizeof(float) * nIndex); if (stress) memset(cpures, 0, sizeof(float) * nIndex); if (cpurun || stress) for (int i = index_0; i < index_1; i++) { int index; float tmp; transformFun(hostINPUT, i, &index, &tmp, 1); //index = indexFun(INPUT, i); cpures[index] = sumFun(cpures[index], tmp); //printf("i = %d, out_index = %d, out_val = (%.3f, %.3f) \n",i, index, tmp.real, tmp.imag); } if (print && cpurun) { printres(cpures, nIndex, "CPU results:"); } } if (!cpurun) { #if USE_MULTIREDUCE_FASTPATH callMultiReduce(NCOLUMNS, NROWS, tmpres, INPUT, sumFun, zero); #else callHistogramKernel<histogram_atomic_add, 1>(INPUT, transformFun, /*indexFun,*/ sumFun, index_0, index_1, zero, tmpres, nIndex); #endif } if (stress) { int k; for (k = 0; k < nIndex; k++) { if (tmpres[k] != cpures[k] /*|| tmpres[k].imag != cpures[k].imag*/) { printf("Error detected with index-values: i0 = %d, i1 = %d!\n", index_0, index_1); printres(cpures, nIndex, "CPU results:"); printres(tmpres, nIndex, "GPU results:"); } } } if (print && (!cpurun)) { printres(tmpres, nIndex, "GPU results:"); } int size = index_1 - index_0; index_0 += 1; index_1 -= 1; if (index_0 > index_1 + 1) { int tmp = index_0; index_0 = index_1; index_1 = tmp; } if (index_0 < 0 || index_1 < 0) { index_0 = 0; index_1 = size - 1; } } free(tmpres); if (stress) free(cpures); } #if ENABLE_THRUST // NOTE: Take advantage here of the fact that this is the classical histogram with all values = 1 // And also that we know before hand the number of indices coming out static void testHistogramParamThrust(int* INPUT, int index_0, int index_1, bool print) { test_sumfun2 mysumfun; thrust::equal_to<int> binary_pred; int nIndex = TESTMAXIDX; int N = index_1 - index_0; thrust::device_vector<int> keys_out(nIndex); thrust::device_vector<int> vals_out(nIndex); thrust::device_vector<int> h_vals_out(nIndex); //thrust::device_vector<int> keys(N); thrust::device_ptr<int> keys(INPUT); // Sort the data thrust::sort(keys, keys + N); // And reduce by key - histogram complete thrust::reduce_by_key(keys, keys + N, thrust::make_constant_iterator(1), keys_out.begin(), vals_out.begin(), binary_pred, mysumfun); h_vals_out = vals_out; if (print) { printf("\nThrust results:\n"); printf("vals = [ "); for (int i = 0; i < nIndex; i++) { int tmp = h_vals_out[i]; printf("(%d), ", tmp); } printf("]\n"); } } #endif void printUsage(void) { printf("\n"); printf("Test order independent reduce-by-key / histogram algorithm\n\n"); printf("By default this runs on custom algorithm on the GPU, with lots of equal consecutive keys\n\n"); printf("\tOptions:\n\n"); printf("\t\t--cpu\t\t Run on CPU serially instead of GPU\n"); printf("\t\t--print\t\t Print results of algorithm (check validity)\n"); printf("\t\t--thrust\t Run on GPU but using thrust library\n"); printf("\t\t--load\t Use 32-bit texture data s\n"); printf("\t\t--rnd\t Take uniform random keys s\n"); // printf("\t\t--sharp\t Make peaks sharp\n"); // printf("\t\t--nornd\t Remove random noise from input\n"); } static unsigned int* MyTexture_load(char* filename, int* dataSize) { FILE* file = fopen(filename, "rb"); //texture->dataRGBA8888 = NULL; if (!file) { char* tmp = (char*)malloc(strlen(filename) + 10); if (tmp) { char* ptr = tmp; strcpy(ptr, "../"); ptr += 3; strcpy(ptr, filename); file = fopen(tmp, "rb"); } free(tmp); } // Read if (file) 
{ int npixels = 512 * 512;//texture->width * texture->height; int size = npixels * 4; unsigned int* data = (unsigned int*)malloc(size); *dataSize = npixels; if (data) { int i; for (i = 0; i < npixels; i++) { unsigned int r, g, b; unsigned int raw = 0; unsigned int pixel = 0; int rsize = fread(&raw, 3, 1, file); if (rsize != 1) { printf( "Warning: Unexpected EOF in texture %s at idx %d\n", filename, i); break; } r = (raw & 0x00FF0000) >> 16; g = (raw & 0x0000FF00) >> 8; b = (raw & 0x000000FF) >> 0; pixel = 0xFF000000 | (b << 16) | (g << 8) | (r << 0); data[i] = pixel; } } fclose(file); return data; } return NULL; } static inline float getInput(int i, unsigned int* texData, int dataSize, bool rnd) { if (texData) { static int index = i % dataSize; static int round = 0; unsigned int val = texData[index]; int result = 0; float fres; result += ((val >> 16 ) & 0xFF) + round; result += ((val >> 8 ) & 0xFF) + round; result += ((val >> 0 ) & 0xFF) + round; index++; if (index >= dataSize) { index = 0; round += 7; } fres = (float)result / (float)(1 << 24); return fres; } else { if (!rnd) { return 1.0f; } else { static unsigned int current = 0xf1232345; const unsigned int mult = 1664525; const unsigned int add = 1013904223ul; float fres; current = current * mult + add; fres = (float)current / (float)0xFFFFFFFFU; return fres; } } } static void fillInput(float* input, bool load, bool rnd) { int i; unsigned int* texData = NULL; int dataSize = 0; if (load && !rnd) { texData = MyTexture_load("texture.raw", &dataSize); } for (i = 0; i < TEST_SIZE;) { *input++ = getInput(i++, texData, dataSize, rnd); *input++ = getInput(i++, texData, dataSize, rnd); *input++ = getInput(i++, texData, dataSize, rnd); *input++ = getInput(i++, texData, dataSize, rnd); } if (texData) free(texData); } int main (int argc, char** argv) { int i; int index_0 = START_INDEX; int index_1 = index_0 + TEST_SIZE; bool cpu = false; bool print = false; bool thrust = false; bool stress = false; // bool peaks = false; // bool sharp = false; bool rnd = false; bool load = false; printUsage(); for (i = 0; i < argc; i++) { if (argv[i] && strcmp(argv[i], "--cpu") == 0) cpu = true; if (argv[i] && strcmp(argv[i], "--print") == 0) print = true; if (argv[i] && strcmp(argv[i], "--thrust") == 0) thrust = true; if (argv[i] && strcmp(argv[i], "--stress") == 0) stress = true; // if (argv[i] && strcmp(argv[i], "--peaks") == 0) // peaks = true; if (argv[i] && strcmp(argv[i], "--load") == 0) load = true; // if (argv[i] && strcmp(argv[i], "--sharp") == 0) // sharp = true; if (argv[i] && strcmp(argv[i], "--rnd") == 0) rnd = true; } { // Allocate keys: float* INPUT = NULL; float* hostINPUT = (float*)malloc(sizeof(float) * (TEST_SIZE + 3)); assert(hostINPUT); fillInput(hostINPUT, load, rnd); if (!cpu) { cudaMalloc(&INPUT, sizeof(float) * TEST_SIZE); assert(INPUT); cudaMemcpy(INPUT, hostINPUT, sizeof(float) * TEST_SIZE, cudaMemcpyHostToDevice); } // Create events for timing: cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Now start timer - we run on stream 0 (default stream): cudaEventRecord(start, 0); for (i = 0; i < NRUNS; i++) { if (thrust) { #if ENABLE_THRUST testHistogramParamThrust(INPUT, index_0, index_1, print); #else printf("\nTest was compiled without thrust support! 
Find 'ENABLE_THRUST' in source-code!\n\n Exiting...\n"); break; #endif } else { testHistogramParam(INPUT, hostINPUT, index_0, index_1, print, cpu, stress); } print = false; // Run only once all stress-tests if (stress) break; } { float t_ms; cudaEventRecord(stop, 0); cudaThreadSynchronize(); cudaEventElapsedTime(&t_ms, start, stop); double t = t_ms * 0.001f; double GKps = (((double)TEST_SIZE * (double)NRUNS)) / (t*1.e9); printf("Runtime in loops: %fs, Throughput (Gkeys/s): %3f GK/s \n", t, GKps); } if (INPUT) cudaFree(INPUT); if (hostINPUT) free(hostINPUT); cudaEventDestroy(start); cudaEventDestroy(stop); } return 0; }
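The pair above drives callHistogramKernel / callMultiReduce from the author's cuda_histogram.h, which is not reproduced in this record. As a point of reference for what the test actually computes (an order-independent reduce-by-key where the key is i % NROWS), a plain atomicAdd kernel gives the same result up to floating-point rounding; the sketch below is only that baseline, with illustrative names, not the library's fast path.

#include <cuda_runtime.h>

// Baseline equivalent of the reduce-by-key test above: element i is
// accumulated into bin (i % nrows). This is NOT the cuda_histogram.h code
// path, just a reference kernel that produces the same per-row sums.
__global__ void rowSumAtomic(const float* in, float* out, int n, int nrows)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        atomicAdd(&out[i % nrows], in[i]);
}

// Hypothetical helper showing the launch shape; d_in/d_out are device
// pointers and d_out must hold nrows floats.
static void rowSumBaseline(const float* d_in, float* d_out, int n, int nrows)
{
    cudaMemset(d_out, 0, nrows * sizeof(float));   // bins start at zero
    int block = 256;
    int grid  = (n + block - 1) / block;
    rowSumAtomic<<<grid, block>>>(d_in, d_out, n, nrows);
    cudaDeviceSynchronize();
}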
25058ea25eb7caae81698af58d29a36b48af76b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "matrix.h" /* *http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %d %s %s %d\n", code, hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void vanilla(matrix_t A, matrix_t B, matrix_t C) { TYPE c = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row >= A.r || col >= B.c) return; for (int i = 0; i < A.c; ++i) { c += A.data[ row * A.c + i] * B.data [ i * B.c + col ]; } C.data[ row * C.c + col ] = c; } __global__ void coal() { } extern "C" { void *mult_vanilla_cuda(arg_t *args) { int devices; gpuErrchk(hipGetDeviceCount( &devices )); printf("%d devices\n", devices); hipSetDevice(1); struct hipDeviceProp_t properties; gpuErrchk( hipGetDeviceProperties( &properties, 1 )); printf("%s\n", properties.name); matrix_t A = {NULL, args->A.r, args->A.c}; matrix_t B = {NULL, args->B.r, args->B.c}; matrix_t C = {NULL, args->C.r, args->C.c}; int sizeA = args->A.r * args->A.c * sizeof(TYPE); int sizeB = args->B.r * args->B.c * sizeof(TYPE); int sizeC = args->C.r * args->C.c * sizeof(TYPE); gpuErrchk( hipMalloc( &A.data, sizeA )); gpuErrchk( hipMemcpy( A.data, args->A.data, sizeA, hipMemcpyHostToDevice )); gpuErrchk( hipMalloc( &B.data, sizeB )); gpuErrchk( hipMemcpy( B.data, args->B.data, sizeB, hipMemcpyHostToDevice )); gpuErrchk( hipMalloc( &C.data, sizeC )); dim3 dimBlock(16,16); dim3 dimGrid(args->C.c, args->C.r); hipLaunchKernelGGL(( vanilla), dim3(dimGrid),dim3(dimBlock), 0, 0, A,B,C); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipMemcpy(args->C.data, C.data, sizeC, hipMemcpyDeviceToHost )); hipFree(A.data); hipFree(B.data); hipFree(C.data); return NULL; } void *mult_coal_cuda(arg_t *args) { hipLaunchKernelGGL(( coal), dim3(1),dim3(1), 0, 0, ); return NULL; } }
25058ea25eb7caae81698af58d29a36b48af76b0.cu
#include <stdio.h> #include "matrix.h" /* *http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %d %s %s %d\n", code, cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void vanilla(matrix_t A, matrix_t B, matrix_t C) { TYPE c = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row >= A.r || col >= B.c) return; for (int i = 0; i < A.c; ++i) { c += A.data[ row * A.c + i] * B.data [ i * B.c + col ]; } C.data[ row * C.c + col ] = c; } __global__ void coal() { } extern "C" { void *mult_vanilla_cuda(arg_t *args) { int devices; gpuErrchk(cudaGetDeviceCount( &devices )); printf("%d devices\n", devices); cudaSetDevice(1); struct cudaDeviceProp properties; gpuErrchk( cudaGetDeviceProperties( &properties, 1 )); printf("%s\n", properties.name); matrix_t A = {NULL, args->A.r, args->A.c}; matrix_t B = {NULL, args->B.r, args->B.c}; matrix_t C = {NULL, args->C.r, args->C.c}; int sizeA = args->A.r * args->A.c * sizeof(TYPE); int sizeB = args->B.r * args->B.c * sizeof(TYPE); int sizeC = args->C.r * args->C.c * sizeof(TYPE); gpuErrchk( cudaMalloc( &A.data, sizeA )); gpuErrchk( cudaMemcpy( A.data, args->A.data, sizeA, cudaMemcpyHostToDevice )); gpuErrchk( cudaMalloc( &B.data, sizeB )); gpuErrchk( cudaMemcpy( B.data, args->B.data, sizeB, cudaMemcpyHostToDevice )); gpuErrchk( cudaMalloc( &C.data, sizeC )); dim3 dimBlock(16,16); dim3 dimGrid(args->C.c, args->C.r); vanilla<<<dimGrid,dimBlock>>>(A,B,C); gpuErrchk(cudaThreadSynchronize()); gpuErrchk(cudaMemcpy(args->C.data, C.data, sizeC, cudaMemcpyDeviceToHost )); cudaFree(A.data); cudaFree(B.data); cudaFree(C.data); return NULL; } void *mult_coal_cuda(arg_t *args) { coal<<<1,1>>>(); return NULL; } }
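The vanilla launch above uses dimGrid(args->C.c, args->C.r) with a 16x16 block, so it launches one block per output element and most threads immediately fail the bounds guard. A self-contained sketch of the same naive multiply with a ceil-divided grid follows; it uses plain float arrays because matrix_t and TYPE come from matrix.h, which is not part of this record, and all names are illustrative.

#include <cuda_runtime.h>

// Same row-major pattern as 'vanilla' above, with a >= bounds guard and a
// grid that covers the M x N output exactly once.
__global__ void naiveMatMul(const float* A, const float* B, float* C,
                            int M, int K, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= M || col >= N) return;
    float acc = 0.0f;
    for (int k = 0; k < K; ++k)
        acc += A[row * K + k] * B[k * N + col];
    C[row * N + col] = acc;
}

// Ceil-division launch: one thread per element of C.
static void launchNaiveMatMul(const float* dA, const float* dB, float* dC,
                              int M, int K, int N)
{
    dim3 block(16, 16);
    dim3 grid((N + block.x - 1) / block.x, (M + block.y - 1) / block.y);
    naiveMatMul<<<grid, block>>>(dA, dB, dC, M, K, N);
}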
5919a123b10f8def1304b9878016ce8e2b536044.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2008-2013 NVIDIA Corporation * Modifications Copyright 2019 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <unittest/unittest.h> #include <thrust/for_each.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/retag.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include <algorithm> THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN template <typename T> class mark_present_for_each { public: T * ptr; __host__ __device__ void operator()(T x){ ptr[(int) x] = 1; } }; template <class Vector> void TestForEachSimple(void) { typedef typename Vector::value_type T; Vector input(5); Vector output(7, (T) 0); input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6; mark_present_for_each<T> f; f.ptr = thrust::raw_pointer_cast(output.data()); typename Vector::iterator result = thrust::for_each(input.begin(), input.end(), f); ASSERT_EQUAL(output[0], 0); ASSERT_EQUAL(output[1], 0); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 1); ASSERT_EQUAL_QUIET(result, input.end()); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestForEachSimple); template<typename InputIterator, typename Function> __host__ __device__ InputIterator for_each(my_system &system, InputIterator first, InputIterator, Function) { system.validate_dispatch(); return first; } void TestForEachDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::for_each(sys, vec.begin(), vec.end(), 0); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestForEachDispatchExplicit); template<typename InputIterator, typename Function> __host__ __device__ InputIterator for_each(my_tag, InputIterator first, InputIterator, Function) { *first = 13; return first; } void TestForEachDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::for_each(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.end()), 0); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestForEachDispatchImplicit); template <class Vector> void TestForEachNSimple(void) { typedef typename Vector::value_type T; Vector input(5); Vector output(7, (T) 0); input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6; mark_present_for_each<T> f; f.ptr = thrust::raw_pointer_cast(output.data()); typename Vector::iterator result = thrust::for_each_n(input.begin(), input.size(), f); ASSERT_EQUAL(output[0], 0); ASSERT_EQUAL(output[1], 0); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 1); ASSERT_EQUAL_QUIET(result, input.end()); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestForEachNSimple); template<typename InputIterator, typename Size, typename Function> __host__ __device__ InputIterator for_each_n(my_system &system, InputIterator first, Size, Function) { system.validate_dispatch(); 
return first; } void TestForEachNDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::for_each_n(sys, vec.begin(), vec.size(), 0); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestForEachNDispatchExplicit); template<typename InputIterator, typename Size, typename Function> __host__ __device__ InputIterator for_each_n(my_tag, InputIterator first, Size, Function) { *first = 13; return first; } void TestForEachNDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::for_each_n(thrust::retag<my_tag>(vec.begin()), vec.size(), 0); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestForEachNDispatchImplicit); void TestForEachSimpleAnySystem(void) { thrust::device_vector<int> output(7, 0); mark_present_for_each<int> f; f.ptr = thrust::raw_pointer_cast(output.data()); thrust::counting_iterator<int> result = thrust::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(5), f); ASSERT_EQUAL(output[0], 1); ASSERT_EQUAL(output[1], 1); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 0); ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5)); } DECLARE_UNITTEST(TestForEachSimpleAnySystem); void TestForEachNSimpleAnySystem(void) { thrust::device_vector<int> output(7, 0); mark_present_for_each<int> f; f.ptr = thrust::raw_pointer_cast(output.data()); thrust::counting_iterator<int> result = thrust::for_each_n(thrust::make_counting_iterator(0), 5, f); ASSERT_EQUAL(output[0], 1); ASSERT_EQUAL(output[1], 1); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 0); ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5)); } DECLARE_UNITTEST(TestForEachNSimpleAnySystem); template <typename T> void TestForEach(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input = unittest::random_integers<T>(n); for(size_t i = 0; i < n; i++) h_input[i] = ((size_t) h_input[i]) % output_size; thrust::device_vector<T> d_input = h_input; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); mark_present_for_each<T> h_f; mark_present_for_each<T> d_f; h_f.ptr = &h_output[0]; d_f.ptr = (&d_output[0]).get(); typename thrust::host_vector<T>::iterator h_result = thrust::for_each(h_input.begin(), h_input.end(), h_f); typename thrust::device_vector<T>::iterator d_result = thrust::for_each(d_input.begin(), d_input.end(), d_f); ASSERT_EQUAL(h_output, d_output); ASSERT_EQUAL_QUIET(h_result, h_input.end()); ASSERT_EQUAL_QUIET(d_result, d_input.end()); } DECLARE_VARIABLE_UNITTEST(TestForEach); template <typename T> void TestForEachN(const size_t n) { const size_t output_size = ::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input = unittest::random_integers<T>(n); for(size_t i = 0; i < n; i++) h_input[i] = ((size_t) h_input[i]) % output_size; thrust::device_vector<T> d_input = h_input; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); mark_present_for_each<T> h_f; mark_present_for_each<T> d_f; h_f.ptr = &h_output[0]; d_f.ptr = (&d_output[0]).get(); typename thrust::host_vector<T>::iterator h_result = thrust::for_each_n(h_input.begin(), h_input.size(), h_f); typename thrust::device_vector<T>::iterator d_result = thrust::for_each_n(d_input.begin(), d_input.size(), d_f); ASSERT_EQUAL(h_output, d_output); ASSERT_EQUAL_QUIET(h_result, 
h_input.end()); ASSERT_EQUAL_QUIET(d_result, d_input.end()); } DECLARE_VARIABLE_UNITTEST(TestForEachN); template <typename T, unsigned int N> struct SetFixedVectorToConstant { FixedVector<T,N> exemplar; SetFixedVectorToConstant(T scalar) : exemplar(scalar) {} __host__ __device__ void operator()(FixedVector<T,N>& t) { t = exemplar; } }; template <typename T, unsigned int N> void _TestForEachWithLargeTypes(void) { size_t n = (64 * 1024) / sizeof(FixedVector<T,N>); thrust::host_vector< FixedVector<T,N> > h_data(n); for(size_t i = 0; i < h_data.size(); i++) h_data[i] = FixedVector<T,N>(i); thrust::device_vector< FixedVector<T,N> > d_data = h_data; SetFixedVectorToConstant<T,N> func(123); thrust::for_each(h_data.begin(), h_data.end(), func); thrust::for_each(d_data.begin(), d_data.end(), func); ASSERT_EQUAL_QUIET(h_data, d_data); } void TestForEachWithLargeTypes(void) { _TestForEachWithLargeTypes<int, 1>(); _TestForEachWithLargeTypes<int, 2>(); _TestForEachWithLargeTypes<int, 4>(); _TestForEachWithLargeTypes<int, 8>(); _TestForEachWithLargeTypes<int, 16>(); _TestForEachWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1 _TestForEachWithLargeTypes<int, 64>(); _TestForEachWithLargeTypes<int, 128>(); _TestForEachWithLargeTypes<int, 256>(); _TestForEachWithLargeTypes<int, 512>(); _TestForEachWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008 } DECLARE_UNITTEST(TestForEachWithLargeTypes); template <typename T, unsigned int N> void _TestForEachNWithLargeTypes(void) { size_t n = (64 * 1024) / sizeof(FixedVector<T,N>); thrust::host_vector< FixedVector<T,N> > h_data(n); for(size_t i = 0; i < h_data.size(); i++) h_data[i] = FixedVector<T,N>(i); thrust::device_vector< FixedVector<T,N> > d_data = h_data; SetFixedVectorToConstant<T,N> func(123); thrust::for_each_n(h_data.begin(), h_data.size(), func); thrust::for_each_n(d_data.begin(), d_data.size(), func); ASSERT_EQUAL_QUIET(h_data, d_data); } void TestForEachNWithLargeTypes(void) { _TestForEachNWithLargeTypes<int, 1>(); _TestForEachNWithLargeTypes<int, 2>(); _TestForEachNWithLargeTypes<int, 4>(); _TestForEachNWithLargeTypes<int, 8>(); _TestForEachNWithLargeTypes<int, 16>(); _TestForEachNWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1 _TestForEachNWithLargeTypes<int, 64>(); _TestForEachNWithLargeTypes<int, 128>(); _TestForEachNWithLargeTypes<int, 256>(); _TestForEachNWithLargeTypes<int, 512>(); _TestForEachNWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008 } DECLARE_UNITTEST(TestForEachNWithLargeTypes); THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END struct only_set_when_expected { unsigned long long expected; bool * flag; __device__ void operator()(unsigned long long x) { if (x == expected) { *flag = true; } } }; void TestForEachWithBigIndexesHelper(int magnitude) { thrust::counting_iterator<unsigned long long> begin(0); thrust::counting_iterator<unsigned long long> end = begin + (1ull << magnitude); ASSERT_EQUAL(thrust::distance(begin, end), 1ll << magnitude); thrust::device_ptr<bool> has_executed = thrust::device_malloc<bool>(1); *has_executed = false; only_set_when_expected fn = { (1ull << magnitude) - 1, thrust::raw_pointer_cast(has_executed) }; thrust::for_each(thrust::device, begin, end, fn); bool has_executed_h = *has_executed; thrust::device_free(has_executed); ASSERT_EQUAL(has_executed_h, true); } void TestForEachWithBigIndexes() { TestForEachWithBigIndexesHelper(30); TestForEachWithBigIndexesHelper(31); TestForEachWithBigIndexesHelper(32); TestForEachWithBigIndexesHelper(33); } 
DECLARE_UNITTEST(TestForEachWithBigIndexes);
5919a123b10f8def1304b9878016ce8e2b536044.cu
/* * Copyright 2008-2013 NVIDIA Corporation * Modifications Copyright© 2019 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <unittest/unittest.h> #include <thrust/for_each.h> #include <thrust/device_ptr.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/retag.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include <algorithm> THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN template <typename T> class mark_present_for_each { public: T * ptr; __host__ __device__ void operator()(T x){ ptr[(int) x] = 1; } }; template <class Vector> void TestForEachSimple(void) { typedef typename Vector::value_type T; Vector input(5); Vector output(7, (T) 0); input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6; mark_present_for_each<T> f; f.ptr = thrust::raw_pointer_cast(output.data()); typename Vector::iterator result = thrust::for_each(input.begin(), input.end(), f); ASSERT_EQUAL(output[0], 0); ASSERT_EQUAL(output[1], 0); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 1); ASSERT_EQUAL_QUIET(result, input.end()); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestForEachSimple); template<typename InputIterator, typename Function> __host__ __device__ InputIterator for_each(my_system &system, InputIterator first, InputIterator, Function) { system.validate_dispatch(); return first; } void TestForEachDispatchExplicit() { thrust::device_vector<int> vec(1); my_system sys(0); thrust::for_each(sys, vec.begin(), vec.end(), 0); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestForEachDispatchExplicit); template<typename InputIterator, typename Function> __host__ __device__ InputIterator for_each(my_tag, InputIterator first, InputIterator, Function) { *first = 13; return first; } void TestForEachDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::for_each(thrust::retag<my_tag>(vec.begin()), thrust::retag<my_tag>(vec.end()), 0); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestForEachDispatchImplicit); template <class Vector> void TestForEachNSimple(void) { typedef typename Vector::value_type T; Vector input(5); Vector output(7, (T) 0); input[0] = 3; input[1] = 2; input[2] = 3; input[3] = 4; input[4] = 6; mark_present_for_each<T> f; f.ptr = thrust::raw_pointer_cast(output.data()); typename Vector::iterator result = thrust::for_each_n(input.begin(), input.size(), f); ASSERT_EQUAL(output[0], 0); ASSERT_EQUAL(output[1], 0); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 1); ASSERT_EQUAL_QUIET(result, input.end()); } DECLARE_INTEGRAL_VECTOR_UNITTEST(TestForEachNSimple); template<typename InputIterator, typename Size, typename Function> __host__ __device__ InputIterator for_each_n(my_system &system, InputIterator first, Size, Function) { system.validate_dispatch(); return first; } void TestForEachNDispatchExplicit() { 
thrust::device_vector<int> vec(1); my_system sys(0); thrust::for_each_n(sys, vec.begin(), vec.size(), 0); ASSERT_EQUAL(true, sys.is_valid()); } DECLARE_UNITTEST(TestForEachNDispatchExplicit); template<typename InputIterator, typename Size, typename Function> __host__ __device__ InputIterator for_each_n(my_tag, InputIterator first, Size, Function) { *first = 13; return first; } void TestForEachNDispatchImplicit() { thrust::device_vector<int> vec(1); thrust::for_each_n(thrust::retag<my_tag>(vec.begin()), vec.size(), 0); ASSERT_EQUAL(13, vec.front()); } DECLARE_UNITTEST(TestForEachNDispatchImplicit); void TestForEachSimpleAnySystem(void) { thrust::device_vector<int> output(7, 0); mark_present_for_each<int> f; f.ptr = thrust::raw_pointer_cast(output.data()); thrust::counting_iterator<int> result = thrust::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(5), f); ASSERT_EQUAL(output[0], 1); ASSERT_EQUAL(output[1], 1); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 0); ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5)); } DECLARE_UNITTEST(TestForEachSimpleAnySystem); void TestForEachNSimpleAnySystem(void) { thrust::device_vector<int> output(7, 0); mark_present_for_each<int> f; f.ptr = thrust::raw_pointer_cast(output.data()); thrust::counting_iterator<int> result = thrust::for_each_n(thrust::make_counting_iterator(0), 5, f); ASSERT_EQUAL(output[0], 1); ASSERT_EQUAL(output[1], 1); ASSERT_EQUAL(output[2], 1); ASSERT_EQUAL(output[3], 1); ASSERT_EQUAL(output[4], 1); ASSERT_EQUAL(output[5], 0); ASSERT_EQUAL(output[6], 0); ASSERT_EQUAL_QUIET(result, thrust::make_counting_iterator(5)); } DECLARE_UNITTEST(TestForEachNSimpleAnySystem); template <typename T> void TestForEach(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input = unittest::random_integers<T>(n); for(size_t i = 0; i < n; i++) h_input[i] = ((size_t) h_input[i]) % output_size; thrust::device_vector<T> d_input = h_input; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); mark_present_for_each<T> h_f; mark_present_for_each<T> d_f; h_f.ptr = &h_output[0]; d_f.ptr = (&d_output[0]).get(); typename thrust::host_vector<T>::iterator h_result = thrust::for_each(h_input.begin(), h_input.end(), h_f); typename thrust::device_vector<T>::iterator d_result = thrust::for_each(d_input.begin(), d_input.end(), d_f); ASSERT_EQUAL(h_output, d_output); ASSERT_EQUAL_QUIET(h_result, h_input.end()); ASSERT_EQUAL_QUIET(d_result, d_input.end()); } DECLARE_VARIABLE_UNITTEST(TestForEach); template <typename T> void TestForEachN(const size_t n) { const size_t output_size = std::min((size_t) 10, 2 * n); thrust::host_vector<T> h_input = unittest::random_integers<T>(n); for(size_t i = 0; i < n; i++) h_input[i] = ((size_t) h_input[i]) % output_size; thrust::device_vector<T> d_input = h_input; thrust::host_vector<T> h_output(output_size, (T) 0); thrust::device_vector<T> d_output(output_size, (T) 0); mark_present_for_each<T> h_f; mark_present_for_each<T> d_f; h_f.ptr = &h_output[0]; d_f.ptr = (&d_output[0]).get(); typename thrust::host_vector<T>::iterator h_result = thrust::for_each_n(h_input.begin(), h_input.size(), h_f); typename thrust::device_vector<T>::iterator d_result = thrust::for_each_n(d_input.begin(), d_input.size(), d_f); ASSERT_EQUAL(h_output, d_output); ASSERT_EQUAL_QUIET(h_result, h_input.end()); ASSERT_EQUAL_QUIET(d_result, 
d_input.end()); } DECLARE_VARIABLE_UNITTEST(TestForEachN); template <typename T, unsigned int N> struct SetFixedVectorToConstant { FixedVector<T,N> exemplar; SetFixedVectorToConstant(T scalar) : exemplar(scalar) {} __host__ __device__ void operator()(FixedVector<T,N>& t) { t = exemplar; } }; template <typename T, unsigned int N> void _TestForEachWithLargeTypes(void) { size_t n = (64 * 1024) / sizeof(FixedVector<T,N>); thrust::host_vector< FixedVector<T,N> > h_data(n); for(size_t i = 0; i < h_data.size(); i++) h_data[i] = FixedVector<T,N>(i); thrust::device_vector< FixedVector<T,N> > d_data = h_data; SetFixedVectorToConstant<T,N> func(123); thrust::for_each(h_data.begin(), h_data.end(), func); thrust::for_each(d_data.begin(), d_data.end(), func); ASSERT_EQUAL_QUIET(h_data, d_data); } void TestForEachWithLargeTypes(void) { _TestForEachWithLargeTypes<int, 1>(); _TestForEachWithLargeTypes<int, 2>(); _TestForEachWithLargeTypes<int, 4>(); _TestForEachWithLargeTypes<int, 8>(); _TestForEachWithLargeTypes<int, 16>(); _TestForEachWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1 _TestForEachWithLargeTypes<int, 64>(); _TestForEachWithLargeTypes<int, 128>(); _TestForEachWithLargeTypes<int, 256>(); _TestForEachWithLargeTypes<int, 512>(); _TestForEachWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008 } DECLARE_UNITTEST(TestForEachWithLargeTypes); template <typename T, unsigned int N> void _TestForEachNWithLargeTypes(void) { size_t n = (64 * 1024) / sizeof(FixedVector<T,N>); thrust::host_vector< FixedVector<T,N> > h_data(n); for(size_t i = 0; i < h_data.size(); i++) h_data[i] = FixedVector<T,N>(i); thrust::device_vector< FixedVector<T,N> > d_data = h_data; SetFixedVectorToConstant<T,N> func(123); thrust::for_each_n(h_data.begin(), h_data.size(), func); thrust::for_each_n(d_data.begin(), d_data.size(), func); ASSERT_EQUAL_QUIET(h_data, d_data); } void TestForEachNWithLargeTypes(void) { _TestForEachNWithLargeTypes<int, 1>(); _TestForEachNWithLargeTypes<int, 2>(); _TestForEachNWithLargeTypes<int, 4>(); _TestForEachNWithLargeTypes<int, 8>(); _TestForEachNWithLargeTypes<int, 16>(); _TestForEachNWithLargeTypes<int, 32>(); // fails on Linux 32 w/ gcc 4.1 _TestForEachNWithLargeTypes<int, 64>(); _TestForEachNWithLargeTypes<int, 128>(); _TestForEachNWithLargeTypes<int, 256>(); _TestForEachNWithLargeTypes<int, 512>(); _TestForEachNWithLargeTypes<int, 1024>(); // fails on Vista 64 w/ VS2008 } DECLARE_UNITTEST(TestForEachNWithLargeTypes); THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END struct only_set_when_expected { unsigned long long expected; bool * flag; __device__ void operator()(unsigned long long x) { if (x == expected) { *flag = true; } } }; void TestForEachWithBigIndexesHelper(int magnitude) { thrust::counting_iterator<unsigned long long> begin(0); thrust::counting_iterator<unsigned long long> end = begin + (1ull << magnitude); ASSERT_EQUAL(thrust::distance(begin, end), 1ll << magnitude); thrust::device_ptr<bool> has_executed = thrust::device_malloc<bool>(1); *has_executed = false; only_set_when_expected fn = { (1ull << magnitude) - 1, thrust::raw_pointer_cast(has_executed) }; thrust::for_each(thrust::device, begin, end, fn); bool has_executed_h = *has_executed; thrust::device_free(has_executed); ASSERT_EQUAL(has_executed_h, true); } void TestForEachWithBigIndexes() { TestForEachWithBigIndexesHelper(30); TestForEachWithBigIndexesHelper(31); TestForEachWithBigIndexesHelper(32); TestForEachWithBigIndexesHelper(33); } DECLARE_UNITTEST(TestForEachWithBigIndexes);
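For readers who only want the core pattern these unit tests exercise, here is a minimal standalone usage sketch of thrust::for_each with a counting iterator and a marking functor; the names are illustrative and it is not part of the test framework above.

#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <cstdio>

// Mark which indices in [0, 5) appear, writing through a raw device pointer.
struct mark_present
{
    int* ptr;
    __host__ __device__ void operator()(int x) { ptr[x] = 1; }
};

int main()
{
    thrust::device_vector<int> flags(7, 0);
    mark_present f;
    f.ptr = thrust::raw_pointer_cast(flags.data());
    thrust::for_each(thrust::make_counting_iterator(0),
                     thrust::make_counting_iterator(5), f);
    for (int i = 0; i < 7; ++i)
        std::printf("%d ", (int)flags[i]);   // expected: 1 1 1 1 1 0 0
    std::printf("\n");
    return 0;
}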
1416a0239fc065fba4b7e74d405fe2c3f695f473.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduce_ws(float *gdata, float *out){ __shared__ float sdata[32]; int tid = threadIdx.x; int idx = threadIdx.x+blockDim.x*blockIdx.x; float val = 0.0f; unsigned mask = 0xFFFFFFFFU; int lane = threadIdx.x % warpSize; int warpID = threadIdx.x / warpSize; while (idx < N) { // grid stride loop to load val += gdata[idx]; idx += gridDim.x*blockDim.x; } // 1st warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (lane == 0) sdata[warpID] = val; __syncthreads(); // put warp results in shared mem // hereafter, just warp 0 if (warpID == 0){ // reload val from shared mem if warp existed val = (tid < blockDim.x/warpSize)?sdata[lane]:0; // final warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (tid == 0) atomicAdd(out, val); } }
1416a0239fc065fba4b7e74d405fe2c3f695f473.cu
#include "includes.h" __global__ void reduce_ws(float *gdata, float *out){ __shared__ float sdata[32]; int tid = threadIdx.x; int idx = threadIdx.x+blockDim.x*blockIdx.x; float val = 0.0f; unsigned mask = 0xFFFFFFFFU; int lane = threadIdx.x % warpSize; int warpID = threadIdx.x / warpSize; while (idx < N) { // grid stride loop to load val += gdata[idx]; idx += gridDim.x*blockDim.x; } // 1st warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (lane == 0) sdata[warpID] = val; __syncthreads(); // put warp results in shared mem // hereafter, just warp 0 if (warpID == 0){ // reload val from shared mem if warp existed val = (tid < blockDim.x/warpSize)?sdata[lane]:0; // final warp-shuffle reduction for (int offset = warpSize/2; offset > 0; offset >>= 1) val += __shfl_down_sync(mask, val, offset); if (tid == 0) atomicAdd(out, val); } }
b44fce2bacb89b6ab88f3bfe1eacaf89dbd2b992.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #define N 1024 //Note: N should always be in powers of 2 (like 2, 4, 8, 16, 32, ...) -Mohit Agrawal __global__ void FindSum(float input[]) { int tid = threadIdx.x; int step_size = 1; int number_of_threads = blockDim.x; while (number_of_threads > 0) { if (tid < number_of_threads) { int fst = tid * step_size * 2; int snd = fst + step_size; input[fst] += input[snd]; } step_size <<= 1; number_of_threads >>= 1; } } __global__ void FindDiff(float input[], float mean) { int tid = threadIdx.x; if (tid < N) { input[tid] = input[tid] - mean; } } int main() { //Initialization time_t t; srand((unsigned) time(&t)); float *h; h = (float*)malloc(N*sizeof(float)); for(int i=0; i<N; i++) { h[i] = ((float)rand() / (float)RAND_MAX) * N; } for(int i=0; i<N; i++) { printf("%f ", h[i]); } printf("\n"); //Finding sum float* d; hipMalloc(&d, N*sizeof(float)); hipMemcpy(d, h, N*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( FindSum) , dim3(1), dim3(N/2) , 0, 0, d); hipDeviceSynchronize(); float *result; result = (float*)malloc(sizeof(float)); hipMemcpy(result, d, sizeof(float), hipMemcpyDeviceToHost); printf("Sum is: %f \n", result[0]); //Mean calculation float avg = result[0]/N; printf("Avg is: %f \n", avg); //Subtracting mean from each element float *g; hipMalloc(&g, N*sizeof(float)); hipMemcpy(g, h, N*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( FindDiff) , dim3(1), dim3(N) , 0, 0, g, avg); hipDeviceSynchronize(); float *solution; solution = (float*)malloc(N*sizeof(float)); hipMemcpy(solution, g, N*sizeof(float), hipMemcpyDeviceToHost); printf("Difference: "); for(int i=0; i<N; i++) { printf("%f ", solution[i]); } printf("\n"); for(int i=0; i<N; i++) { solution[i] = fabsf(solution[i] * solution[i]); } printf("Squares: "); for(int i=0; i<N; i++) { printf("%f ", solution[i]); } printf("\n"); float *solute; hipMalloc(&solute, N*sizeof(float)); hipMemcpy(solute, solution, N*sizeof(float), hipMemcpyHostToDevice); //Adding the squares of differences hipLaunchKernelGGL(( FindSum) , dim3(1), dim3(N/2) , 0, 0, solute); hipDeviceSynchronize(); float *std_dev; std_dev = (float*)malloc(sizeof(float)); hipMemcpy(std_dev, solute, sizeof(float), hipMemcpyDeviceToHost); printf("Sum of Squares: "); printf("%f \n", std_dev[0]); //Taking arithmetic mean of the sqaures of differences float sol = std_dev[0]/N; float answer = sqrt(sol); printf("Standard Deviation is: %f \n", answer); hipFree(d); free(h); return 0; }
b44fce2bacb89b6ab88f3bfe1eacaf89dbd2b992.cu
#include <stdio.h> #include <math.h> #define N 1024 //Note: N should always be in powers of 2 (like 2, 4, 8, 16, 32, ...) -Mohit Agrawal __global__ void FindSum(float input[]) { int tid = threadIdx.x; int step_size = 1; int number_of_threads = blockDim.x; while (number_of_threads > 0) { if (tid < number_of_threads) { int fst = tid * step_size * 2; int snd = fst + step_size; input[fst] += input[snd]; } step_size <<= 1; number_of_threads >>= 1; } } __global__ void FindDiff(float input[], float mean) { int tid = threadIdx.x; if (tid < N) { input[tid] = input[tid] - mean; } } int main() { //Initialization time_t t; srand((unsigned) time(&t)); float *h; h = (float*)malloc(N*sizeof(float)); for(int i=0; i<N; i++) { h[i] = ((float)rand() / (float)RAND_MAX) * N; } for(int i=0; i<N; i++) { printf("%f ", h[i]); } printf("\n"); //Finding sum float* d; cudaMalloc(&d, N*sizeof(float)); cudaMemcpy(d, h, N*sizeof(float), cudaMemcpyHostToDevice); FindSum <<<1, N/2 >>>(d); cudaDeviceSynchronize(); float *result; result = (float*)malloc(sizeof(float)); cudaMemcpy(result, d, sizeof(float), cudaMemcpyDeviceToHost); printf("Sum is: %f \n", result[0]); //Mean calculation float avg = result[0]/N; printf("Avg is: %f \n", avg); //Subtracting mean from each element float *g; cudaMalloc(&g, N*sizeof(float)); cudaMemcpy(g, h, N*sizeof(float), cudaMemcpyHostToDevice); FindDiff <<<1, N >>>(g, avg); cudaDeviceSynchronize(); float *solution; solution = (float*)malloc(N*sizeof(float)); cudaMemcpy(solution, g, N*sizeof(float), cudaMemcpyDeviceToHost); printf("Difference: "); for(int i=0; i<N; i++) { printf("%f ", solution[i]); } printf("\n"); for(int i=0; i<N; i++) { solution[i] = fabsf(solution[i] * solution[i]); } printf("Squares: "); for(int i=0; i<N; i++) { printf("%f ", solution[i]); } printf("\n"); float *solute; cudaMalloc(&solute, N*sizeof(float)); cudaMemcpy(solute, solution, N*sizeof(float), cudaMemcpyHostToDevice); //Adding the squares of differences FindSum <<<1, N/2 >>>(solute); cudaDeviceSynchronize(); float *std_dev; std_dev = (float*)malloc(sizeof(float)); cudaMemcpy(std_dev, solute, sizeof(float), cudaMemcpyDeviceToHost); printf("Sum of Squares: "); printf("%f \n", std_dev[0]); //Taking arithmetic mean of the sqaures of differences float sol = std_dev[0]/N; float answer = sqrt(sol); printf("Standard Deviation is: %f \n", answer); cudaFree(d); free(h); return 0; }
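The pairwise loop in FindSum above has no barrier between rounds, so a thread can read a partial sum that another thread is still writing; that is only reliable within a single warp. The kernel below is the same tree reduction with a __syncthreads() per round, keeping the original assumptions (launched as <<<1, N/2>>>, N a power of two); it is an illustrative variant with a hypothetical name, not the file's original kernel.

// Same pairwise tree reduction as FindSum, with a barrier after each round
// so every write of round r is visible before round r+1 reads it.
__global__ void FindSumSynced(float input[])
{
    int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;

    while (number_of_threads > 0) {
        if (tid < number_of_threads) {
            int fst = tid * step_size * 2;
            int snd = fst + step_size;
            input[fst] += input[snd];
        }
        __syncthreads();   // uniform loop count across the block, so this is safe
        step_size <<= 1;
        number_of_threads >>= 1;
    }
}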
b95bf770a25cb8f1109e3bd848cc02a605b5cd41.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include "kernel.hip" #include "support.h" void cudaCheck(hipError_t cuda_ret) { if(cuda_ret != hipSuccess) { printf("found an error"); exit(-1); } } int main (int argc, char *argv[]) { Timer timer; hipError_t cuda_ret; time_t t; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *A_h, *B_h, *C_h; float *A_d, *B_d, *C_d; size_t A_sz, B_sz, C_sz; unsigned matArow, matAcol; unsigned matBrow, matBcol; dim3 dim_grid, dim_block; if (argc == 1) { matArow = 1000; matAcol = matBrow = 1000; matBcol = 1000; } else if (argc == 2) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[1]); matBcol = atoi(argv[1]); } else if (argc == 4) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[2]); matBcol = atoi(argv[3]); } else { printf("\n Invalid input parameters!" "\n Usage: ./sgemm # All matrices are 1000 x 1000" "\n Usage: ./sgemm <m> # All matrices are m x m" "\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n" "\n"); exit(0); } A_sz = matArow*matAcol; B_sz = matBrow*matBcol; C_sz = matArow*matBcol; /* Intializes random number generator */ srand((unsigned) time(&t)); A_h = (float*) malloc( sizeof(float)*A_sz ); for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; } B_h = (float*) malloc( sizeof(float)*B_sz ); for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; } C_h = (float*) malloc( sizeof(float)*C_sz ); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol, matBrow, matBcol, matArow, matBcol); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); //INSERT CODE HERE cuda_ret = hipMalloc(&A_d, A_sz * sizeof(float)); cudaCheck(cuda_ret); cuda_ret = hipMalloc(&B_d, B_sz * sizeof(float)); cudaCheck(cuda_ret); cuda_ret = hipMalloc(&C_d, C_sz * sizeof(float)); cudaCheck(cuda_ret); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); //INSERT CODE HERE cuda_ret = hipMemcpy(A_d, A_h, A_sz*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cuda_ret); cuda_ret = hipMemcpy(B_d, B_h, B_sz*sizeof(float), hipMemcpyHostToDevice); cudaCheck(cuda_ret); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel using standard sgemm interface --------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \ A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); //INSERT CODE HERE cuda_ret = hipMemcpy(C_h, C_d, 
C_sz*sizeof(float), hipMemcpyDeviceToHost); cudaCheck(cuda_ret); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, matArow, matAcol, matBcol); // Free memory ------------------------------------------------------------ free(A_h); free(B_h); free(C_h); //INSERT CODE HERE hipFree(A_d); hipFree(B_d); hipFree(C_d); return 0; }
b95bf770a25cb8f1109e3bd848cc02a605b5cd41.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <time.h> #include "kernel.cu" #include "support.h" void cudaCheck(cudaError_t cuda_ret) { if(cuda_ret != cudaSuccess) { printf("found an error"); exit(-1); } } int main (int argc, char *argv[]) { Timer timer; cudaError_t cuda_ret; time_t t; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); float *A_h, *B_h, *C_h; float *A_d, *B_d, *C_d; size_t A_sz, B_sz, C_sz; unsigned matArow, matAcol; unsigned matBrow, matBcol; dim3 dim_grid, dim_block; if (argc == 1) { matArow = 1000; matAcol = matBrow = 1000; matBcol = 1000; } else if (argc == 2) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[1]); matBcol = atoi(argv[1]); } else if (argc == 4) { matArow = atoi(argv[1]); matAcol = matBrow = atoi(argv[2]); matBcol = atoi(argv[3]); } else { printf("\n Invalid input parameters!" "\n Usage: ./sgemm # All matrices are 1000 x 1000" "\n Usage: ./sgemm <m> # All matrices are m x m" "\n Usage: ./sgemm <m> <k> <n> # A: m x k, B: k x n, C: m x n" "\n"); exit(0); } A_sz = matArow*matAcol; B_sz = matBrow*matBcol; C_sz = matArow*matBcol; /* Intializes random number generator */ srand((unsigned) time(&t)); A_h = (float*) malloc( sizeof(float)*A_sz ); for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; } B_h = (float*) malloc( sizeof(float)*B_sz ); for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; } C_h = (float*) malloc( sizeof(float)*C_sz ); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol, matBrow, matBcol, matArow, matBcol); // Allocate device variables ---------------------------------------------- printf("Allocating device variables..."); fflush(stdout); startTime(&timer); //INSERT CODE HERE cuda_ret = cudaMalloc(&A_d, A_sz * sizeof(float)); cudaCheck(cuda_ret); cuda_ret = cudaMalloc(&B_d, B_sz * sizeof(float)); cudaCheck(cuda_ret); cuda_ret = cudaMalloc(&C_d, C_sz * sizeof(float)); cudaCheck(cuda_ret); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy host variables to device ------------------------------------------ printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); //INSERT CODE HERE cuda_ret = cudaMemcpy(A_d, A_h, A_sz*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cuda_ret); cuda_ret = cudaMemcpy(B_d, B_h, B_sz*sizeof(float), cudaMemcpyHostToDevice); cudaCheck(cuda_ret); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Launch kernel using standard sgemm interface --------------------------- printf("Launching kernel..."); fflush(stdout); startTime(&timer); basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \ A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Copy device variables from host ---------------------------------------- printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); //INSERT CODE HERE cuda_ret = cudaMemcpy(C_h, C_d, C_sz*sizeof(float), cudaMemcpyDeviceToHost); 
cudaCheck(cuda_ret); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // Verify correctness ----------------------------------------------------- printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, matArow, matAcol, matBcol); // Free memory ------------------------------------------------------------ free(A_h); free(B_h); free(C_h); //INSERT CODE HERE cudaFree(A_d); cudaFree(B_d); cudaFree(C_d); return 0; }
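basicSgemm itself lives in kernel.cu, which is not included in this record, so its real implementation is unknown here. The sketch below is a hypothetical naive stand-in that matches the BLAS-style call made by the harness (column-major storage, only the 'N','N' case); the kernel name and tile size are illustrative, and it is not meant to be linked alongside the real kernel.cu.

#include <cuda_runtime.h>

#define TILE 16

// Naive column-major C = alpha*A*B + beta*C; A is m x k (leading dim lda),
// B is k x n (ldb), C is m x n (ldc).
__global__ void sgemmNaive(int m, int n, int k, float alpha,
                           const float* A, int lda,
                           const float* B, int ldb,
                           float beta, float* C, int ldc)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;   // index into m
    int col = blockIdx.x * blockDim.x + threadIdx.x;   // index into n
    if (row >= m || col >= n) return;
    float acc = 0.0f;
    for (int l = 0; l < k; ++l)
        acc += A[row + l * lda] * B[l + col * ldb];
    // Avoid reading uninitialized C when beta == 0, as in the harness above.
    float prev = (beta == 0.0f) ? 0.0f : C[row + col * ldc];
    C[row + col * ldc] = alpha * acc + beta * prev;
}

// Hypothetical host wrapper matching the visible call signature.
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha,
                const float* A, int lda, const float* B, int ldb,
                float beta, float* C, int ldc)
{
    if (transa != 'N' || transb != 'N') return;   // only the case used above
    dim3 block(TILE, TILE);
    dim3 grid((n + TILE - 1) / TILE, (m + TILE - 1) / TILE);
    sgemmNaive<<<grid, block>>>(m, n, k, alpha, A, lda, B, ldb, beta, C, ldc);
}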
bbaed0345fc8c354967d6c088dd27ba0624742a9.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; //__constant__ int c_row[64516/4]; #define spmv_NBLOCKS 12*8*21 //22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 texture<float,1,hipReadModeElementType> tex_val; texture<int,1,hipReadModeElementType> tex_col; texture<float,1,hipReadModeElementType> tex_vec; texture<int,1,hipReadModeElementType> tex_row; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; hipHostMalloc(newA_ptr, paddedSize * sizeof(float)); hipHostMalloc(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col];//tex1Dfetch(tex_vec,col); } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); //__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1]; __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; //if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1) //rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x]; //__syncthreads(); if (myRow < dim) { int warpStart =tex1Dfetch(tex_row,myRow); int warpEnd = tex1Dfetch(tex_row,myRow+1); float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = tex1Dfetch(tex_col,j); mySum += tex1Dfetch(tex_val,j) *tex1Dfetch(tex_vec,col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { hipSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows/10; // 1% of entries will be non-zero float maxval = 200.0; hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float)); hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int)); hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice); // hipMemcpyToSymbol(c_row,h_rowDelimiters,(spmv_numRows+1)*sizeof(int)); 
hipBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float)); hipBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float)); hipBindTexture(0,tex_row,d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); hipBindTexture(0,tex_col,d_spmv_cols,spmv_nItems * sizeof(int)); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); for(int i=0;i<100;i++) hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0, d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
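// A minimal host-side sketch (illustrative only, not code from the benchmark above) of the
// CSR layout that initRandomMatrix and spmvCpu assume: val holds the non-zero values in
// row-major order, cols holds the column index of each value, and
// rowDelimiters[i]..rowDelimiters[i+1] marks the range of values belonging to row i
// (the last entry equals nnz). The csr_* names and csr_demo() are hypothetical and exist
// only for this example.
#include <cstdio>

// 3x3 example matrix:
//   [ 10   0  20 ]
//   [  0  30   0 ]
//   [ 40   0  50 ]
static const float csr_val[]  = { 10.f, 20.f, 30.f, 40.f, 50.f };
static const int   csr_cols[] = { 0, 2, 1, 0, 2 };
static const int   csr_rows[] = { 0, 2, 3, 5 };   // rowDelimiters; rowDelimiters[dim] == nnz

static void csr_demo() {
    const float vec[3] = { 1.f, 2.f, 3.f };
    float out[3];
    for (int i = 0; i < 3; i++) {                  // same loop structure as spmvCpu
        float t = 0.f;
        for (int j = csr_rows[i]; j < csr_rows[i + 1]; j++)
            t += csr_val[j] * vec[csr_cols[j]];
        out[i] = t;
    }
    printf("%.0f %.0f %.0f\n", out[0], out[1], out[2]);   // prints: 70 60 190
}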
bbaed0345fc8c354967d6c088dd27ba0624742a9.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; //__constant__ int c_row[64516/4]; #define spmv_NBLOCKS 12*8*21 //22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 texture<float,1,cudaReadModeElementType> tex_val; texture<int,1,cudaReadModeElementType> tex_col; texture<float,1,cudaReadModeElementType> tex_vec; texture<int,1,cudaReadModeElementType> tex_row; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; cudaMallocHost(newA_ptr, paddedSize * sizeof(float)); cudaMallocHost(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col];//tex1Dfetch(tex_vec,col); } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); //__shared__ int rowDeli[spmv_BLOCK_SIZE/WARP_SIZE+1]; __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; //if (threadIdx.x<spmv_BLOCK_SIZE/WARP_SIZE+1) //rowDeli[threadIdx.x]=rowDelimiters[myRow+threadIdx.x]; //__syncthreads(); if (myRow < dim) { int warpStart =tex1Dfetch(tex_row,myRow); int warpEnd = tex1Dfetch(tex_row,myRow+1); float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = tex1Dfetch(tex_col,j); mySum += tex1Dfetch(tex_val,j) *tex1Dfetch(tex_vec,col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { cudaSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows/10; // 1% of entries will be non-zero float maxval = 200.0; cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float)); cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int)); cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice); // 
cudaMemcpyToSymbol(c_row,h_rowDelimiters,(spmv_numRows+1)*sizeof(int)); cudaBindTexture(0,tex_vec,d_spmv_vec,spmv_numRows * sizeof(float)); cudaBindTexture(0,tex_val,d_spmv_val,spmv_nItems * sizeof(float)); cudaBindTexture(0,tex_row,d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); cudaBindTexture(0,tex_col,d_spmv_cols,spmv_nItems * sizeof(int)); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); for(int i=0;i<100;i++) spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>> (d_spmv_val, d_spmv_cols, d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
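// A hedged alternative sketch (not the kernel used above): on CUDA 9 and later, the per-warp
// partial-sum reduction that spmv_kernel performs through the volatile partialSums[] array can
// also be expressed with __shfl_down_sync, avoiding shared memory. warp_reduce_sum is a
// hypothetical helper introduced only for this illustration and assumes a full 32-lane warp.
#include <cuda_runtime.h>

// Reduce one float per lane down to lane 0 of the warp.
__device__ inline float warp_reduce_sum(float v) {
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, offset);
    return v;   // result is valid in lane 0
}

// Inside the row loop of a warp-per-row SpMV kernel, this would replace the shared-memory steps:
//   float mySum = ...;                 // per-lane partial dot product
//   mySum = warp_reduce_sum(mySum);
//   if (id == 0) out[myRow] = mySum;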
845a0724fe0092e42fd04cfbfbd4f28771212d9b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorIndex.hip" #else #include <ATen/hip/HIPContext.h> // Check tensor dimensions for index operations, and return the slice size. // src can be nullptr in case of indexFill: in that case it is ignored. static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *index, THCTensor *src) { int dstDims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == 1, 4, "expecting vector of indices"); THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds"); ptrdiff_t dstSliceSize = 1; for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= THTensor_sizeLegacyNoScalars(dst, d); } } if (src == nullptr) return dstSliceSize; THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds"); THArgCheck(THCudaLongTensor_nElement(state, index) == THTensor_sizeLegacyNoScalars(src, dim), 4, "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= THTensor_sizeLegacyNoScalars(src, d); if (!mismatch && THTensor_sizeLegacyNoScalars(dst, d) != THTensor_sizeLegacyNoScalars(src, d)) mismatch = true; } } THArgCheck(dstSliceSize == srcSliceSize, 2, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { static bool warningShown = false; if (!warningShown) { warningShown = true; fprintf(stderr, "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } } return dstSliceSize; } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. bool THCTensor_(indexShouldBeMajor)(TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstCopyDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, srcTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstCopyDimSize); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) { THCThrustAllocator thrustAlloc(state); auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index)); auto src_iter = thrust::device_ptr<scalar_t>(THCTensor_(data)(state, src)); auto numel = THCTensor_(numel)(state, src); thrust::sort_by_key( thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()), index_iter, index_iter + numel, src_iter, ThrustLTOp<int64_t>()); } void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); ptrdiff_t dstSize = 
THCTensor_(nElement)(state, dst); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index); THArgCheck(THCTensor_(nElement)(state, src) == numIndices, 3, "src should have the same number of elements as index"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); if (numIndices == 0) { return; } if (accumulate) { // wrap indices so to replace negative indices THCudaLongTensor* sorted_index = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sorted_index, index); THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize)); THCTensor* sorted_src = THCTensor_(newClone)(state, src); THCTensor_(sort_indices)(state, sorted_index, sorted_src); dispatchTakePut<scalar_t, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index); THCTensor_(free)(state, sorted_src); THCudaLongTensor_free(state, sorted_index); } else { dispatchTakePut<scalar_t, TensorPutOp>(state, dst, src, index); } } void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, scalar_t val) { at::NoNamesGuard guard; THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); int64_t dstFillDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, indicesInfo, \ dstFillDim, sliceSize * numIndices, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstFillDimSize, val); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim); if (dstInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, -2, true); } else if (dstInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, -2, false); } } else if (dstInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } #endif
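// Hedged host-side sketch of the stride heuristic that THCTensor_(indexShouldBeMajor) implements
// above, restated on a plain 2-D struct. TensorShape2D, index_should_be_major, and the worked
// cases below are illustrative assumptions, not THC code.
#include <cstdio>

struct TensorShape2D { int sizes[2]; int strides[2]; };

// Same test as indexShouldBeMajor: does some dimension inside the slice have a
// smaller stride than the stride between adjacent slices?
static bool index_should_be_major(const TensorShape2D& t, int sliceDim) {
    unsigned int sliceStride = (unsigned int)t.strides[sliceDim];
    for (int i = 0; i < 2; ++i)
        if (i != sliceDim && t.sizes[i] > 1 && (unsigned int)t.strides[i] < sliceStride)
            return true;
    return false;
}

static void index_major_demo() {
    TensorShape2D contiguous4x8 = { {4, 8}, {8, 1} };
    printf("%d\n", index_should_be_major(contiguous4x8, 0));  // 1: slices are rows, in-slice stride 1 < 8
    printf("%d\n", index_should_be_major(contiguous4x8, 1));  // 0: slices are columns, sliceStride 1 is already smallest
}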
845a0724fe0092e42fd04cfbfbd4f28771212d9b.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorIndex.cu" #else #include <ATen/cuda/CUDAContext.h> // Check tensor dimensions for index operations, and return the slice size. // src can be nullptr in case of indexFill: in that case it is ignored. static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *index, THCTensor *src) { int dstDims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == 1, 4, "expecting vector of indices"); THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds"); ptrdiff_t dstSliceSize = 1; for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= THTensor_sizeLegacyNoScalars(dst, d); } } if (src == nullptr) return dstSliceSize; THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds"); THArgCheck(THCudaLongTensor_nElement(state, index) == THTensor_sizeLegacyNoScalars(src, dim), 4, "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= THTensor_sizeLegacyNoScalars(src, d); if (!mismatch && THTensor_sizeLegacyNoScalars(dst, d) != THTensor_sizeLegacyNoScalars(src, d)) mismatch = true; } } THArgCheck(dstSliceSize == srcSliceSize, 2, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { static bool warningShown = false; if (!warningShown) { warningShown = true; fprintf(stderr, "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } } return dstSliceSize; } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. bool THCTensor_(indexShouldBeMajor)(TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstCopyDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } cudaStream_t stream = c10::cuda::getCurrentCUDAStream(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexCopyLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, srcTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstCopyDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) { THCThrustAllocator thrustAlloc(state); auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index)); auto src_iter = thrust::device_ptr<scalar_t>(THCTensor_(data)(state, src)); auto numel = THCTensor_(numel)(state, src); thrust::sort_by_key( thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()), index_iter, index_iter + numel, src_iter, ThrustLTOp<int64_t>()); } void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); ptrdiff_t dstSize = THCTensor_(nElement)(state, 
dst); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index); THArgCheck(THCTensor_(nElement)(state, src) == numIndices, 3, "src should have the same number of elements as index"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); if (numIndices == 0) { return; } if (accumulate) { // wrap indices so to replace negative indices THCudaLongTensor* sorted_index = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sorted_index, index); THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize)); THCTensor* sorted_src = THCTensor_(newClone)(state, src); THCTensor_(sort_indices)(state, sorted_index, sorted_src); dispatchTakePut<scalar_t, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index); THCTensor_(free)(state, sorted_src); THCudaLongTensor_free(state, sorted_index); } else { dispatchTakePut<scalar_t, TensorPutOp>(state, dst, src, index); } } void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, scalar_t val) { at::NoNamesGuard guard; THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); int64_t dstFillDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } cudaStream_t stream = c10::cuda::getCurrentCUDAStream(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize * numIndices, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstFillDimSize, val); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim); if (dstInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, -2, true); } else if (dstInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, -2, false); } } else if (dstInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } #endif
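// A hedged, self-contained sketch of the indexCopy semantics for the simplest case only
// (a contiguous 2-D tensor indexed along the outermost dimension), processing data in the
// "index-major order" described above: each block walks its slice in lockstep. This is an
// illustration, not the templated indexCopySmallIndex / indexCopyLargeIndex kernels.
__global__ void index_copy_dim0_sketch(float* dst, const float* src, const long long* idx,
                                       int numIndices, int sliceSize) {
    // dst[idx[i]][e] = src[i][e] for every selected slice i and every element e in the slice.
    for (int i = blockIdx.x; i < numIndices; i += gridDim.x) {       // one slice per block
        long long dstRow = idx[i];
        for (int e = threadIdx.x; e < sliceSize; e += blockDim.x)    // threads sweep the slice together
            dst[dstRow * (long long)sliceSize + e] = src[(long long)i * sliceSize + e];
    }
}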
14ffe82e0c8f105dd70c600b8c31428ef7866cff.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2009-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if 0 #define FW_ENABLE_ASSERT //#define THRUST_DEBUG_SYNC //#define DEBUG #include "bvh/BVHNode.hpp" #include "bvh/GPUSplitBVHBuilder.hpp" #include "base/Sort.hpp" #include <thrust/swap.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/iterator/constant_iterator.h> #define HD __host__ __device__ //------------------------------------------------------------------------ FW::GPUSplitBVHBuilder::GPUSplitBVHBuilder(BVH& bvh, const BVH::BuildParams& params) : m_bvh(bvh), m_platform(bvh.getPlatform()), m_params(params), m_minOverlap(0.0f), m_sortDim(-1) { } //------------------------------------------------------------------------ FW::GPUSplitBVHBuilder::~GPUSplitBVHBuilder(void) { } //------------------------------------------------------------------------ FW::BVHNode* FW::GPUSplitBVHBuilder::run(void) { if (m_params.enablePrints) printf("SBVH alpha=%g minLeafSize=%d maxLeafSize=%d\n", m_params.splitAlpha, m_platform.getMinLeafSize(), m_platform.getMaxLeafSize()); // Initialize reference stack and determine root bounds. 
NodeSpec rootSpec; rootSpec.numRef = m_bvh.getScene()->getNumTriangles(); m_refStack.setManaged(true); m_refStack.resize(rootSpec.numRef); #if 0 const Vec3i* tris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getPtr(); const Vec3f* verts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getPtr(); for (int i = 0; i < rootSpec.numRef; i++) { m_refStack[i].triIdx = i; for (int j = 0; j < 3; j++) m_refStack[i].bounds.grow(verts[tris[i][j]]); rootSpec.bounds.grow(m_refStack[i].bounds); } #else // Do this here in run() to set up the pointers for the whole algorithm m_ptris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getCudaPtr(); m_pverts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getCudaPtr(); m_prefStack = m_refStack.getPtr(); // Do this in every function that uses a device lambda: const Vec3i* tris = m_ptris; const Vec3f* verts = m_pverts; Reference* refStack = m_prefStack; rootSpec.bounds = thrust::transform_reduce(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(rootSpec.numRef), [=] HD (S32 i) { refStack[i].triIdx = i; refStack[i].bounds = AABB(); for (int j = 0; j < 3; j++) refStack[i].bounds.grow(verts[tris[i][j]]); return refStack[i].bounds; }, AABB(), [] HD (AABB a, AABB b) { return a + b; }); hipDeviceSynchronize(); #endif // Initialize rest of the members. m_minOverlap = rootSpec.bounds.area() * m_params.splitAlpha; m_rightBounds.reset(max(rootSpec.numRef, (int)NumSpatialBins) - 1); m_numDuplicates = 0; m_progressTimer.start(); // Build recursively. BVHNode* root = buildNode(rootSpec, 0, 0.0f, 1.0f); m_bvh.getTriIndices().compact(); // Done. if (m_params.enablePrints) printf("GPUSplitBVHBuilder: progress %.0f%%, duplicates %.0f%%\n", 100.0f, (F32)m_numDuplicates / (F32)m_bvh.getScene()->getNumTriangles() * 100.0f); return root; } //------------------------------------------------------------------------ bool FW::GPUSplitBVHBuilder::sortCompare(void* data, int idxA, int idxB) { const GPUSplitBVHBuilder* ptr = (const GPUSplitBVHBuilder*)data; int dim = ptr->m_sortDim; const Reference& ra = ptr->m_refStack[idxA]; const Reference& rb = ptr->m_refStack[idxB]; F32 ca = ra.bounds.min()[dim] + ra.bounds.max()[dim]; F32 cb = rb.bounds.min()[dim] + rb.bounds.max()[dim]; return (ca < cb || (ca == cb && ra.triIdx < rb.triIdx)); } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::sortSwap(void* data, int idxA, int idxB) { GPUSplitBVHBuilder* ptr = (GPUSplitBVHBuilder*)data; swap(ptr->m_refStack[idxA], ptr->m_refStack[idxB]); } //------------------------------------------------------------------------ FW::BVHNode* FW::GPUSplitBVHBuilder::buildNode(NodeSpec spec, int level, F32 progressStart, F32 progressEnd) { // Display progress. if (m_params.enablePrints && m_progressTimer.getElapsed() >= 1.0f) { printf("GPUSplitBVHBuilder: progress %.0f%%, duplicates %.0f%%\r", progressStart * 100.0f, (F32)m_numDuplicates / (F32)m_bvh.getScene()->getNumTriangles() * 100.0f); m_progressTimer.start(); } // Remove degenerates. { int firstRef = m_refStack.getSize() - spec.numRef; for (int i = m_refStack.getSize() - 1; i >= firstRef; i--) { Vec3f size = m_refStack[i].bounds.max() - m_refStack[i].bounds.min(); if (min(size) < 0.0f || sum(size) == max(size)) m_refStack.removeSwap(i); } spec.numRef = m_refStack.getSize() - firstRef; } // Small enough or too deep => create leaf. if (spec.numRef <= m_platform.getMinLeafSize() || level >= MaxDepth) return createLeaf(spec); // Find split candidates. 
F32 area = spec.bounds.area(); F32 leafSAH = area * m_platform.getTriangleCost(spec.numRef); F32 nodeSAH = area * m_platform.getNodeCost(2); ObjectSplit object = findObjectSplit(spec, nodeSAH); SpatialSplit spatial; if (level < MaxSpatialDepth) { AABB overlap = object.leftBounds; overlap.intersect(object.rightBounds); if (overlap.area() >= m_minOverlap) spatial = findSpatialSplit(spec, nodeSAH); } // Leaf SAH is the lowest => create leaf. F32 minSAH = min(leafSAH, object.sah, spatial.sah); if (minSAH == leafSAH && spec.numRef <= m_platform.getMaxLeafSize()) return createLeaf(spec); // Perform split. NodeSpec left, right; if (minSAH == spatial.sah) performSpatialSplit(left, right, spec, spatial); if (!left.numRef || !right.numRef) performObjectSplit(left, right, spec, object); // Create inner node. m_numDuplicates += left.numRef + right.numRef - spec.numRef; F32 progressMid = lerp(progressStart, progressEnd, (F32)right.numRef / (F32)(left.numRef + right.numRef)); BVHNode* rightNode = buildNode(right, level + 1, progressStart, progressMid); BVHNode* leftNode = buildNode(left, level + 1, progressMid, progressEnd); return new InnerNode(spec.bounds, leftNode, rightNode); } //------------------------------------------------------------------------ FW::BVHNode* FW::GPUSplitBVHBuilder::createLeaf(const NodeSpec& spec) { Array<S32>& tris = m_bvh.getTriIndices(); for (int i = 0; i < spec.numRef; i++) tris.add(m_refStack.removeLast().triIdx); return new LeafNode(spec.bounds, tris.getSize() - spec.numRef, tris.getSize()); } //------------------------------------------------------------------------ namespace FW { void __host__ __device__ swap(FW::Reference& a, FW::Reference& b) { //printf("swap 0x%016llx 0x%016llx\n", (U64)&a, (U64)&b); thrust::swap(a, b); } }; void FW::GPUSplitBVHBuilder::sortHelper(size_t beg, size_t end, int mdim) { Reference* refStack = m_prefStack; int dim = mdim; //printf("%lld %lld\n", beg, end); thrust::sort(thrust::device, refStack + beg, refStack + end, [dim] __device__ (const Reference ra, const Reference rb) { F32 ca = ra.bounds.min()[dim] + ra.bounds.max()[dim]; F32 cb = rb.bounds.min()[dim] + rb.bounds.max()[dim]; return (ca < cb || (ca == cb && ra.triIdx < rb.triIdx)); }); } FW::GPUSplitBVHBuilder::ObjectSplit FW::GPUSplitBVHBuilder::findObjectSplit(const NodeSpec& spec, F32 nodeSAH) { ObjectSplit split; const Reference* refPtr = m_refStack.getPtr(m_refStack.getSize() - spec.numRef); F32 bestTieBreak = FW_F32_MAX; // Sort along each dimension. for (m_sortDim = 0; m_sortDim < 3; m_sortDim++) { if (spec.numRef < 100000) { sort(this, m_refStack.getSize() - spec.numRef, m_refStack.getSize(), sortCompare, sortSwap); } else { // PAR: Sort by centroid sortHelper(m_refStack.getSize() - spec.numRef, m_refStack.getSize(), m_sortDim); hipDeviceSynchronize(); } // Sweep right to left and determine bounds. // PAR: Is an inclusive scan AABB rightBounds; for (int i = spec.numRef - 1; i > 0; i--) { rightBounds.grow(refPtr[i].bounds); m_rightBounds[i - 1] = rightBounds; } // Sweep left to right and select lowest SAH. 
// PAR: Is an inclusive scan AABB leftBounds; for (int i = 1; i < spec.numRef; i++) { leftBounds.grow(refPtr[i - 1].bounds); F32 sah = nodeSAH + leftBounds.area() * m_platform.getTriangleCost(i) + m_rightBounds[i - 1].area() * m_platform.getTriangleCost(spec.numRef - i); F32 tieBreak = sqr((F32)i) + sqr((F32)(spec.numRef - i)); if (sah < split.sah || (sah == split.sah && tieBreak < bestTieBreak)) { split.sah = sah; split.sortDim = m_sortDim; split.numLeft = i; split.leftBounds = leftBounds; split.rightBounds = m_rightBounds[i - 1]; bestTieBreak = tieBreak; } } } return split; } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::performObjectSplit(NodeSpec& left, NodeSpec& right, const NodeSpec& spec, const ObjectSplit& split) { if (spec.numRef < 100000) { m_sortDim = split.sortDim; sort(this, m_refStack.getSize() - spec.numRef, m_refStack.getSize(), sortCompare, sortSwap); } else { sortHelper(m_refStack.getSize() - spec.numRef, m_refStack.getSize(), split.sortDim); hipDeviceSynchronize(); } left.numRef = split.numLeft; left.bounds = split.leftBounds; right.numRef = spec.numRef - split.numLeft; right.bounds = split.rightBounds; } //------------------------------------------------------------------------ FW::GPUSplitBVHBuilder::SpatialSplit FW::GPUSplitBVHBuilder::findSpatialSplit(const NodeSpec& spec, F32 nodeSAH) { if (spec.numRef > 100000) { printf("findSpatialSplit(%d)", spec.numRef); m_progressTimer.start(); } const Vec3i* tris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getPtr(); const Vec3f* verts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getPtr(); // Initialize bins. Vec3f origin = spec.bounds.min(); Vec3f binSize = (spec.bounds.max() - origin) * (1.0f / (F32)NumSpatialBins); Vec3f invBinSize = 1.0f / binSize; for (int dim = 0; dim < 3; dim++) { for (int i = 0; i < NumSpatialBins; i++) { SpatialBin& bin = m_bins[dim][i]; bin.bounds = AABB(); bin.enter = 0; bin.exit = 0; } } // Chop references into bins. for (int refIdx = m_refStack.getSize() - spec.numRef; refIdx < m_refStack.getSize(); refIdx++) { const Reference& ref = m_refStack[refIdx]; Vec3i firstBin = clamp(Vec3i((ref.bounds.min() - origin) * invBinSize), 0, NumSpatialBins - 1); Vec3i lastBin = clamp(Vec3i((ref.bounds.max() - origin) * invBinSize), firstBin, NumSpatialBins - 1); for (int dim = 0; dim < 3; dim++) { Reference currRef = ref; for (int i = firstBin[dim]; i < lastBin[dim]; i++) { Reference leftRef, rightRef; splitReference(leftRef, rightRef, currRef, dim, origin[dim] + binSize[dim] * (F32)(i + 1), tris, verts); m_bins[dim][i].bounds.grow(leftRef.bounds); currRef = rightRef; } m_bins[dim][lastBin[dim]].bounds.grow(currRef.bounds); m_bins[dim][firstBin[dim]].enter++; m_bins[dim][lastBin[dim]].exit++; } } // Select best split plane. SpatialSplit split; for (int dim = 0; dim < 3; dim++) { // Sweep right to left and determine bounds. AABB rightBounds; for (int i = NumSpatialBins - 1; i > 0; i--) { rightBounds.grow(m_bins[dim][i].bounds); m_rightBounds[i - 1] = rightBounds; } // Sweep left to right and select lowest SAH. 
AABB leftBounds; int leftNum = 0; int rightNum = spec.numRef; for (int i = 1; i < NumSpatialBins; i++) { leftBounds.grow(m_bins[dim][i - 1].bounds); leftNum += m_bins[dim][i - 1].enter; rightNum -= m_bins[dim][i - 1].exit; F32 sah = nodeSAH + leftBounds.area() * m_platform.getTriangleCost(leftNum) + m_rightBounds[i - 1].area() * m_platform.getTriangleCost(rightNum); if (sah < split.sah) { split.sah = sah; split.dim = dim; split.pos = origin[dim] + binSize[dim] * (F32)i; } } } if (spec.numRef > 100000) { printf(" t=%f\n", m_progressTimer.end()); } return split; } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::performSpatialSplit(NodeSpec& left, NodeSpec& right, const NodeSpec& spec, const SpatialSplit& split) { // Categorize references and compute bounds. // // Left-hand side: [leftStart, leftEnd[ // Uncategorized/split: [leftEnd, rightStart[ // Right-hand side: [rightStart, refs.getSize()[ const Vec3i* tris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getPtr(); const Vec3f* verts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getPtr(); Array<Reference>& refs = m_refStack; int leftStart = refs.getSize() - spec.numRef; int leftEnd = leftStart; int rightStart = refs.getSize(); left.bounds = right.bounds = AABB(); for (int i = leftEnd; i < rightStart; i++) { // Entirely on the left-hand side? if (refs[i].bounds.max()[split.dim] <= split.pos) { left.bounds.grow(refs[i].bounds); swap(refs[i], refs[leftEnd++]); } // Entirely on the right-hand side? else if (refs[i].bounds.min()[split.dim] >= split.pos) { right.bounds.grow(refs[i].bounds); swap(refs[i--], refs[--rightStart]); } } // Duplicate or unsplit references intersecting both sides. while (leftEnd < rightStart) { // Split reference. Reference lref, rref; splitReference(lref, rref, refs[leftEnd], split.dim, split.pos, tris, verts); // Compute SAH for duplicate/unsplit candidates. AABB lub = left.bounds; // Unsplit to left: new left-hand bounds. AABB rub = right.bounds; // Unsplit to right: new right-hand bounds. AABB ldb = left.bounds; // Duplicate: new left-hand bounds. AABB rdb = right.bounds; // Duplicate: new right-hand bounds. lub.grow(refs[leftEnd].bounds); rub.grow(refs[leftEnd].bounds); ldb.grow(lref.bounds); rdb.grow(rref.bounds); F32 lac = m_platform.getTriangleCost(leftEnd - leftStart); F32 rac = m_platform.getTriangleCost(refs.getSize() - rightStart); F32 lbc = m_platform.getTriangleCost(leftEnd - leftStart + 1); F32 rbc = m_platform.getTriangleCost(refs.getSize() - rightStart + 1); F32 unsplitLeftSAH = lub.area() * lbc + right.bounds.area() * rac; F32 unsplitRightSAH = left.bounds.area() * lac + rub.area() * rbc; F32 duplicateSAH = ldb.area() * lbc + rdb.area() * rbc; F32 minSAH = min(unsplitLeftSAH, unsplitRightSAH, duplicateSAH); // Unsplit to left? if (minSAH == unsplitLeftSAH) { left.bounds = lub; leftEnd++; } // Unsplit to right? else if (minSAH == unsplitRightSAH) { right.bounds = rub; swap(refs[leftEnd], refs[--rightStart]); } // Duplicate? else { left.bounds = ldb; right.bounds = rdb; refs[leftEnd++] = lref; refs.add(rref); } } left.numRef = leftEnd - leftStart; right.numRef = refs.getSize() - rightStart; } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::splitReference(Reference& left, Reference& right, const Reference& ref, int dim, F32 pos, const Vec3i* tris, const Vec3f* verts) { // Initialize references. 
left.triIdx = right.triIdx = ref.triIdx; left.bounds = right.bounds = AABB(); // Loop over vertices/edges. const Vec3i& inds = tris[ref.triIdx]; const Vec3f* v1 = &verts[inds.z]; for (int i = 0; i < 3; i++) { const Vec3f* v0 = v1; v1 = &verts[inds[i]]; F32 v0p = v0->get(dim); F32 v1p = v1->get(dim); // Insert vertex to the boxes it belongs to. if (v0p <= pos) left.bounds.grow(*v0); if (v0p >= pos) right.bounds.grow(*v0); // Edge intersects the plane => insert intersection to both boxes. if ((v0p < pos && v1p > pos) || (v0p > pos && v1p < pos)) { Vec3f t = lerp(*v0, *v1, clamp((pos - v0p) / (v1p - v0p), 0.0f, 1.0f)); left.bounds.grow(t); right.bounds.grow(t); } } // Intersect with original bounds. left.bounds.max()[dim] = pos; right.bounds.min()[dim] = pos; left.bounds.intersect(ref.bounds); right.bounds.intersect(ref.bounds); } //------------------------------------------------------------------------ #if 0 void FW::GPUSplitBVHBuilder::Benchy() { FW::Timer tim; tim.start(); const size_t N = 5000001; AABB final; unsigned long long i; for (i = 0; i < N; i++) { GPUSplitBVHBuilder::Reference rr = m_refStack[rand() % m_refStack.getSize()]; for (int s = 0; s < 10; s++) { int dim = rand() % 3; float split = rr.bounds.min()[dim] + myrandf() * (rr.bounds.max() - rr.bounds.min())[dim]; GPUSplitBVHBuilder::Reference leftRef, rightRef; GPUSplitBVHBuilder::splitReference(leftRef, rightRef, rr, dim, split); rr = leftRef.bounds.area() > rightRef.bounds.area() ? leftRef : rightRef; final.grow(rr.bounds); } } printf("G i=%lld s=%f t=%f\n", i, final.area(), tim.getElapsed()); } #endif #endif
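// Hedged standalone sketch of the centroid sort that sortHelper performs above with a device
// lambda; RefSketch and sort_by_centroid are stand-ins for FW::Reference introduced only for
// this example. Device lambdas need nvcc's extended-lambda support (--expt-extended-lambda,
// or --extended-lambda on newer toolkits), and refs must point to device or managed memory.
#include <thrust/sort.h>
#include <thrust/execution_policy.h>

struct RefSketch { int triIdx; float lo[3], hi[3]; };   // stand-in for FW::Reference

// Sort by bounding-box centroid along one axis, ties broken by triangle index,
// mirroring the comparator used in sortHelper.
inline void sort_by_centroid(RefSketch* refs, size_t n, int dim) {
    thrust::sort(thrust::device, refs, refs + n,
                 [dim] __device__ (const RefSketch& a, const RefSketch& b) {
                     float ca = a.lo[dim] + a.hi[dim];
                     float cb = b.lo[dim] + b.hi[dim];
                     return ca < cb || (ca == cb && a.triIdx < b.triIdx);
                 });
}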
14ffe82e0c8f105dd70c600b8c31428ef7866cff.cu
/* * Copyright (c) 2009-2011, NVIDIA Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA Corporation nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #if 0 #define FW_ENABLE_ASSERT //#define THRUST_DEBUG_SYNC //#define DEBUG #include "bvh/BVHNode.hpp" #include "bvh/GPUSplitBVHBuilder.hpp" #include "base/Sort.hpp" #include <thrust/swap.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/transform_reduce.h> #include <thrust/iterator/constant_iterator.h> #define HD __host__ __device__ //------------------------------------------------------------------------ FW::GPUSplitBVHBuilder::GPUSplitBVHBuilder(BVH& bvh, const BVH::BuildParams& params) : m_bvh(bvh), m_platform(bvh.getPlatform()), m_params(params), m_minOverlap(0.0f), m_sortDim(-1) { } //------------------------------------------------------------------------ FW::GPUSplitBVHBuilder::~GPUSplitBVHBuilder(void) { } //------------------------------------------------------------------------ FW::BVHNode* FW::GPUSplitBVHBuilder::run(void) { if (m_params.enablePrints) printf("SBVH alpha=%g minLeafSize=%d maxLeafSize=%d\n", m_params.splitAlpha, m_platform.getMinLeafSize(), m_platform.getMaxLeafSize()); // Initialize reference stack and determine root bounds. 
NodeSpec rootSpec; rootSpec.numRef = m_bvh.getScene()->getNumTriangles(); m_refStack.setManaged(true); m_refStack.resize(rootSpec.numRef); #if 0 const Vec3i* tris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getPtr(); const Vec3f* verts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getPtr(); for (int i = 0; i < rootSpec.numRef; i++) { m_refStack[i].triIdx = i; for (int j = 0; j < 3; j++) m_refStack[i].bounds.grow(verts[tris[i][j]]); rootSpec.bounds.grow(m_refStack[i].bounds); } #else // Do this here in run() to set up the pointers for the whole algorithm m_ptris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getCudaPtr(); m_pverts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getCudaPtr(); m_prefStack = m_refStack.getPtr(); // Do this in every function that uses a device lambda: const Vec3i* tris = m_ptris; const Vec3f* verts = m_pverts; Reference* refStack = m_prefStack; rootSpec.bounds = thrust::transform_reduce(thrust::device, thrust::make_counting_iterator(0), thrust::make_counting_iterator(rootSpec.numRef), [=] HD (S32 i) { refStack[i].triIdx = i; refStack[i].bounds = AABB(); for (int j = 0; j < 3; j++) refStack[i].bounds.grow(verts[tris[i][j]]); return refStack[i].bounds; }, AABB(), [] HD (AABB a, AABB b) { return a + b; }); cudaDeviceSynchronize(); #endif // Initialize rest of the members. m_minOverlap = rootSpec.bounds.area() * m_params.splitAlpha; m_rightBounds.reset(max(rootSpec.numRef, (int)NumSpatialBins) - 1); m_numDuplicates = 0; m_progressTimer.start(); // Build recursively. BVHNode* root = buildNode(rootSpec, 0, 0.0f, 1.0f); m_bvh.getTriIndices().compact(); // Done. if (m_params.enablePrints) printf("GPUSplitBVHBuilder: progress %.0f%%, duplicates %.0f%%\n", 100.0f, (F32)m_numDuplicates / (F32)m_bvh.getScene()->getNumTriangles() * 100.0f); return root; } //------------------------------------------------------------------------ bool FW::GPUSplitBVHBuilder::sortCompare(void* data, int idxA, int idxB) { const GPUSplitBVHBuilder* ptr = (const GPUSplitBVHBuilder*)data; int dim = ptr->m_sortDim; const Reference& ra = ptr->m_refStack[idxA]; const Reference& rb = ptr->m_refStack[idxB]; F32 ca = ra.bounds.min()[dim] + ra.bounds.max()[dim]; F32 cb = rb.bounds.min()[dim] + rb.bounds.max()[dim]; return (ca < cb || (ca == cb && ra.triIdx < rb.triIdx)); } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::sortSwap(void* data, int idxA, int idxB) { GPUSplitBVHBuilder* ptr = (GPUSplitBVHBuilder*)data; swap(ptr->m_refStack[idxA], ptr->m_refStack[idxB]); } //------------------------------------------------------------------------ FW::BVHNode* FW::GPUSplitBVHBuilder::buildNode(NodeSpec spec, int level, F32 progressStart, F32 progressEnd) { // Display progress. if (m_params.enablePrints && m_progressTimer.getElapsed() >= 1.0f) { printf("GPUSplitBVHBuilder: progress %.0f%%, duplicates %.0f%%\r", progressStart * 100.0f, (F32)m_numDuplicates / (F32)m_bvh.getScene()->getNumTriangles() * 100.0f); m_progressTimer.start(); } // Remove degenerates. { int firstRef = m_refStack.getSize() - spec.numRef; for (int i = m_refStack.getSize() - 1; i >= firstRef; i--) { Vec3f size = m_refStack[i].bounds.max() - m_refStack[i].bounds.min(); if (min(size) < 0.0f || sum(size) == max(size)) m_refStack.removeSwap(i); } spec.numRef = m_refStack.getSize() - firstRef; } // Small enough or too deep => create leaf. if (spec.numRef <= m_platform.getMinLeafSize() || level >= MaxDepth) return createLeaf(spec); // Find split candidates. 
F32 area = spec.bounds.area(); F32 leafSAH = area * m_platform.getTriangleCost(spec.numRef); F32 nodeSAH = area * m_platform.getNodeCost(2); ObjectSplit object = findObjectSplit(spec, nodeSAH); SpatialSplit spatial; if (level < MaxSpatialDepth) { AABB overlap = object.leftBounds; overlap.intersect(object.rightBounds); if (overlap.area() >= m_minOverlap) spatial = findSpatialSplit(spec, nodeSAH); } // Leaf SAH is the lowest => create leaf. F32 minSAH = min(leafSAH, object.sah, spatial.sah); if (minSAH == leafSAH && spec.numRef <= m_platform.getMaxLeafSize()) return createLeaf(spec); // Perform split. NodeSpec left, right; if (minSAH == spatial.sah) performSpatialSplit(left, right, spec, spatial); if (!left.numRef || !right.numRef) performObjectSplit(left, right, spec, object); // Create inner node. m_numDuplicates += left.numRef + right.numRef - spec.numRef; F32 progressMid = lerp(progressStart, progressEnd, (F32)right.numRef / (F32)(left.numRef + right.numRef)); BVHNode* rightNode = buildNode(right, level + 1, progressStart, progressMid); BVHNode* leftNode = buildNode(left, level + 1, progressMid, progressEnd); return new InnerNode(spec.bounds, leftNode, rightNode); } //------------------------------------------------------------------------ FW::BVHNode* FW::GPUSplitBVHBuilder::createLeaf(const NodeSpec& spec) { Array<S32>& tris = m_bvh.getTriIndices(); for (int i = 0; i < spec.numRef; i++) tris.add(m_refStack.removeLast().triIdx); return new LeafNode(spec.bounds, tris.getSize() - spec.numRef, tris.getSize()); } //------------------------------------------------------------------------ namespace FW { void __host__ __device__ swap(FW::Reference& a, FW::Reference& b) { //printf("swap 0x%016llx 0x%016llx\n", (U64)&a, (U64)&b); thrust::swap(a, b); } }; void FW::GPUSplitBVHBuilder::sortHelper(size_t beg, size_t end, int mdim) { Reference* refStack = m_prefStack; int dim = mdim; //printf("%lld %lld\n", beg, end); thrust::sort(thrust::device, refStack + beg, refStack + end, [dim] __device__ (const Reference ra, const Reference rb) { F32 ca = ra.bounds.min()[dim] + ra.bounds.max()[dim]; F32 cb = rb.bounds.min()[dim] + rb.bounds.max()[dim]; return (ca < cb || (ca == cb && ra.triIdx < rb.triIdx)); }); } FW::GPUSplitBVHBuilder::ObjectSplit FW::GPUSplitBVHBuilder::findObjectSplit(const NodeSpec& spec, F32 nodeSAH) { ObjectSplit split; const Reference* refPtr = m_refStack.getPtr(m_refStack.getSize() - spec.numRef); F32 bestTieBreak = FW_F32_MAX; // Sort along each dimension. for (m_sortDim = 0; m_sortDim < 3; m_sortDim++) { if (spec.numRef < 100000) { sort(this, m_refStack.getSize() - spec.numRef, m_refStack.getSize(), sortCompare, sortSwap); } else { // PAR: Sort by centroid sortHelper(m_refStack.getSize() - spec.numRef, m_refStack.getSize(), m_sortDim); cudaDeviceSynchronize(); } // Sweep right to left and determine bounds. // PAR: Is an inclusive scan AABB rightBounds; for (int i = spec.numRef - 1; i > 0; i--) { rightBounds.grow(refPtr[i].bounds); m_rightBounds[i - 1] = rightBounds; } // Sweep left to right and select lowest SAH. 
// PAR: Is an inclusive scan AABB leftBounds; for (int i = 1; i < spec.numRef; i++) { leftBounds.grow(refPtr[i - 1].bounds); F32 sah = nodeSAH + leftBounds.area() * m_platform.getTriangleCost(i) + m_rightBounds[i - 1].area() * m_platform.getTriangleCost(spec.numRef - i); F32 tieBreak = sqr((F32)i) + sqr((F32)(spec.numRef - i)); if (sah < split.sah || (sah == split.sah && tieBreak < bestTieBreak)) { split.sah = sah; split.sortDim = m_sortDim; split.numLeft = i; split.leftBounds = leftBounds; split.rightBounds = m_rightBounds[i - 1]; bestTieBreak = tieBreak; } } } return split; } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::performObjectSplit(NodeSpec& left, NodeSpec& right, const NodeSpec& spec, const ObjectSplit& split) { if (spec.numRef < 100000) { m_sortDim = split.sortDim; sort(this, m_refStack.getSize() - spec.numRef, m_refStack.getSize(), sortCompare, sortSwap); } else { sortHelper(m_refStack.getSize() - spec.numRef, m_refStack.getSize(), split.sortDim); cudaDeviceSynchronize(); } left.numRef = split.numLeft; left.bounds = split.leftBounds; right.numRef = spec.numRef - split.numLeft; right.bounds = split.rightBounds; } //------------------------------------------------------------------------ FW::GPUSplitBVHBuilder::SpatialSplit FW::GPUSplitBVHBuilder::findSpatialSplit(const NodeSpec& spec, F32 nodeSAH) { if (spec.numRef > 100000) { printf("findSpatialSplit(%d)", spec.numRef); m_progressTimer.start(); } const Vec3i* tris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getPtr(); const Vec3f* verts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getPtr(); // Initialize bins. Vec3f origin = spec.bounds.min(); Vec3f binSize = (spec.bounds.max() - origin) * (1.0f / (F32)NumSpatialBins); Vec3f invBinSize = 1.0f / binSize; for (int dim = 0; dim < 3; dim++) { for (int i = 0; i < NumSpatialBins; i++) { SpatialBin& bin = m_bins[dim][i]; bin.bounds = AABB(); bin.enter = 0; bin.exit = 0; } } // Chop references into bins. for (int refIdx = m_refStack.getSize() - spec.numRef; refIdx < m_refStack.getSize(); refIdx++) { const Reference& ref = m_refStack[refIdx]; Vec3i firstBin = clamp(Vec3i((ref.bounds.min() - origin) * invBinSize), 0, NumSpatialBins - 1); Vec3i lastBin = clamp(Vec3i((ref.bounds.max() - origin) * invBinSize), firstBin, NumSpatialBins - 1); for (int dim = 0; dim < 3; dim++) { Reference currRef = ref; for (int i = firstBin[dim]; i < lastBin[dim]; i++) { Reference leftRef, rightRef; splitReference(leftRef, rightRef, currRef, dim, origin[dim] + binSize[dim] * (F32)(i + 1), tris, verts); m_bins[dim][i].bounds.grow(leftRef.bounds); currRef = rightRef; } m_bins[dim][lastBin[dim]].bounds.grow(currRef.bounds); m_bins[dim][firstBin[dim]].enter++; m_bins[dim][lastBin[dim]].exit++; } } // Select best split plane. SpatialSplit split; for (int dim = 0; dim < 3; dim++) { // Sweep right to left and determine bounds. AABB rightBounds; for (int i = NumSpatialBins - 1; i > 0; i--) { rightBounds.grow(m_bins[dim][i].bounds); m_rightBounds[i - 1] = rightBounds; } // Sweep left to right and select lowest SAH. 
AABB leftBounds; int leftNum = 0; int rightNum = spec.numRef; for (int i = 1; i < NumSpatialBins; i++) { leftBounds.grow(m_bins[dim][i - 1].bounds); leftNum += m_bins[dim][i - 1].enter; rightNum -= m_bins[dim][i - 1].exit; F32 sah = nodeSAH + leftBounds.area() * m_platform.getTriangleCost(leftNum) + m_rightBounds[i - 1].area() * m_platform.getTriangleCost(rightNum); if (sah < split.sah) { split.sah = sah; split.dim = dim; split.pos = origin[dim] + binSize[dim] * (F32)i; } } } if (spec.numRef > 100000) { printf(" t=%f\n", m_progressTimer.end()); } return split; } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::performSpatialSplit(NodeSpec& left, NodeSpec& right, const NodeSpec& spec, const SpatialSplit& split) { // Categorize references and compute bounds. // // Left-hand side: [leftStart, leftEnd[ // Uncategorized/split: [leftEnd, rightStart[ // Right-hand side: [rightStart, refs.getSize()[ const Vec3i* tris = (const Vec3i*)m_bvh.getScene()->getTriVtxIndexBuffer().getPtr(); const Vec3f* verts = (const Vec3f*)m_bvh.getScene()->getVtxPosBuffer().getPtr(); Array<Reference>& refs = m_refStack; int leftStart = refs.getSize() - spec.numRef; int leftEnd = leftStart; int rightStart = refs.getSize(); left.bounds = right.bounds = AABB(); for (int i = leftEnd; i < rightStart; i++) { // Entirely on the left-hand side? if (refs[i].bounds.max()[split.dim] <= split.pos) { left.bounds.grow(refs[i].bounds); swap(refs[i], refs[leftEnd++]); } // Entirely on the right-hand side? else if (refs[i].bounds.min()[split.dim] >= split.pos) { right.bounds.grow(refs[i].bounds); swap(refs[i--], refs[--rightStart]); } } // Duplicate or unsplit references intersecting both sides. while (leftEnd < rightStart) { // Split reference. Reference lref, rref; splitReference(lref, rref, refs[leftEnd], split.dim, split.pos, tris, verts); // Compute SAH for duplicate/unsplit candidates. AABB lub = left.bounds; // Unsplit to left: new left-hand bounds. AABB rub = right.bounds; // Unsplit to right: new right-hand bounds. AABB ldb = left.bounds; // Duplicate: new left-hand bounds. AABB rdb = right.bounds; // Duplicate: new right-hand bounds. lub.grow(refs[leftEnd].bounds); rub.grow(refs[leftEnd].bounds); ldb.grow(lref.bounds); rdb.grow(rref.bounds); F32 lac = m_platform.getTriangleCost(leftEnd - leftStart); F32 rac = m_platform.getTriangleCost(refs.getSize() - rightStart); F32 lbc = m_platform.getTriangleCost(leftEnd - leftStart + 1); F32 rbc = m_platform.getTriangleCost(refs.getSize() - rightStart + 1); F32 unsplitLeftSAH = lub.area() * lbc + right.bounds.area() * rac; F32 unsplitRightSAH = left.bounds.area() * lac + rub.area() * rbc; F32 duplicateSAH = ldb.area() * lbc + rdb.area() * rbc; F32 minSAH = min(unsplitLeftSAH, unsplitRightSAH, duplicateSAH); // Unsplit to left? if (minSAH == unsplitLeftSAH) { left.bounds = lub; leftEnd++; } // Unsplit to right? else if (minSAH == unsplitRightSAH) { right.bounds = rub; swap(refs[leftEnd], refs[--rightStart]); } // Duplicate? else { left.bounds = ldb; right.bounds = rdb; refs[leftEnd++] = lref; refs.add(rref); } } left.numRef = leftEnd - leftStart; right.numRef = refs.getSize() - rightStart; } //------------------------------------------------------------------------ void FW::GPUSplitBVHBuilder::splitReference(Reference& left, Reference& right, const Reference& ref, int dim, F32 pos, const Vec3i* tris, const Vec3f* verts) { // Initialize references. 
left.triIdx = right.triIdx = ref.triIdx; left.bounds = right.bounds = AABB(); // Loop over vertices/edges. const Vec3i& inds = tris[ref.triIdx]; const Vec3f* v1 = &verts[inds.z]; for (int i = 0; i < 3; i++) { const Vec3f* v0 = v1; v1 = &verts[inds[i]]; F32 v0p = v0->get(dim); F32 v1p = v1->get(dim); // Insert vertex to the boxes it belongs to. if (v0p <= pos) left.bounds.grow(*v0); if (v0p >= pos) right.bounds.grow(*v0); // Edge intersects the plane => insert intersection to both boxes. if ((v0p < pos && v1p > pos) || (v0p > pos && v1p < pos)) { Vec3f t = lerp(*v0, *v1, clamp((pos - v0p) / (v1p - v0p), 0.0f, 1.0f)); left.bounds.grow(t); right.bounds.grow(t); } } // Intersect with original bounds. left.bounds.max()[dim] = pos; right.bounds.min()[dim] = pos; left.bounds.intersect(ref.bounds); right.bounds.intersect(ref.bounds); } //------------------------------------------------------------------------ #if 0 void FW::GPUSplitBVHBuilder::Benchy() { FW::Timer tim; tim.start(); const size_t N = 5000001; AABB final; unsigned long long i; for (i = 0; i < N; i++) { GPUSplitBVHBuilder::Reference rr = m_refStack[rand() % m_refStack.getSize()]; for (int s = 0; s < 10; s++) { int dim = rand() % 3; float split = rr.bounds.min()[dim] + myrandf() * (rr.bounds.max() - rr.bounds.min())[dim]; GPUSplitBVHBuilder::Reference leftRef, rightRef; GPUSplitBVHBuilder::splitReference(leftRef, rightRef, rr, dim, split); rr = leftRef.bounds.area() > rightRef.bounds.area() ? leftRef : rightRef; final.grow(rr.bounds); } } printf("G i=%lld s=%f t=%f\n", i, final.area(), tim.getElapsed()); } #endif #endif
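Aside: run() in the CUDA file above computes the root bounds with thrust::transform_reduce, mapping each reference to its AABB and reducing with a box-union operator (it also initializes each reference as a side effect, which the sketch below drops). Here is a reduced sketch of that map-then-union pattern written with plain __host__ __device__ functors so it does not rely on nvcc's extended-lambda support; Box, PointToBox, and MergeBox are made-up names for this example, not the builder's types.

// Sketch: device-side AABB reduction over a point set via thrust::transform_reduce.
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/execution_policy.h>
#include <cstdio>
#include <cfloat>

struct Box { float3 lo, hi; };

struct PointToBox {
  __host__ __device__ Box operator()(float3 p) const { return Box{p, p}; }
};

struct MergeBox {
  __host__ __device__ Box operator()(Box a, Box b) const {
    return Box{make_float3(fminf(a.lo.x, b.lo.x), fminf(a.lo.y, b.lo.y), fminf(a.lo.z, b.lo.z)),
               make_float3(fmaxf(a.hi.x, b.hi.x), fmaxf(a.hi.y, b.hi.y), fmaxf(a.hi.z, b.hi.z))};
  }
};

int main() {
  thrust::device_vector<float3> pts(3);
  pts[0] = make_float3( 0.f,  1.f, 2.f);
  pts[1] = make_float3(-1.f,  5.f, 0.f);
  pts[2] = make_float3( 4.f, -2.f, 3.f);

  Box empty{make_float3( FLT_MAX,  FLT_MAX,  FLT_MAX),
            make_float3(-FLT_MAX, -FLT_MAX, -FLT_MAX)};

  // Same shape as the root-bounds computation above: map each element to a box,
  // then reduce all boxes with a union operator.
  Box root = thrust::transform_reduce(thrust::device, pts.begin(), pts.end(),
                                      PointToBox{}, empty, MergeBox{});

  printf("lo=(%g,%g,%g) hi=(%g,%g,%g)\n",
         root.lo.x, root.lo.y, root.lo.z, root.hi.x, root.hi.y, root.hi.z);
  return 0;
}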
8a1146394fe166564e1d42994b72c4741d48f2e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/math_binary_elementwise_func.h" namespace oneflow { namespace { template<template<typename> class BinaryFunctor, typename T> __global__ void MathBinaryElementwiseForwardGpu(const int n, const T* x, const T* y, T* z) { CUDA_1D_KERNEL_LOOP(i, n) { z[i] = BinaryFunctor<T>::Forward(x[i], y[i]); } } template<template<typename> class BinaryFunctor, typename T> __global__ void MathBinaryElementwiseBackwardXGradGpu(const int n, const T* x, const T* y, const T* dz, T* dx) { CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = BinaryFunctor<T>::BackwardXGrad(x[i], y[i], dz[i]); } } template<template<typename> class BinaryFunctor, typename T> __global__ void MathBinaryElementwiseBackwardYGradGpu(const int n, const T* x, const T* y, const T* dz, T* dy) { CUDA_1D_KERNEL_LOOP(i, n) { dy[i] = BinaryFunctor<T>::BackwardYGrad(x[i], y[i], dz[i]); } } } // namespace template<template<typename> class BinaryFunctor, typename T> class MathBinaryElementwiseGpuKernel final : public user_op::OpKernel { public: MathBinaryElementwiseGpuKernel() = default; ~MathBinaryElementwiseGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); user_op::Tensor* tensor_z = ctx->Tensor4ArgNameAndIndex("z", 0); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } hipLaunchKernelGGL(( MathBinaryElementwiseForwardGpu<BinaryFunctor, T>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), n, tensor_x->dptr<T>(), tensor_y->dptr<T>(), tensor_z->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<template<typename> class BinaryFunctor, typename T> class MathBinaryElementwiseXGradGpuKernel final : public user_op::OpKernel { public: MathBinaryElementwiseXGradGpuKernel() = default; ~MathBinaryElementwiseXGradGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } hipLaunchKernelGGL(( MathBinaryElementwiseBackwardXGradGpu<BinaryFunctor, T>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), n, tensor_x->dptr<T>(), tensor_y->dptr<T>(), tensor_dz->dptr<T>(), tensor_dx->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() 
const override { return false; } }; template<template<typename> class BinaryFunctor, typename T> class MathBinaryElementwiseYGradGpuKernel final : public user_op::OpKernel { public: MathBinaryElementwiseYGradGpuKernel() = default; ~MathBinaryElementwiseYGradGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } hipLaunchKernelGGL(( MathBinaryElementwiseBackwardYGradGpu<BinaryFunctor, T>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), n, tensor_x->dptr<T>(), tensor_y->dptr<T>(), tensor_dz->dptr<T>(), tensor_dy->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MATH_BINARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD(math_type_pair, data_type_pair) \ REGISTER_USER_KERNEL(OF_PP_PAIR_FIRST(math_type_pair)) \ .SetCreateFn< \ MathBinaryElementwiseGpuKernel<OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \ OF_PP_PAIR_FIRST(data_type_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair))); \ \ REGISTER_USER_KERNEL((std::string("") + OF_PP_PAIR_FIRST(math_type_pair) + "_x_grad")) \ .SetCreateFn<MathBinaryElementwiseXGradGpuKernel< \ OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \ OF_PP_PAIR_FIRST(data_type_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair))); \ REGISTER_USER_KERNEL((std::string("") + OF_PP_PAIR_FIRST(math_type_pair) + "_y_grad")) \ .SetCreateFn<MathBinaryElementwiseYGradGpuKernel< \ OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \ OF_PP_PAIR_FIRST(data_type_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_MATH_BINARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD, MATH_BINARY_ELEMENTWISE_FUNC_SEQ, FLOATING_DATA_TYPE_SEQ) template<template<typename> class BinaryFunctor> class MathBinaryElementwiseGpuHalfKernel final : public user_op::OpKernel { public: MathBinaryElementwiseGpuHalfKernel() = default; ~MathBinaryElementwiseGpuHalfKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); user_op::Tensor* tensor_z = ctx->Tensor4ArgNameAndIndex("z", 0); const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>()); const half* y = reinterpret_cast<const half*>(tensor_y->dptr<float16>()); half* z = reinterpret_cast<half*>(tensor_z->mut_dptr<float16>()); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } hipLaunchKernelGGL(( MathBinaryElementwiseForwardGpu<BinaryFunctor, half>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), n, x, y, z); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<template<typename> class BinaryFunctor> class 
MathBinaryElementwiseXGradGpuHalfKernel final : public user_op::OpKernel { public: MathBinaryElementwiseXGradGpuHalfKernel() = default; ~MathBinaryElementwiseXGradGpuHalfKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>()); const half* y = reinterpret_cast<const half*>(tensor_y->dptr<float16>()); const half* dz = reinterpret_cast<const half*>(tensor_dz->dptr<float16>()); half* dx = reinterpret_cast<half*>(tensor_dx->mut_dptr<float16>()); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } hipLaunchKernelGGL(( MathBinaryElementwiseBackwardXGradGpu<BinaryFunctor, half>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), n, x, y, dz, dx); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<template<typename> class BinaryFunctor> class MathBinaryElementwiseYGradGpuHalfKernel final : public user_op::OpKernel { public: MathBinaryElementwiseYGradGpuHalfKernel() = default; ~MathBinaryElementwiseYGradGpuHalfKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>()); const half* y = reinterpret_cast<const half*>(tensor_y->dptr<float16>()); const half* dz = reinterpret_cast<const half*>(tensor_dz->dptr<float16>()); half* dy = reinterpret_cast<half*>(tensor_dy->mut_dptr<float16>()); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } hipLaunchKernelGGL(( MathBinaryElementwiseBackwardYGradGpu<BinaryFunctor, half>) , dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), n, x, y, dz, dy); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MATH_BINARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD(math_type_str, math_func_prefix) \ REGISTER_USER_KERNEL(math_type_str) \ .SetCreateFn<MathBinaryElementwiseGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == DataType::kFloat16)); \ \ REGISTER_USER_KERNEL((std::string("") + math_type_str + "_x_grad")) \ .SetCreateFn< \ MathBinaryElementwiseXGradGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == DataType::kFloat16)); \ REGISTER_USER_KERNEL((std::string("") + math_type_str + "_y_grad")) \ .SetCreateFn< \ MathBinaryElementwiseYGradGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == DataType::kFloat16)); OF_PP_FOR_EACH_TUPLE(REGISTER_MATH_BINARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD, MATH_BINARY_ELEMENTWISE_FUNC_SEQ) } // namespace oneflow
8a1146394fe166564e1d42994b72c4741d48f2e2.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/math_binary_elementwise_func.h" namespace oneflow { namespace { template<template<typename> class BinaryFunctor, typename T> __global__ void MathBinaryElementwiseForwardGpu(const int n, const T* x, const T* y, T* z) { CUDA_1D_KERNEL_LOOP(i, n) { z[i] = BinaryFunctor<T>::Forward(x[i], y[i]); } } template<template<typename> class BinaryFunctor, typename T> __global__ void MathBinaryElementwiseBackwardXGradGpu(const int n, const T* x, const T* y, const T* dz, T* dx) { CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = BinaryFunctor<T>::BackwardXGrad(x[i], y[i], dz[i]); } } template<template<typename> class BinaryFunctor, typename T> __global__ void MathBinaryElementwiseBackwardYGradGpu(const int n, const T* x, const T* y, const T* dz, T* dy) { CUDA_1D_KERNEL_LOOP(i, n) { dy[i] = BinaryFunctor<T>::BackwardYGrad(x[i], y[i], dz[i]); } } } // namespace template<template<typename> class BinaryFunctor, typename T> class MathBinaryElementwiseGpuKernel final : public user_op::OpKernel { public: MathBinaryElementwiseGpuKernel() = default; ~MathBinaryElementwiseGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); user_op::Tensor* tensor_z = ctx->Tensor4ArgNameAndIndex("z", 0); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } MathBinaryElementwiseForwardGpu<BinaryFunctor, T> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( n, tensor_x->dptr<T>(), tensor_y->dptr<T>(), tensor_z->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<template<typename> class BinaryFunctor, typename T> class MathBinaryElementwiseXGradGpuKernel final : public user_op::OpKernel { public: MathBinaryElementwiseXGradGpuKernel() = default; ~MathBinaryElementwiseXGradGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } MathBinaryElementwiseBackwardXGradGpu<BinaryFunctor, T> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( n, tensor_x->dptr<T>(), tensor_y->dptr<T>(), tensor_dz->dptr<T>(), tensor_dx->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<template<typename> class BinaryFunctor, typename T> class MathBinaryElementwiseYGradGpuKernel final : 
public user_op::OpKernel { public: MathBinaryElementwiseYGradGpuKernel() = default; ~MathBinaryElementwiseYGradGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } MathBinaryElementwiseBackwardYGradGpu<BinaryFunctor, T> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( n, tensor_x->dptr<T>(), tensor_y->dptr<T>(), tensor_dz->dptr<T>(), tensor_dy->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MATH_BINARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD(math_type_pair, data_type_pair) \ REGISTER_USER_KERNEL(OF_PP_PAIR_FIRST(math_type_pair)) \ .SetCreateFn< \ MathBinaryElementwiseGpuKernel<OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \ OF_PP_PAIR_FIRST(data_type_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair))); \ \ REGISTER_USER_KERNEL((std::string("") + OF_PP_PAIR_FIRST(math_type_pair) + "_x_grad")) \ .SetCreateFn<MathBinaryElementwiseXGradGpuKernel< \ OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \ OF_PP_PAIR_FIRST(data_type_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair))); \ REGISTER_USER_KERNEL((std::string("") + OF_PP_PAIR_FIRST(math_type_pair) + "_y_grad")) \ .SetCreateFn<MathBinaryElementwiseYGradGpuKernel< \ OF_PP_CAT(OF_PP_PAIR_SECOND(math_type_pair), Functor), \ OF_PP_PAIR_FIRST(data_type_pair)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == OF_PP_PAIR_SECOND(data_type_pair))); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_MATH_BINARY_ELEMENTWISE_GPU_KERNEL_AND_GRAD, MATH_BINARY_ELEMENTWISE_FUNC_SEQ, FLOATING_DATA_TYPE_SEQ) template<template<typename> class BinaryFunctor> class MathBinaryElementwiseGpuHalfKernel final : public user_op::OpKernel { public: MathBinaryElementwiseGpuHalfKernel() = default; ~MathBinaryElementwiseGpuHalfKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); user_op::Tensor* tensor_z = ctx->Tensor4ArgNameAndIndex("z", 0); const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>()); const half* y = reinterpret_cast<const half*>(tensor_y->dptr<float16>()); half* z = reinterpret_cast<half*>(tensor_z->mut_dptr<float16>()); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } MathBinaryElementwiseForwardGpu<BinaryFunctor, half> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( n, x, y, z); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<template<typename> class BinaryFunctor> class MathBinaryElementwiseXGradGpuHalfKernel final : public user_op::OpKernel { public: MathBinaryElementwiseXGradGpuHalfKernel() = default; ~MathBinaryElementwiseXGradGpuHalfKernel() = default; private: void 
Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dx = ctx->Tensor4ArgNameAndIndex("dx", 0); const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>()); const half* y = reinterpret_cast<const half*>(tensor_y->dptr<float16>()); const half* dz = reinterpret_cast<const half*>(tensor_dz->dptr<float16>()); half* dx = reinterpret_cast<half*>(tensor_dx->mut_dptr<float16>()); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } MathBinaryElementwiseBackwardXGradGpu<BinaryFunctor, half> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( n, x, y, dz, dx); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<template<typename> class BinaryFunctor> class MathBinaryElementwiseYGradGpuHalfKernel final : public user_op::OpKernel { public: MathBinaryElementwiseYGradGpuHalfKernel() = default; ~MathBinaryElementwiseYGradGpuHalfKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* tensor_x = ctx->Tensor4ArgNameAndIndex("x", 0); const user_op::Tensor* tensor_y = ctx->Tensor4ArgNameAndIndex("y", 0); const user_op::Tensor* tensor_dz = ctx->Tensor4ArgNameAndIndex("dz", 0); user_op::Tensor* tensor_dy = ctx->Tensor4ArgNameAndIndex("dy", 0); const half* x = reinterpret_cast<const half*>(tensor_x->dptr<float16>()); const half* y = reinterpret_cast<const half*>(tensor_y->dptr<float16>()); const half* dz = reinterpret_cast<const half*>(tensor_dz->dptr<float16>()); half* dy = reinterpret_cast<half*>(tensor_dy->mut_dptr<float16>()); int64_t n = tensor_x->shape().elem_cnt(); CHECK_LE(n, GetMaxVal<int32_t>() / 2); if (n == 0) { return; } MathBinaryElementwiseBackwardYGradGpu<BinaryFunctor, half> <<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( n, x, y, dz, dy); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_MATH_BINARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD(math_type_str, math_func_prefix) \ REGISTER_USER_KERNEL(math_type_str) \ .SetCreateFn<MathBinaryElementwiseGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == DataType::kFloat16)); \ \ REGISTER_USER_KERNEL((std::string("") + math_type_str + "_x_grad")) \ .SetCreateFn< \ MathBinaryElementwiseXGradGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == DataType::kFloat16)); \ REGISTER_USER_KERNEL((std::string("") + math_type_str + "_y_grad")) \ .SetCreateFn< \ MathBinaryElementwiseYGradGpuHalfKernel<OF_PP_CAT(math_func_prefix, Functor)>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("x", 0) == DataType::kFloat16)); OF_PP_FOR_EACH_TUPLE(REGISTER_MATH_BINARY_ELEMENTWISE_GPU_HALF_KERNEL_AND_GRAD, MATH_BINARY_ELEMENTWISE_FUNC_SEQ) } // namespace oneflow
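Aside: every kernel in the pair above follows the same elementwise pattern — a 1-D grid-stride loop (hidden behind OneFlow's CUDA_1D_KERNEL_LOOP macro) applying a binary functor to x and y, launched with a block count derived from the element count. The sketch below strips away the functor templates and registration macros to show just that pattern; the explicit loop and the block size of 512 are stand-ins for the macro and kCudaThreadsNumPerBlock, whose definitions are not shown in this file.

// Sketch: grid-stride elementwise binary op, z[i] = x[i] + y[i]
// (stand-in for BinaryFunctor<T>::Forward in the code above).
#include <cuda_runtime.h>
#include <cstdio>

__global__ void AddForward(int n, const float* x, const float* y, float* z) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x) {
    z[i] = x[i] + y[i];
  }
}

int main() {
  const int n = 1 << 20;
  float *x, *y, *z;
  cudaMallocManaged(&x, n * sizeof(float));
  cudaMallocManaged(&y, n * sizeof(float));
  cudaMallocManaged(&z, n * sizeof(float));
  for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

  const int threads = 512;                        // illustrative block size
  const int blocks = (n + threads - 1) / threads; // cover all n elements
  AddForward<<<blocks, threads>>>(n, x, y, z);
  cudaDeviceSynchronize();

  printf("z[0] = %f\n", z[0]);                    // expect 3.0
  cudaFree(x); cudaFree(y); cudaFree(z);
  return 0;
}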
577d0ced3e2ab87cdcb8d83ffb79aca9832c7780.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "sh_handler.h" #define THREAD_NUM 512 #define RING_SIZE (PKT_BATCH_SIZE * THREAD_NUM) #define ONELINE 6 #define DUMP 0 #define TX 0 unsigned char * rx_pkt_buf; unsigned char * tx_pkt_buf; static int idx; int * rx_pkt_cnt; int tx_idx; int * pkt_batch_num; extern "C" int monotonic_time() { struct timespec timespec; clock_gettime(CLOCK_MONOTONIC, &timespec); return timespec.tv_sec * ONE_SEC + timespec.tv_nsec; } #if DUMP __global__ void print_gpu(unsigned char* d_pkt_buf, int * pkt_num) { int i; int total_pkt_num = *pkt_num * PKT_SIZE; START_RED printf("[GPU]: pkt_num = %d\n", *pkt_num); for(i = 0; i < total_pkt_num; i++) { if(i != 0 && i % ONELINE == 0) printf("\n"); if(i != 0 && i % PKT_SIZE == 0) printf("\n"); printf("%02x ", d_pkt_buf[i]); } printf("\n\n"); END } #endif __device__ void mani_pkt_gpu(unsigned char * d_pkt_buf) { int i; unsigned char tmp[6] = { 0 }; // Swap mac for(i = 0; i < 6; i++){ tmp[i] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 6]; d_pkt_buf[i + 6] = tmp[i]; } // Swap ip for(i = 26; i < 30; i++){ tmp[i-26] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 4]; d_pkt_buf[i + 4] = tmp[i-26]; } // Swap port for(i = 34; i < 36; i++){ tmp[i-34] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 2]; d_pkt_buf[i + 2] = tmp[i-34]; } //Manipulatate data for(i = 36; i < PKT_SIZE; i++){ d_pkt_buf[i] = 0; } } extern "C" int copy_to_gpu(unsigned char* buf, int pkt_num) { hipMemcpy(rx_pkt_buf + (idx * PKT_BATCH_SIZE), buf, sizeof(unsigned char) * pkt_num * PKT_SIZE, hipMemcpyHostToDevice); hipMemcpy(pkt_batch_num + idx, &pkt_num, sizeof(int), hipMemcpyHostToDevice); #if DUMP hipLaunchKernelGGL(( print_gpu), dim3(1),dim3(1), 0, 0, rx_pkt_buf + (idx * PKT_BATCH_SIZE), pkt_batch_num + idx); hipDeviceSynchronize(); #endif idx++; if(idx == THREAD_NUM) idx = 0; return 1; } extern "C" void set_gpu_mem_for_dpdk(void) { idx = 0; tx_idx = 0; START_BLU printf("RING_SIZE = %d\n", RING_SIZE); printf("PKT_SIZE = %d, PKT_BATCH = %d + %d\n", PKT_SIZE, PKT_BATCH - RX_NB, RX_NB); END ASSERTRT(hipMalloc((void**)&rx_pkt_buf, RING_SIZE)); ASSERTRT(hipMemset(rx_pkt_buf, 0, RING_SIZE)); ASSERTRT(hipMalloc((void**)&tx_pkt_buf, RING_SIZE)); ASSERTRT(hipMemset(tx_pkt_buf, 0, RING_SIZE)); ASSERTRT(hipMalloc((void**)&rx_pkt_cnt, sizeof(int))); ASSERTRT(hipMemset(rx_pkt_cnt, 0, sizeof(int))); ASSERTRT(hipMalloc((void**)&pkt_batch_num, sizeof(int) * THREAD_NUM)); ASSERTRT(hipMemset(pkt_batch_num, 0, sizeof(int) * THREAD_NUM)); START_GRN printf("[Done]____GPU mem set for dpdk____\n"); END } extern "C" int get_rx_cnt(void) { int rx_cur_pkt = 0; hipMemcpy(&rx_cur_pkt, rx_pkt_cnt, sizeof(int), hipMemcpyDeviceToHost); hipMemset(rx_pkt_cnt, 0, sizeof(int)); return rx_cur_pkt; } extern "C" int get_tx_buf(unsigned char* tx_buf) { int tx_cur_pkt = 0; hipMemcpy(tx_buf, tx_pkt_buf + (tx_idx * PKT_BATCH_SIZE), sizeof(unsigned char) * PKT_BATCH_SIZE, hipMemcpyDeviceToHost); // hipMemcpy(&tx_cur_pkt, pkt_batch_num + tx_idx, sizeof(int), hipMemcpyDeviceToHost); tx_idx++; if(tx_idx == THREAD_NUM) tx_idx = 0; return tx_cur_pkt; } __global__ void gpu_monitor(unsigned char * rx_pkt_buf, unsigned char * tx_pkt_buf, int * rx_pkt_cnt, int * pkt_batch_num) { int mem_index = PKT_BATCH_SIZE * threadIdx.x; int batch_num; // PKT_BATCH_SIZE 64 * 512 // THREAD_NUM 512 // RING_SIZE (PKT_BATCH_SIZE * THREAD_NUM) __syncthreads(); if(pkt_batch_num[threadIdx.x] != 0 && rx_pkt_buf[mem_index + ((pkt_batch_num[threadIdx.x] - 1) * PKT_SIZE)] != 0) { 
__syncthreads(); batch_num = pkt_batch_num[threadIdx.x]; __syncthreads(); rx_pkt_buf[mem_index + ((batch_num - 1) * PKT_SIZE)] = 0; __syncthreads(); atomicAdd(rx_pkt_cnt, batch_num); #if TX __syncthreads(); memcpy(tx_pkt_buf + mem_index, rx_pkt_buf + mem_index, PKT_BATCH_SIZE); /* for(int i = 0; i < batch_num; i++) { __syncthreads(); mani_pkt_gpu(tx_pkt_buf + mem_index + i * PKT_SIZE); } */ #endif __syncthreads(); memset(pkt_batch_num + threadIdx.x, 0, sizeof(int)); } } #if 0 __global__ void gpu_mani_loop(unsigned char * tx_pkt_buf,int * pkt_batch_num) { __syncthreads(); if(pkt_batch_num[threadIdx.x] != 0 && rx_pkt_buf[mem_index + ((pkt_batch_num[threadIdx.x] - 1) * PKT_SIZE)] != 0) { __syncthreads(); memcpy(tx_pkt_buf + mem_index, rx_pkt_buf + mem_index, PKT_BATCH_SIZE); __syncthreads(); mani_pkt_gpu(tx_pkt_buf + mem_index + i * PKT_SIZE); } } #endif extern "C" void gpu_monitor_loop(void) { hipStream_t stream; ASSERTRT(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); while(true) { hipLaunchKernelGGL(( gpu_monitor), dim3(1), dim3(THREAD_NUM), 0, stream, rx_pkt_buf, tx_pkt_buf, rx_pkt_cnt, pkt_batch_num); hipDeviceSynchronize(); } }
577d0ced3e2ab87cdcb8d83ffb79aca9832c7780.cu
#include "sh_handler.h" #define THREAD_NUM 512 #define RING_SIZE (PKT_BATCH_SIZE * THREAD_NUM) #define ONELINE 6 #define DUMP 0 #define TX 0 unsigned char * rx_pkt_buf; unsigned char * tx_pkt_buf; static int idx; int * rx_pkt_cnt; int tx_idx; int * pkt_batch_num; extern "C" int monotonic_time() { struct timespec timespec; clock_gettime(CLOCK_MONOTONIC, &timespec); return timespec.tv_sec * ONE_SEC + timespec.tv_nsec; } #if DUMP __global__ void print_gpu(unsigned char* d_pkt_buf, int * pkt_num) { int i; int total_pkt_num = *pkt_num * PKT_SIZE; START_RED printf("[GPU]: pkt_num = %d\n", *pkt_num); for(i = 0; i < total_pkt_num; i++) { if(i != 0 && i % ONELINE == 0) printf("\n"); if(i != 0 && i % PKT_SIZE == 0) printf("\n"); printf("%02x ", d_pkt_buf[i]); } printf("\n\n"); END } #endif __device__ void mani_pkt_gpu(unsigned char * d_pkt_buf) { int i; unsigned char tmp[6] = { 0 }; // Swap mac for(i = 0; i < 6; i++){ tmp[i] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 6]; d_pkt_buf[i + 6] = tmp[i]; } // Swap ip for(i = 26; i < 30; i++){ tmp[i-26] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 4]; d_pkt_buf[i + 4] = tmp[i-26]; } // Swap port for(i = 34; i < 36; i++){ tmp[i-34] = d_pkt_buf[i]; d_pkt_buf[i] = d_pkt_buf[i + 2]; d_pkt_buf[i + 2] = tmp[i-34]; } //Manipulatate data for(i = 36; i < PKT_SIZE; i++){ d_pkt_buf[i] = 0; } } extern "C" int copy_to_gpu(unsigned char* buf, int pkt_num) { cudaMemcpy(rx_pkt_buf + (idx * PKT_BATCH_SIZE), buf, sizeof(unsigned char) * pkt_num * PKT_SIZE, cudaMemcpyHostToDevice); cudaMemcpy(pkt_batch_num + idx, &pkt_num, sizeof(int), cudaMemcpyHostToDevice); #if DUMP print_gpu<<<1,1>>>(rx_pkt_buf + (idx * PKT_BATCH_SIZE), pkt_batch_num + idx); cudaDeviceSynchronize(); #endif idx++; if(idx == THREAD_NUM) idx = 0; return 1; } extern "C" void set_gpu_mem_for_dpdk(void) { idx = 0; tx_idx = 0; START_BLU printf("RING_SIZE = %d\n", RING_SIZE); printf("PKT_SIZE = %d, PKT_BATCH = %d + %d\n", PKT_SIZE, PKT_BATCH - RX_NB, RX_NB); END ASSERTRT(cudaMalloc((void**)&rx_pkt_buf, RING_SIZE)); ASSERTRT(cudaMemset(rx_pkt_buf, 0, RING_SIZE)); ASSERTRT(cudaMalloc((void**)&tx_pkt_buf, RING_SIZE)); ASSERTRT(cudaMemset(tx_pkt_buf, 0, RING_SIZE)); ASSERTRT(cudaMalloc((void**)&rx_pkt_cnt, sizeof(int))); ASSERTRT(cudaMemset(rx_pkt_cnt, 0, sizeof(int))); ASSERTRT(cudaMalloc((void**)&pkt_batch_num, sizeof(int) * THREAD_NUM)); ASSERTRT(cudaMemset(pkt_batch_num, 0, sizeof(int) * THREAD_NUM)); START_GRN printf("[Done]____GPU mem set for dpdk____\n"); END } extern "C" int get_rx_cnt(void) { int rx_cur_pkt = 0; cudaMemcpy(&rx_cur_pkt, rx_pkt_cnt, sizeof(int), cudaMemcpyDeviceToHost); cudaMemset(rx_pkt_cnt, 0, sizeof(int)); return rx_cur_pkt; } extern "C" int get_tx_buf(unsigned char* tx_buf) { int tx_cur_pkt = 0; cudaMemcpy(tx_buf, tx_pkt_buf + (tx_idx * PKT_BATCH_SIZE), sizeof(unsigned char) * PKT_BATCH_SIZE, cudaMemcpyDeviceToHost); // cudaMemcpy(&tx_cur_pkt, pkt_batch_num + tx_idx, sizeof(int), cudaMemcpyDeviceToHost); tx_idx++; if(tx_idx == THREAD_NUM) tx_idx = 0; return tx_cur_pkt; } __global__ void gpu_monitor(unsigned char * rx_pkt_buf, unsigned char * tx_pkt_buf, int * rx_pkt_cnt, int * pkt_batch_num) { int mem_index = PKT_BATCH_SIZE * threadIdx.x; int batch_num; // PKT_BATCH_SIZE 64 * 512 // THREAD_NUM 512 // RING_SIZE (PKT_BATCH_SIZE * THREAD_NUM) __syncthreads(); if(pkt_batch_num[threadIdx.x] != 0 && rx_pkt_buf[mem_index + ((pkt_batch_num[threadIdx.x] - 1) * PKT_SIZE)] != 0) { __syncthreads(); batch_num = pkt_batch_num[threadIdx.x]; __syncthreads(); rx_pkt_buf[mem_index + ((batch_num - 1) * 
PKT_SIZE)] = 0; __syncthreads(); atomicAdd(rx_pkt_cnt, batch_num); #if TX __syncthreads(); memcpy(tx_pkt_buf + mem_index, rx_pkt_buf + mem_index, PKT_BATCH_SIZE); /* for(int i = 0; i < batch_num; i++) { __syncthreads(); mani_pkt_gpu(tx_pkt_buf + mem_index + i * PKT_SIZE); } */ #endif __syncthreads(); memset(pkt_batch_num + threadIdx.x, 0, sizeof(int)); } } #if 0 __global__ void gpu_mani_loop(unsigned char * tx_pkt_buf,int * pkt_batch_num) { __syncthreads(); if(pkt_batch_num[threadIdx.x] != 0 && rx_pkt_buf[mem_index + ((pkt_batch_num[threadIdx.x] - 1) * PKT_SIZE)] != 0) { __syncthreads(); memcpy(tx_pkt_buf + mem_index, rx_pkt_buf + mem_index, PKT_BATCH_SIZE); __syncthreads(); mani_pkt_gpu(tx_pkt_buf + mem_index + i * PKT_SIZE); } } #endif extern "C" void gpu_monitor_loop(void) { cudaStream_t stream; ASSERTRT(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); while(true) { gpu_monitor<<<1, THREAD_NUM, 0, stream>>>(rx_pkt_buf, tx_pkt_buf, rx_pkt_cnt, pkt_batch_num); cudaDeviceSynchronize(); } }
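Aside: gpu_monitor above has each of its 512 threads poll one ring slot, fold a non-empty slot's batch count into a shared counter with atomicAdd, and then clear the slot, while the host re-launches the kernel in a loop. Below is a reduced sketch of just that per-slot drain step, with synthetic slot counts and without the DPDK packet buffers, the TX copy, or the relaunch loop; NUM_SLOTS and drain_slots are names invented for the example.

// Sketch: one thread per slot, non-empty slots are accumulated into *total.
#include <cuda_runtime.h>
#include <cstdio>

#define NUM_SLOTS 512

__global__ void drain_slots(int* slot_count, int* total) {
  int c = slot_count[threadIdx.x];
  if (c != 0) {
    atomicAdd(total, c);           // fold this slot's batch into the sum
    slot_count[threadIdx.x] = 0;   // mark the slot as consumed
  }
}

int main() {
  int *slots, *total;
  cudaMallocManaged(&slots, NUM_SLOTS * sizeof(int));
  cudaMallocManaged(&total, sizeof(int));
  *total = 0;
  for (int i = 0; i < NUM_SLOTS; ++i) slots[i] = (i % 4 == 0) ? 3 : 0;

  drain_slots<<<1, NUM_SLOTS>>>(slots, total);
  cudaDeviceSynchronize();

  printf("drained %d packets\n", *total);   // 128 non-empty slots * 3 = 384
  cudaFree(slots); cudaFree(total);
  return 0;
}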
2f54c93ffbe620f23fd04ac953b11478b71e248d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <hip/hip_runtime.h> #include <unistd.h> #include <sys/time.h> #include <sys/mman.h> #include <unistd.h> #include <fcntl.h> #define CEIL(a, b) ( ((a) + (b) - 1) / (b) ) #define MIN(a, b) ( (a) < (b) ? (a) : (b) ) #define CAL_TIME ( 1e-6 * (temp_time.tv_usec - start_time.tv_usec) + (temp_time.tv_sec - start_time.tv_sec) ) #define C2I(i) ( ptr[i] - '0') #define ROW_COL(__i) ( __i / line_d ), ( ( __i % pitch ) / block_size ) const int INF = 1000000000; const int V = 20010; const int block_size = 32; int max_streams = 4; int first_round = 4; dim3 threads(block_size, block_size); void input(char *outFileName); void block_FW(); void block_FW_S(); void split_strings(char *ptr); void cuda_init(); void cuda_cleanup(); __constant__ unsigned int *Dist; __constant__ int pitch_d; __global__ void cal_phase1(int pivot) { __shared__ unsigned int block_dist[block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int block_index = pivot + tid; unsigned int origin, blk_dist, new_dist; block_dist[ty][tx] = origin = Dist[block_index]; __syncthreads(); if(origin > INF) Dist[block_index] = origin = INF; blk_dist = origin; for(int k=0; k<block_size-1; k++) { new_dist = block_dist[ty][k] + block_dist[k][tx]; //if (block_dist[ty][tx] > new_dist) //block_dist[ty][tx] = new_dist; if(blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; __syncthreads(); } new_dist = block_dist[ty][block_size-1] + block_dist[block_size-1][tx]; if(blk_dist > new_dist) Dist[block_index] = new_dist; else if(origin > blk_dist) Dist[block_index] = blk_dist; } __global__ void cal_phase2_row(int pivot, int r) { __shared__ unsigned int block_dist[block_size][block_size+1]; __shared__ unsigned int pivot_dist[block_size][block_size+1]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int column = block_size * (blockIdx.x - r); int block_index, pivot_index; unsigned int blk_dist, new_dist, origin; pivot_index = pivot + tid; if(blockIdx.x==r) // pivot block return; /* block_index = pivot_index + column; block_dist[ty][tx] = origin = Dist[block_index]; __syncthreads(); if(origin > INF) Dist[block_index] = origin = INF; pivot_dist[ty][tx] = Dist[pivot_index]; blk_dist = origin; for(int k=0; k<block_size-1; k++) { new_dist = pivot_dist[ty][k] + block_dist[k][tx]; //if (block_dist[ty][tx] > new_dist) //block_dist[ty][tx] = new_dist; if (blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; __syncthreads(); } new_dist = pivot_dist[ty][block_size-1] + block_dist[block_size-1][tx]; if(blk_dist > new_dist) Dist[block_index] = new_dist; else if(origin > blk_dist) Dist[block_index] = blk_dist; */ pivot_dist[ty][tx] = Dist[pivot_index]; block_index = pivot_index + column; block_dist[tx][ty] = origin = Dist[block_index]; __syncthreads(); if(origin > INF) Dist[block_index] = origin = INF; blk_dist = block_dist[ty][tx]; for(int k=0; k<block_size; k++) { new_dist = pivot_dist[tx][k] + block_dist[ty][k]; if (blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; } __syncthreads(); blk_dist = block_dist[tx][ty]; if(origin > blk_dist) Dist[block_index] = blk_dist; } __global__ void cal_phase2_blk(int p1_pivot, int p2_pivot) { __shared__ unsigned int block_dist[block_size][block_size]; __shared__ unsigned int pivot_dist[block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int block_index; unsigned 
int origin, blk_dist, new_dist; pivot_dist[ty][tx] = Dist[p1_pivot + tid]; __syncthreads(); block_index = p2_pivot + tid + blockIdx.x * pitch_d * block_size; block_dist[ty][tx] = origin = Dist[block_index]; blk_dist = origin; for(int k=0; k<block_size-1; k++) { new_dist = block_dist[ty][k] + pivot_dist[k][tx]; if(blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; } new_dist = block_dist[ty][block_size-1] + pivot_dist[block_size-1][tx]; if(blk_dist > new_dist) Dist[block_index] = new_dist; else if(origin > blk_dist) Dist[block_index] = blk_dist; } __global__ void cal_phase3(int p1_pivot, int p2_pivot, int r) { __shared__ unsigned int pvRow_dist[block_size][block_size]; __shared__ unsigned int pvCol_dist[block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int col_diff = (blockIdx.x - r) * block_size; int block_index, p1_index, p2_index; unsigned int origin, block_dist, new1, new2; p1_index = p1_pivot + col_diff + tid; p2_index = p2_pivot + tid; if(col_diff==0) // pivots return; pvRow_dist[ty][tx] = Dist[p1_index]; pvCol_dist[ty][tx] = Dist[p2_index]; __syncthreads(); block_dist = pvCol_dist[ty][0] + pvRow_dist[0][tx]; new1 = pvCol_dist[ty][1] + pvRow_dist[1][tx]; block_index = p2_index + col_diff; origin = Dist[block_index]; if (block_dist > new1) block_dist = new1; for(int k=2; k<block_size; k+=2) { new1 = pvCol_dist[ty][k] + pvRow_dist[k][tx]; new2 = pvCol_dist[ty][k+1] + pvRow_dist[k+1][tx]; if (block_dist > new1) block_dist = new1; if (block_dist > new2) block_dist = new2; } if(origin>block_dist) Dist[block_index] = block_dist; //Dist[block_index] = MIN(origin, block_dist); } __global__ void cal_phase3_n(int p1_pivot, int p2_pivot, int r, int n) { __shared__ unsigned int pvR_dist[block_size][block_size]; __shared__ unsigned int pvC_dist[2][block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int col_diff = (blockIdx.x - r) * block_size; int row_diff = pitch_d * block_size; int b1_index, b2_index, p_index; unsigned int origin, b1_dist, b2_dist; unsigned int inter[block_size], new1, new2; int p1 = 0, p2 = 1; p_index = p1_pivot + tid + col_diff; if(col_diff==0) // pivots return; pvR_dist[ty][tx] = Dist[p_index]; __syncthreads(); for(int k=0; k<block_size; k++) inter[k] = pvR_dist[k][tx]; p_index = p2_pivot + tid; pvC_dist[p1][ty][tx] = Dist[p_index]; b1_index = p_index + col_diff; b1_dist = origin = Dist[b1_index]; while(n-->1) { p_index += row_diff; pvC_dist[p2][ty][tx] = Dist[p_index]; b2_index = b1_index + row_diff; b2_dist = Dist[b2_index]; for(int k=0; k<block_size; k+=2) { new1 = pvC_dist[p1][ty][k] + inter[k]; new2 = pvC_dist[p1][ty][k+1] + inter[k+1]; if (b1_dist > new1) b1_dist = new1; if (b1_dist > new2) b1_dist = new2; } if (origin > b1_dist) Dist[b1_index] = b1_dist; //Dist[b1_index] = MIN(origin, b1_dist); p1 ^= 1; p2 ^= 1; b1_dist = origin = b2_dist; b1_index = b2_index; } for(int k=0; k<block_size; k+=2) { new1 = pvC_dist[p1][ty][k] + inter[k]; new2 = pvC_dist[p1][ty][k+1] + inter[k+1]; if (b1_dist > new1) b1_dist = new1; if (b1_dist > new2) b1_dist = new2; } if (origin > b1_dist) Dist[b1_index] = b1_dist; //Dist[b1_index] = MIN(origin, b1_dist); } int n, n_bytes, out_size; // Number of vertices, edges int Rounds, b_rounds, b_rounds_bytes; int line_n, last_line, max_row; FILE *infile; int out_fd; struct timeval start_time, temp_time; unsigned int *Dist_h, *Dist_d; int pitch_bytes, pitch; int diag_size, line_d_bytes, line_d; hipStream_t stream[8], stream_s, stream_m; hipEvent_t ev_1, 
ev_2, ev_m; int main(int argc, char* argv[]) { assert(argc==4); //block_size = atoi(argv[3]); gettimeofday(&start_time, NULL); infile = fopen(argv[1], "r"); input(argv[2]); gettimeofday(&temp_time, NULL); //printf("input> %g s\n", CAL_TIME); if(Rounds<=8) { block_FW_S(); } else { block_FW(); //printf("NOP\n"); } hipEventRecord(ev_m, stream_m); hipEventSynchronize(ev_m); msync(Dist_h, out_size, MS_SYNC); munmap(Dist_h, out_size); close(out_fd); cuda_cleanup(); gettimeofday(&temp_time, NULL); //printf("block_FW> %g s\n", CAL_TIME); return 0; } void cuda_init() { int bline = Rounds==1 ? n : block_size; hipMemcpy2DAsync(Dist_d, pitch_bytes, Dist_h, n_bytes, n_bytes, bline, hipMemcpyHostToDevice); hipStreamCreate(&stream_m); hipStreamCreate(&stream[0]); hipLaunchKernelGGL(( cal_phase1), dim3(1), dim3(threads), 0, stream[0], 0); hipEventCreateWithFlags(&ev_1, hipEventDisableTiming); hipEventRecord(ev_1, stream[0]); hipLaunchKernelGGL(( cal_phase2_row), dim3(Rounds), dim3(threads), 0, stream[0], 0, 0); hipEventCreateWithFlags(&ev_2, hipEventDisableTiming); hipEventRecord(ev_2, stream[0]); hipEventCreateWithFlags(&ev_m, hipEventDisableTiming); } void cuda_cleanup() { hipDeviceSynchronize(); int num_streams; if(Rounds<=8) { num_streams = Rounds; } else { num_streams = max_streams; hipStreamDestroy(stream_s); } hipStreamDestroy(stream_m); for(int i=0; i<num_streams; i++) { hipStreamDestroy(stream[i]); } hipEventDestroy(ev_1); hipEventDestroy(ev_2); hipEventDestroy(ev_m); hipFree(Dist_d); } void block_FW() { int id_1[V], do_r[V], row; int p1_start = 0, p2_start = 0, p2_sub; unsigned int *ptr_h = Dist_h, *ptr_d = Dist_d; int flag, bline = block_size; hipStream_t *sp, s; cuda_init(); id_1[0] = 0; do_r[0] = max_row; //printf("Round 1: row < first_round (in pivot)\n"); for(int i=1; i<first_round; i++) { sp = &stream[i]; id_1[i] = i / max_row; do_r[i] = max_row - i % max_row; ptr_h += line_n; ptr_d += line_d; hipMemcpy2DAsync(ptr_d, pitch_bytes, ptr_h, n_bytes, n_bytes, block_size, hipMemcpyHostToDevice, stream_m); hipEventRecord(ev_m, stream_m); hipStreamCreate(sp); s = *sp; hipStreamWaitEvent(s, ev_1, 0); hipStreamWaitEvent(s, ev_m, 0); p2_start += line_d; hipLaunchKernelGGL(( cal_phase2_blk), dim3(1), dim3(threads), 0, s, p1_start, p2_start); hipStreamWaitEvent(s, ev_2, 0); hipLaunchKernelGGL(( cal_phase3), dim3(Rounds), dim3(threads), 0, s, p1_start, p2_start, 0); } //printf("Round (2-first_round): row < first_round\n"); for(int i=1; i<first_round; i++) { s = stream[i]; p1_start += diag_size; //printf("round %d: p1=(%d,%d) stream %d\n", i, ROW_COL(p1_start), i); hipLaunchKernelGGL(( cal_phase1), dim3(1), dim3(threads), 0, s, p1_start); hipEventRecord(ev_1, s); hipLaunchKernelGGL(( cal_phase2_row), dim3(Rounds), dim3(threads), 0, s, p1_start, i); hipEventRecord(ev_2, s); for(int j=0; j<first_round; j++) { if(i==j) continue; hipStream_t sj = stream[j]; p2_sub = p1_start + line_d * (j - i); //printf("\tp1=(%d,%d), p2=(%d,%d) stream %d\n", ROW_COL(p1_start), ROW_COL(p2_sub), j); hipStreamWaitEvent(sj, ev_1, 0); hipLaunchKernelGGL(( cal_phase2_blk), dim3(1), dim3(threads), 0, sj, p1_start, p2_sub); hipStreamWaitEvent(sj, ev_2, 0); hipLaunchKernelGGL(( cal_phase3), dim3(Rounds), dim3(threads), 0, sj, p1_start, p2_sub, i); } } for(int i=0; i<max_streams; i++) { hipEventRecord(ev_1, stream[i]); for(int j=0; j<max_streams; j++) { if(i==j) continue; hipStreamWaitEvent(stream[j], ev_1, 0); } } //printf("Round (1-first_round): other rows\n"); flag = 1; for(int i=first_round; i<Rounds; i++) { id_1[i] = i / max_row 
% max_streams; do_r[i] = max_row - i % max_row; if(i + do_r[i] > Rounds) do_r[i] = Rounds - i; s = stream[id_1[i]]; ptr_h += line_n; ptr_d += line_d; p2_start += line_d; if(flag>0) { if(i==first_round) { row = 1; bline = block_size; } else { row = do_r[i]; bline = (i+do_r[i]==Rounds) ? last_line + (do_r[i]-1) * block_size : do_r[i] * block_size; } hipMemcpy2DAsync(ptr_d, pitch_bytes, ptr_h, n_bytes, n_bytes, bline, hipMemcpyHostToDevice, stream_m); hipEventRecord(ev_m, stream_m); hipStreamWaitEvent(s, ev_m, 0); p1_start = 0; p2_sub = p2_start; for(int r=0; r<first_round; r++) { hipLaunchKernelGGL(( cal_phase2_blk), dim3(row), dim3(threads), 0, s, p1_start, p2_sub); hipLaunchKernelGGL(( cal_phase3_n), dim3(Rounds), dim3(threads), 0, s, p1_start, p2_sub, r, row); p1_start += diag_size; p2_sub += block_size; } if(i==first_round) { hipStreamCreate(&stream_s); hipEventRecord(ev_1, s); hipStreamWaitEvent(stream_s, ev_1, 0); } flag -= row - 1; } else { flag++; } } //printf("R %d\n", Rounds); for (int r=first_round; r<Rounds; ++r) { hipLaunchKernelGGL(( cal_phase1), dim3(1), dim3(threads), 0, stream_s, p1_start); hipEventRecord(ev_1, stream_s); hipLaunchKernelGGL(( cal_phase2_row), dim3(Rounds), dim3(threads), 0, stream_s, p1_start, r); hipEventRecord(ev_2, stream_s); if(r==Rounds-1) { bline = last_line; ptr_h = Dist_h + r * line_n; ptr_d = Dist_d + r * line_d; hipStreamWaitEvent(stream_m, ev_2, 0); hipMemcpy2DAsync(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, hipMemcpyDeviceToHost, stream_m); //hipMemcpy2D(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, hipMemcpyDeviceToHost); } //printf("r %d\n", r); int next_r = r + 1; if(next_r<Rounds) { s = stream[id_1[next_r]]; p2_start = p1_start + line_d; hipStreamWaitEvent(s, ev_1, 0); hipLaunchKernelGGL(( cal_phase2_blk), dim3(1), dim3(threads), 0, s, p1_start, p2_start); hipStreamWaitEvent(s, ev_2, 0); hipLaunchKernelGGL(( cal_phase3), dim3(Rounds), dim3(threads), 0, s, p1_start, p2_start, r); hipEventRecord(ev_m, s); hipStreamWaitEvent(stream_s, ev_m, 0); } flag = 1; for(int i = (r+1) % Rounds; i != r; i = (i==Rounds-1) ? 0 : i+1) { if(i==r+1) continue; s = stream[id_1[i]]; p2_start = p1_start + line_d * (i-r); if(flag>0) { row = (i<r && i+do_r[i]>r) ? 
r - i : do_r[i]; flag -= row - 1; hipStreamWaitEvent(s, ev_1, 0); hipLaunchKernelGGL(( cal_phase2_blk), dim3(row), dim3(threads), 0, s, p1_start, p2_start); hipStreamWaitEvent(s, ev_2, 0); hipLaunchKernelGGL(( cal_phase3_n), dim3(Rounds), dim3(threads), 0, s, p1_start, p2_start, r, row); if(r==Rounds-1) { bline = row * block_size; ptr_h = Dist_h + i * line_n; ptr_d = Dist_d + i * line_d; hipEventRecord(ev_m, s); hipStreamWaitEvent(stream_m, ev_m, 0); //hipMemcpy2DAsync(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, hipMemcpyDeviceToHost, stream_m); hipMemcpy2D(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, hipMemcpyDeviceToHost); } } else { flag++; } } p1_start += diag_size; } } size_t m, sz; void input(char *outFileName) { char *tok_1, *tok_2, *fstr; char temp[30]; size_t p_bytes; fseek(infile, 0L, SEEK_END); sz = ftell(infile); fseek(infile, 0L, SEEK_SET); fstr = (char *) mmap(NULL, sz, PROT_READ, MAP_PRIVATE|MAP_POPULATE, fileno(infile), 0); if(fstr==MAP_FAILED) { fprintf(stderr, "mmap faild fstr\n"); exit(1); } tok_1 = strchr(fstr, ' '); strncpy(temp, fstr, tok_1-fstr); n = atoi(temp); tok_1++; tok_2 = strchr(tok_1, '\n'); strncpy(temp, tok_1, tok_2-tok_1); m = atoi(temp); tok_2++; Rounds = CEIL(n, block_size); b_rounds = block_size * Rounds; b_rounds_bytes = b_rounds * sizeof(int); gettimeofday(&temp_time, NULL); //printf("before parsing> %g s\n", CAL_TIME); n_bytes = n * sizeof(int); last_line = n - (Rounds-1) * block_size; out_size = n * n_bytes; max_row = (Rounds+max_streams-1) / max_streams; int fflag = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; out_fd = open(outFileName, O_RDWR|O_CREAT, fflag); if(0!=posix_fallocate(out_fd, 0, out_size)) { fprintf(stderr, "posix_fallocate failed\n"); exit(1); } Dist_h = (unsigned int *) mmap(NULL, out_size, PROT_READ|PROT_WRITE, MAP_SHARED, out_fd, 0); if(Dist_h==MAP_FAILED) { fprintf(stderr, "mmap faild Dist_h\n"); exit(1); } memset(Dist_h, 64, out_size); for (int i = 0; i < n*n; i+=n+1) Dist_h[i] = 0; //fprintf(stderr, "memset success\n"); gettimeofday(&temp_time, NULL); //printf("\tfile read done> %g s\n", CAL_TIME); split_strings(tok_2); munmap(fstr, sz); fclose(infile); gettimeofday(&temp_time, NULL); //printf("\tparsing done> %g s\n", CAL_TIME); if(n>=10000) { max_streams = 6; first_round = 6; } hipMallocPitch(&Dist_d, &p_bytes, b_rounds_bytes, b_rounds); pitch_bytes = p_bytes; pitch = pitch_bytes / sizeof(int); hipMemcpyToSymbolAsync(Dist, &Dist_d, sizeof(Dist_d), 0); hipMemcpyToSymbolAsync(pitch_d, &pitch, sizeof(pitch), 0); hipMemset2DAsync(Dist_d, p_bytes, 64, b_rounds_bytes, b_rounds); line_n = block_size * n; line_d = block_size * pitch; diag_size = (pitch + 1) * block_size; fprintf(stderr, "n %d, Rounds %d, streams %d, rows %d\n", n, Rounds, max_streams, max_row); gettimeofday(&temp_time, NULL); //printf("\tcuda allocate done> %g s\n", CAL_TIME); } void split_strings(char *ptr) { int a, b, v; while(m-->0) { if(ptr[1]==' ') { a = C2I(0); ptr += 2; } else if(ptr[2]==' ') { a = C2I(0) * 10 + C2I(1); ptr += 3; } else if(ptr[3]==' ') { a = C2I(0) * 100 + C2I(1) * 10 + C2I(2); ptr += 4; } else if(ptr[4]==' ') { a = C2I(0) * 1000 + C2I(1) * 100 + C2I(2) * 10 + C2I(3); ptr += 5; } else { a = C2I(0) * 10000 + C2I(1) * 1000 + C2I(2) * 100 + C2I(3) * 10 + C2I(4); ptr += 6; } if(ptr[1]==' ') { b = C2I(0); ptr += 2; } else if(ptr[2]==' ') { b = C2I(0) * 10 + C2I(1); ptr += 3; } else if(ptr[3]==' ') { b = C2I(0) * 100 + C2I(1) * 10 + C2I(2); ptr += 4; } else if(ptr[4]==' ') { b = C2I(0) * 1000 + C2I(1) * 100 + C2I(2) * 10 
+ C2I(3); ptr += 5; } else { b = C2I(0) * 10000 + C2I(1) * 1000 + C2I(2) * 100 + C2I(3) * 10 + C2I(4); ptr += 6; } if(ptr[1]=='\n') { v = C2I(0); ptr += 2; } else if(ptr[2]=='\n') { v = C2I(0) * 10 + C2I(1); ptr += 3; } else { v = C2I(0) * 100 + C2I(1) * 10 + C2I(2); ptr += 4; } Dist_h[ n * a + b ] = v; } } void block_FW_S() { int p1_start = 0, p2_start = 0; unsigned int *ptr_h = Dist_h, *ptr_d = Dist_d; int p2_sub, bline; cuda_init(); //printf("round 1\n"); for(int i=1; i<Rounds; i++) { ptr_h += line_n; ptr_d += line_d; bline = i==Rounds-1 ? last_line : block_size; hipMemcpy2DAsync(ptr_d, pitch_bytes, ptr_h, n_bytes, n_bytes, bline, hipMemcpyHostToDevice, stream_m); hipEventRecord(ev_m, stream_m); hipStreamCreate(&stream[i]); hipStreamWaitEvent(stream[i], ev_m, 0); p2_start += line_d; //printf("\tp1=(%d,%d), p2=(%d,%d) stream %d\n", ROW_COL(p1_start), ROW_COL(p2_start), i); hipStreamWaitEvent(stream[i], ev_1, 0); hipLaunchKernelGGL(( cal_phase2_blk), dim3(1), dim3(threads), 0, stream[i], p1_start, p2_start); hipStreamWaitEvent(stream[i], ev_2, 0); hipLaunchKernelGGL(( cal_phase3), dim3(Rounds), dim3(threads), 0, stream[i], p1_start, p2_start, 0); } //fprintf(stderr, "%d first round done\n", tid); for(int i=1; i<Rounds; i++) { p1_start += diag_size; hipLaunchKernelGGL(( cal_phase1), dim3(1), dim3(threads), 0, stream[i], p1_start); hipEventRecord(ev_1, stream[i]); hipLaunchKernelGGL(( cal_phase2_row), dim3(Rounds), dim3(threads), 0, stream[i], p1_start, i); hipEventRecord(ev_2, stream[i]); for(int j=0; j<Rounds; j++) { if(i==j) continue; p2_sub = p1_start + line_d * (j - i); //printf("\tp1=(%d,%d), p2=(%d,%d) stream %d\n", ROW_COL(p1_start), ROW_COL(p2_sub), j); hipStreamWaitEvent(stream[j], ev_1, 0); hipLaunchKernelGGL(( cal_phase2_blk), dim3(1), dim3(threads), 0, stream[j], p1_start, p2_sub); hipStreamWaitEvent(stream[j], ev_2, 0); hipLaunchKernelGGL(( cal_phase3), dim3(Rounds), dim3(threads), 0, stream[j], p1_start, p2_sub, i); } } //fprintf(stderr, "%d all rounds done\n", tid); ptr_h = Dist_h; ptr_d = Dist_d; for(int i=0; i<Rounds; i++) { bline = i==Rounds-1 ? last_line : block_size; hipMemcpy2DAsync(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, hipMemcpyDeviceToHost, stream[i]); ptr_h += line_n; ptr_d += line_d; } //gettimeofday(&temp_time, NULL); //printf("before output> %g s\n", CAL_TIME); }
2f54c93ffbe620f23fd04ac953b11478b71e248d.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <cuda.h> #include <unistd.h> #include <sys/time.h> #include <sys/mman.h> #include <unistd.h> #include <fcntl.h> #define CEIL(a, b) ( ((a) + (b) - 1) / (b) ) #define MIN(a, b) ( (a) < (b) ? (a) : (b) ) #define CAL_TIME ( 1e-6 * (temp_time.tv_usec - start_time.tv_usec) + (temp_time.tv_sec - start_time.tv_sec) ) #define C2I(i) ( ptr[i] - '0') #define ROW_COL(__i) ( __i / line_d ), ( ( __i % pitch ) / block_size ) const int INF = 1000000000; const int V = 20010; const int block_size = 32; int max_streams = 4; int first_round = 4; dim3 threads(block_size, block_size); void input(char *outFileName); void block_FW(); void block_FW_S(); void split_strings(char *ptr); void cuda_init(); void cuda_cleanup(); __constant__ unsigned int *Dist; __constant__ int pitch_d; __global__ void cal_phase1(int pivot) { __shared__ unsigned int block_dist[block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int block_index = pivot + tid; unsigned int origin, blk_dist, new_dist; block_dist[ty][tx] = origin = Dist[block_index]; __syncthreads(); if(origin > INF) Dist[block_index] = origin = INF; blk_dist = origin; for(int k=0; k<block_size-1; k++) { new_dist = block_dist[ty][k] + block_dist[k][tx]; //if (block_dist[ty][tx] > new_dist) //block_dist[ty][tx] = new_dist; if(blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; __syncthreads(); } new_dist = block_dist[ty][block_size-1] + block_dist[block_size-1][tx]; if(blk_dist > new_dist) Dist[block_index] = new_dist; else if(origin > blk_dist) Dist[block_index] = blk_dist; } __global__ void cal_phase2_row(int pivot, int r) { __shared__ unsigned int block_dist[block_size][block_size+1]; __shared__ unsigned int pivot_dist[block_size][block_size+1]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int column = block_size * (blockIdx.x - r); int block_index, pivot_index; unsigned int blk_dist, new_dist, origin; pivot_index = pivot + tid; if(blockIdx.x==r) // pivot block return; /* block_index = pivot_index + column; block_dist[ty][tx] = origin = Dist[block_index]; __syncthreads(); if(origin > INF) Dist[block_index] = origin = INF; pivot_dist[ty][tx] = Dist[pivot_index]; blk_dist = origin; for(int k=0; k<block_size-1; k++) { new_dist = pivot_dist[ty][k] + block_dist[k][tx]; //if (block_dist[ty][tx] > new_dist) //block_dist[ty][tx] = new_dist; if (blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; __syncthreads(); } new_dist = pivot_dist[ty][block_size-1] + block_dist[block_size-1][tx]; if(blk_dist > new_dist) Dist[block_index] = new_dist; else if(origin > blk_dist) Dist[block_index] = blk_dist; */ pivot_dist[ty][tx] = Dist[pivot_index]; block_index = pivot_index + column; block_dist[tx][ty] = origin = Dist[block_index]; __syncthreads(); if(origin > INF) Dist[block_index] = origin = INF; blk_dist = block_dist[ty][tx]; for(int k=0; k<block_size; k++) { new_dist = pivot_dist[tx][k] + block_dist[ty][k]; if (blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; } __syncthreads(); blk_dist = block_dist[tx][ty]; if(origin > blk_dist) Dist[block_index] = blk_dist; } __global__ void cal_phase2_blk(int p1_pivot, int p2_pivot) { __shared__ unsigned int block_dist[block_size][block_size]; __shared__ unsigned int pivot_dist[block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int block_index; unsigned int origin, blk_dist, new_dist; pivot_dist[ty][tx] = Dist[p1_pivot + 
tid]; __syncthreads(); block_index = p2_pivot + tid + blockIdx.x * pitch_d * block_size; block_dist[ty][tx] = origin = Dist[block_index]; blk_dist = origin; for(int k=0; k<block_size-1; k++) { new_dist = block_dist[ty][k] + pivot_dist[k][tx]; if(blk_dist > new_dist) block_dist[ty][tx] = blk_dist = new_dist; } new_dist = block_dist[ty][block_size-1] + pivot_dist[block_size-1][tx]; if(blk_dist > new_dist) Dist[block_index] = new_dist; else if(origin > blk_dist) Dist[block_index] = blk_dist; } __global__ void cal_phase3(int p1_pivot, int p2_pivot, int r) { __shared__ unsigned int pvRow_dist[block_size][block_size]; __shared__ unsigned int pvCol_dist[block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int col_diff = (blockIdx.x - r) * block_size; int block_index, p1_index, p2_index; unsigned int origin, block_dist, new1, new2; p1_index = p1_pivot + col_diff + tid; p2_index = p2_pivot + tid; if(col_diff==0) // pivots return; pvRow_dist[ty][tx] = Dist[p1_index]; pvCol_dist[ty][tx] = Dist[p2_index]; __syncthreads(); block_dist = pvCol_dist[ty][0] + pvRow_dist[0][tx]; new1 = pvCol_dist[ty][1] + pvRow_dist[1][tx]; block_index = p2_index + col_diff; origin = Dist[block_index]; if (block_dist > new1) block_dist = new1; for(int k=2; k<block_size; k+=2) { new1 = pvCol_dist[ty][k] + pvRow_dist[k][tx]; new2 = pvCol_dist[ty][k+1] + pvRow_dist[k+1][tx]; if (block_dist > new1) block_dist = new1; if (block_dist > new2) block_dist = new2; } if(origin>block_dist) Dist[block_index] = block_dist; //Dist[block_index] = MIN(origin, block_dist); } __global__ void cal_phase3_n(int p1_pivot, int p2_pivot, int r, int n) { __shared__ unsigned int pvR_dist[block_size][block_size]; __shared__ unsigned int pvC_dist[2][block_size][block_size]; int tx = threadIdx.x, ty = threadIdx.y; int tid = ty * pitch_d + tx; int col_diff = (blockIdx.x - r) * block_size; int row_diff = pitch_d * block_size; int b1_index, b2_index, p_index; unsigned int origin, b1_dist, b2_dist; unsigned int inter[block_size], new1, new2; int p1 = 0, p2 = 1; p_index = p1_pivot + tid + col_diff; if(col_diff==0) // pivots return; pvR_dist[ty][tx] = Dist[p_index]; __syncthreads(); for(int k=0; k<block_size; k++) inter[k] = pvR_dist[k][tx]; p_index = p2_pivot + tid; pvC_dist[p1][ty][tx] = Dist[p_index]; b1_index = p_index + col_diff; b1_dist = origin = Dist[b1_index]; while(n-->1) { p_index += row_diff; pvC_dist[p2][ty][tx] = Dist[p_index]; b2_index = b1_index + row_diff; b2_dist = Dist[b2_index]; for(int k=0; k<block_size; k+=2) { new1 = pvC_dist[p1][ty][k] + inter[k]; new2 = pvC_dist[p1][ty][k+1] + inter[k+1]; if (b1_dist > new1) b1_dist = new1; if (b1_dist > new2) b1_dist = new2; } if (origin > b1_dist) Dist[b1_index] = b1_dist; //Dist[b1_index] = MIN(origin, b1_dist); p1 ^= 1; p2 ^= 1; b1_dist = origin = b2_dist; b1_index = b2_index; } for(int k=0; k<block_size; k+=2) { new1 = pvC_dist[p1][ty][k] + inter[k]; new2 = pvC_dist[p1][ty][k+1] + inter[k+1]; if (b1_dist > new1) b1_dist = new1; if (b1_dist > new2) b1_dist = new2; } if (origin > b1_dist) Dist[b1_index] = b1_dist; //Dist[b1_index] = MIN(origin, b1_dist); } int n, n_bytes, out_size; // Number of vertices, edges int Rounds, b_rounds, b_rounds_bytes; int line_n, last_line, max_row; FILE *infile; int out_fd; struct timeval start_time, temp_time; unsigned int *Dist_h, *Dist_d; int pitch_bytes, pitch; int diag_size, line_d_bytes, line_d; cudaStream_t stream[8], stream_s, stream_m; cudaEvent_t ev_1, ev_2, ev_m; int main(int argc, char* argv[]) { assert(argc==4); 
//block_size = atoi(argv[3]); gettimeofday(&start_time, NULL); infile = fopen(argv[1], "r"); input(argv[2]); gettimeofday(&temp_time, NULL); //printf("input> %g s\n", CAL_TIME); if(Rounds<=8) { block_FW_S(); } else { block_FW(); //printf("NOP\n"); } cudaEventRecord(ev_m, stream_m); cudaEventSynchronize(ev_m); msync(Dist_h, out_size, MS_SYNC); munmap(Dist_h, out_size); close(out_fd); cuda_cleanup(); gettimeofday(&temp_time, NULL); //printf("block_FW> %g s\n", CAL_TIME); return 0; } void cuda_init() { int bline = Rounds==1 ? n : block_size; cudaMemcpy2DAsync(Dist_d, pitch_bytes, Dist_h, n_bytes, n_bytes, bline, cudaMemcpyHostToDevice); cudaStreamCreate(&stream_m); cudaStreamCreate(&stream[0]); cal_phase1<<<1, threads, 0, stream[0]>>>(0); cudaEventCreateWithFlags(&ev_1, cudaEventDisableTiming); cudaEventRecord(ev_1, stream[0]); cal_phase2_row<<<Rounds, threads, 0, stream[0]>>>(0, 0); cudaEventCreateWithFlags(&ev_2, cudaEventDisableTiming); cudaEventRecord(ev_2, stream[0]); cudaEventCreateWithFlags(&ev_m, cudaEventDisableTiming); } void cuda_cleanup() { cudaDeviceSynchronize(); int num_streams; if(Rounds<=8) { num_streams = Rounds; } else { num_streams = max_streams; cudaStreamDestroy(stream_s); } cudaStreamDestroy(stream_m); for(int i=0; i<num_streams; i++) { cudaStreamDestroy(stream[i]); } cudaEventDestroy(ev_1); cudaEventDestroy(ev_2); cudaEventDestroy(ev_m); cudaFree(Dist_d); } void block_FW() { int id_1[V], do_r[V], row; int p1_start = 0, p2_start = 0, p2_sub; unsigned int *ptr_h = Dist_h, *ptr_d = Dist_d; int flag, bline = block_size; cudaStream_t *sp, s; cuda_init(); id_1[0] = 0; do_r[0] = max_row; //printf("Round 1: row < first_round (in pivot)\n"); for(int i=1; i<first_round; i++) { sp = &stream[i]; id_1[i] = i / max_row; do_r[i] = max_row - i % max_row; ptr_h += line_n; ptr_d += line_d; cudaMemcpy2DAsync(ptr_d, pitch_bytes, ptr_h, n_bytes, n_bytes, block_size, cudaMemcpyHostToDevice, stream_m); cudaEventRecord(ev_m, stream_m); cudaStreamCreate(sp); s = *sp; cudaStreamWaitEvent(s, ev_1, 0); cudaStreamWaitEvent(s, ev_m, 0); p2_start += line_d; cal_phase2_blk<<< 1, threads, 0, s>>>(p1_start, p2_start); cudaStreamWaitEvent(s, ev_2, 0); cal_phase3<<<Rounds, threads, 0, s>>>(p1_start, p2_start, 0); } //printf("Round (2-first_round): row < first_round\n"); for(int i=1; i<first_round; i++) { s = stream[i]; p1_start += diag_size; //printf("round %d: p1=(%d,%d) stream %d\n", i, ROW_COL(p1_start), i); cal_phase1<<<1, threads, 0, s>>>(p1_start); cudaEventRecord(ev_1, s); cal_phase2_row<<<Rounds, threads, 0, s>>>(p1_start, i); cudaEventRecord(ev_2, s); for(int j=0; j<first_round; j++) { if(i==j) continue; cudaStream_t sj = stream[j]; p2_sub = p1_start + line_d * (j - i); //printf("\tp1=(%d,%d), p2=(%d,%d) stream %d\n", ROW_COL(p1_start), ROW_COL(p2_sub), j); cudaStreamWaitEvent(sj, ev_1, 0); cal_phase2_blk<<< 1, threads, 0, sj>>>(p1_start, p2_sub); cudaStreamWaitEvent(sj, ev_2, 0); cal_phase3<<<Rounds, threads, 0, sj>>>(p1_start, p2_sub, i); } } for(int i=0; i<max_streams; i++) { cudaEventRecord(ev_1, stream[i]); for(int j=0; j<max_streams; j++) { if(i==j) continue; cudaStreamWaitEvent(stream[j], ev_1, 0); } } //printf("Round (1-first_round): other rows\n"); flag = 1; for(int i=first_round; i<Rounds; i++) { id_1[i] = i / max_row % max_streams; do_r[i] = max_row - i % max_row; if(i + do_r[i] > Rounds) do_r[i] = Rounds - i; s = stream[id_1[i]]; ptr_h += line_n; ptr_d += line_d; p2_start += line_d; if(flag>0) { if(i==first_round) { row = 1; bline = block_size; } else { row = do_r[i]; bline = 
(i+do_r[i]==Rounds) ? last_line + (do_r[i]-1) * block_size : do_r[i] * block_size; } cudaMemcpy2DAsync(ptr_d, pitch_bytes, ptr_h, n_bytes, n_bytes, bline, cudaMemcpyHostToDevice, stream_m); cudaEventRecord(ev_m, stream_m); cudaStreamWaitEvent(s, ev_m, 0); p1_start = 0; p2_sub = p2_start; for(int r=0; r<first_round; r++) { cal_phase2_blk<<<row, threads, 0, s>>>(p1_start, p2_sub); cal_phase3_n<<<Rounds, threads, 0, s>>>(p1_start, p2_sub, r, row); p1_start += diag_size; p2_sub += block_size; } if(i==first_round) { cudaStreamCreate(&stream_s); cudaEventRecord(ev_1, s); cudaStreamWaitEvent(stream_s, ev_1, 0); } flag -= row - 1; } else { flag++; } } //printf("R %d\n", Rounds); for (int r=first_round; r<Rounds; ++r) { cal_phase1<<<1, threads, 0, stream_s>>>(p1_start); cudaEventRecord(ev_1, stream_s); cal_phase2_row<<<Rounds, threads, 0, stream_s>>>(p1_start, r); cudaEventRecord(ev_2, stream_s); if(r==Rounds-1) { bline = last_line; ptr_h = Dist_h + r * line_n; ptr_d = Dist_d + r * line_d; cudaStreamWaitEvent(stream_m, ev_2, 0); cudaMemcpy2DAsync(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, cudaMemcpyDeviceToHost, stream_m); //cudaMemcpy2D(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, cudaMemcpyDeviceToHost); } //printf("r %d\n", r); int next_r = r + 1; if(next_r<Rounds) { s = stream[id_1[next_r]]; p2_start = p1_start + line_d; cudaStreamWaitEvent(s, ev_1, 0); cal_phase2_blk<<<1, threads, 0, s>>>(p1_start, p2_start); cudaStreamWaitEvent(s, ev_2, 0); cal_phase3<<<Rounds, threads, 0, s>>>(p1_start, p2_start, r); cudaEventRecord(ev_m, s); cudaStreamWaitEvent(stream_s, ev_m, 0); } flag = 1; for(int i = (r+1) % Rounds; i != r; i = (i==Rounds-1) ? 0 : i+1) { if(i==r+1) continue; s = stream[id_1[i]]; p2_start = p1_start + line_d * (i-r); if(flag>0) { row = (i<r && i+do_r[i]>r) ? 
r - i : do_r[i]; flag -= row - 1; cudaStreamWaitEvent(s, ev_1, 0); cal_phase2_blk<<<row, threads, 0, s>>>(p1_start, p2_start); cudaStreamWaitEvent(s, ev_2, 0); cal_phase3_n<<<Rounds, threads, 0, s>>>(p1_start, p2_start, r, row); if(r==Rounds-1) { bline = row * block_size; ptr_h = Dist_h + i * line_n; ptr_d = Dist_d + i * line_d; cudaEventRecord(ev_m, s); cudaStreamWaitEvent(stream_m, ev_m, 0); //cudaMemcpy2DAsync(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, cudaMemcpyDeviceToHost, stream_m); cudaMemcpy2D(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, cudaMemcpyDeviceToHost); } } else { flag++; } } p1_start += diag_size; } } size_t m, sz; void input(char *outFileName) { char *tok_1, *tok_2, *fstr; char temp[30]; size_t p_bytes; fseek(infile, 0L, SEEK_END); sz = ftell(infile); fseek(infile, 0L, SEEK_SET); fstr = (char *) mmap(NULL, sz, PROT_READ, MAP_PRIVATE|MAP_POPULATE, fileno(infile), 0); if(fstr==MAP_FAILED) { fprintf(stderr, "mmap faild fstr\n"); exit(1); } tok_1 = strchr(fstr, ' '); strncpy(temp, fstr, tok_1-fstr); n = atoi(temp); tok_1++; tok_2 = strchr(tok_1, '\n'); strncpy(temp, tok_1, tok_2-tok_1); m = atoi(temp); tok_2++; Rounds = CEIL(n, block_size); b_rounds = block_size * Rounds; b_rounds_bytes = b_rounds * sizeof(int); gettimeofday(&temp_time, NULL); //printf("before parsing> %g s\n", CAL_TIME); n_bytes = n * sizeof(int); last_line = n - (Rounds-1) * block_size; out_size = n * n_bytes; max_row = (Rounds+max_streams-1) / max_streams; int fflag = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; out_fd = open(outFileName, O_RDWR|O_CREAT, fflag); if(0!=posix_fallocate(out_fd, 0, out_size)) { fprintf(stderr, "posix_fallocate failed\n"); exit(1); } Dist_h = (unsigned int *) mmap(NULL, out_size, PROT_READ|PROT_WRITE, MAP_SHARED, out_fd, 0); if(Dist_h==MAP_FAILED) { fprintf(stderr, "mmap faild Dist_h\n"); exit(1); } memset(Dist_h, 64, out_size); for (int i = 0; i < n*n; i+=n+1) Dist_h[i] = 0; //fprintf(stderr, "memset success\n"); gettimeofday(&temp_time, NULL); //printf("\tfile read done> %g s\n", CAL_TIME); split_strings(tok_2); munmap(fstr, sz); fclose(infile); gettimeofday(&temp_time, NULL); //printf("\tparsing done> %g s\n", CAL_TIME); if(n>=10000) { max_streams = 6; first_round = 6; } cudaMallocPitch(&Dist_d, &p_bytes, b_rounds_bytes, b_rounds); pitch_bytes = p_bytes; pitch = pitch_bytes / sizeof(int); cudaMemcpyToSymbolAsync(Dist, &Dist_d, sizeof(Dist_d), 0); cudaMemcpyToSymbolAsync(pitch_d, &pitch, sizeof(pitch), 0); cudaMemset2DAsync(Dist_d, p_bytes, 64, b_rounds_bytes, b_rounds); line_n = block_size * n; line_d = block_size * pitch; diag_size = (pitch + 1) * block_size; fprintf(stderr, "n %d, Rounds %d, streams %d, rows %d\n", n, Rounds, max_streams, max_row); gettimeofday(&temp_time, NULL); //printf("\tcuda allocate done> %g s\n", CAL_TIME); } void split_strings(char *ptr) { int a, b, v; while(m-->0) { if(ptr[1]==' ') { a = C2I(0); ptr += 2; } else if(ptr[2]==' ') { a = C2I(0) * 10 + C2I(1); ptr += 3; } else if(ptr[3]==' ') { a = C2I(0) * 100 + C2I(1) * 10 + C2I(2); ptr += 4; } else if(ptr[4]==' ') { a = C2I(0) * 1000 + C2I(1) * 100 + C2I(2) * 10 + C2I(3); ptr += 5; } else { a = C2I(0) * 10000 + C2I(1) * 1000 + C2I(2) * 100 + C2I(3) * 10 + C2I(4); ptr += 6; } if(ptr[1]==' ') { b = C2I(0); ptr += 2; } else if(ptr[2]==' ') { b = C2I(0) * 10 + C2I(1); ptr += 3; } else if(ptr[3]==' ') { b = C2I(0) * 100 + C2I(1) * 10 + C2I(2); ptr += 4; } else if(ptr[4]==' ') { b = C2I(0) * 1000 + C2I(1) * 100 + C2I(2) * 10 + C2I(3); ptr += 5; } else { b = C2I(0) * 10000 + 
C2I(1) * 1000 + C2I(2) * 100 + C2I(3) * 10 + C2I(4); ptr += 6; } if(ptr[1]=='\n') { v = C2I(0); ptr += 2; } else if(ptr[2]=='\n') { v = C2I(0) * 10 + C2I(1); ptr += 3; } else { v = C2I(0) * 100 + C2I(1) * 10 + C2I(2); ptr += 4; } Dist_h[ n * a + b ] = v; } } void block_FW_S() { int p1_start = 0, p2_start = 0; unsigned int *ptr_h = Dist_h, *ptr_d = Dist_d; int p2_sub, bline; cuda_init(); //printf("round 1\n"); for(int i=1; i<Rounds; i++) { ptr_h += line_n; ptr_d += line_d; bline = i==Rounds-1 ? last_line : block_size; cudaMemcpy2DAsync(ptr_d, pitch_bytes, ptr_h, n_bytes, n_bytes, bline, cudaMemcpyHostToDevice, stream_m); cudaEventRecord(ev_m, stream_m); cudaStreamCreate(&stream[i]); cudaStreamWaitEvent(stream[i], ev_m, 0); p2_start += line_d; //printf("\tp1=(%d,%d), p2=(%d,%d) stream %d\n", ROW_COL(p1_start), ROW_COL(p2_start), i); cudaStreamWaitEvent(stream[i], ev_1, 0); cal_phase2_blk<<< 1, threads, 0, stream[i]>>>(p1_start, p2_start); cudaStreamWaitEvent(stream[i], ev_2, 0); cal_phase3<<<Rounds, threads, 0, stream[i]>>>(p1_start, p2_start, 0); } //fprintf(stderr, "%d first round done\n", tid); for(int i=1; i<Rounds; i++) { p1_start += diag_size; cal_phase1<<<1, threads, 0, stream[i]>>>(p1_start); cudaEventRecord(ev_1, stream[i]); cal_phase2_row<<<Rounds, threads, 0, stream[i]>>>(p1_start, i); cudaEventRecord(ev_2, stream[i]); for(int j=0; j<Rounds; j++) { if(i==j) continue; p2_sub = p1_start + line_d * (j - i); //printf("\tp1=(%d,%d), p2=(%d,%d) stream %d\n", ROW_COL(p1_start), ROW_COL(p2_sub), j); cudaStreamWaitEvent(stream[j], ev_1, 0); cal_phase2_blk<<< 1, threads, 0, stream[j]>>>(p1_start, p2_sub); cudaStreamWaitEvent(stream[j], ev_2, 0); cal_phase3<<<Rounds, threads, 0, stream[j]>>>(p1_start, p2_sub, i); } } //fprintf(stderr, "%d all rounds done\n", tid); ptr_h = Dist_h; ptr_d = Dist_d; for(int i=0; i<Rounds; i++) { bline = i==Rounds-1 ? last_line : block_size; cudaMemcpy2DAsync(ptr_h, n_bytes, ptr_d, pitch_bytes, n_bytes, bline, cudaMemcpyDeviceToHost, stream[i]); ptr_h += line_n; ptr_d += line_d; } //gettimeofday(&temp_time, NULL); //printf("before output> %g s\n", CAL_TIME); }
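// --- Editorial sketch (not from the original pair) ---------------------------
// A plain O(n^3) CPU Floyd-Warshall that can serve as a correctness reference
// for the blocked GPU version above on small graphs. `dist` is the row-major
// n*n host matrix in the same encoding the host code uses (0 on the diagonal,
// the 0x40404040 memset pattern for "no edge"); the function name and
// signature are editorial, not part of the original sources. Note the GPU
// kernels clamp sentinels to INF, so an exact comparison may first need the
// same clamp applied to this result.
static void floyd_warshall_cpu(unsigned int *dist, int n)
{
    for (int k = 0; k < n; ++k)
        for (int i = 0; i < n; ++i) {
            const unsigned int dik = dist[i * n + k];     // distance i -> k, fixed for this row
            for (int j = 0; j < n; ++j) {
                const unsigned int cand = dik + dist[k * n + j];  // relax i -> k -> j
                if (cand < dist[i * n + j])
                    dist[i * n + j] = cand;
            }
        }
}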
1e674555121c815fedbd4933e9d2258b03903d22.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <helper_cuda.h> //#include <cutil_inline.h> #include <iostream> #include <layer_kernels.cuh> #include <layer.cuh> #include <data.cuh> #include <util.cuh> #include <cudaconv2.cuh> #include <matrix.h> using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) : _convNet(convNet), _trans(trans), _dropout_mask(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _numGradProducersNext = 0; _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _conserveMem = pyDictGetInt(paramsDict, "conserveMem"); _dropout_prob = pyDictGetFloat(paramsDict, "dropout"); _outputs = _actsTarget < 0 ? new NVMatrix() : NULL; _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL; } void Layer::fpropNext(PASS_TYPE passType) { for (int i = 0; i < _next.size(); i++) { _next[i]->fprop(passType); } } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_conserveMem && _actsGradTarget < 0) { getActsGrad().truncate(); } if (_conserveMem) { getActs().truncate(); } } void Layer::fprop(PASS_TYPE passType) { _rcvdFInputs += 1; if (_rcvdFInputs == _prev.size()) { NVMatrixV v; for (int i = 0; i < _prev.size(); i++) { v.push_back(&_prev[i]->getActs()); } fprop(v, passType); } } void Layer::fprop(NVMatrix& v, PASS_TYPE passType) { NVMatrixV vl; vl.push_back(&v); fprop(vl, passType); } void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) { assert(v.size() == _prev.size()); _inputs.clear(); _inputs.insert(_inputs.begin(), v.begin(), v.end()); _outputs = _actsTarget < 0 ? 
_outputs : _inputs[_actsTarget]; _rcvdFInputs = _prev.size(); for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) { (*it)->transpose(_trans); } getActs().transpose(_trans); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType); } // Then add the rest of the inputs to that for (int i = 0; i < _prev.size(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType); } } if (passType != PASS_TEST && _dropout_prob < 1.0 ) { _dropout_mask.resize(getActs().getNumRows(), getActs().getNumCols()); _dropout_mask.randomizeUniform(); _dropout_mask.smallerThanScalar(_dropout_prob); getActs().eltwiseMult(_dropout_mask); } if (passType == PASS_TEST && _dropout_prob < 1.0) { getActs().scale(_dropout_prob); } fpropNext(passType); } void Layer::bprop(PASS_TYPE passType) { if (_rcvdBInputs == _numGradProducersNext) { _rcvdBInputs++; // avoid doing bprop computation twice bprop(getActsGrad(), passType); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType) { v.transpose(_trans); for (int i = 0; i < _prev.size(); i++) { _prev[i]->getActs().transpose(_trans); _prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); if ( passType != PASS_TEST && _dropout_prob < 1.0) { // passType will never be PASS_TEST Here v.eltwiseMult( _dropout_mask ); } bpropCommon(v, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer() && _actsGradTarget != i) { bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[i]->incRcvdBInputs(); } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) { bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[_actsGradTarget]->incRcvdBInputs(); } } truncBwdActs(); if (isGradProducer()) { for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer()) { _prev[i]->bprop(passType); } } } } void Layer::reset() { _rcvdFInputs = 0; _rcvdBInputs = 0; } string& Layer::getName() { return _name; } string& Layer::getType() { return _type; } int Layer::getRcvdFInputs() { return _rcvdFInputs; } int Layer::getRcvdBInputs() { return _rcvdBInputs; } int Layer::incRcvdBInputs() { return ++_rcvdBInputs; } void Layer::addNext(Layer* l) { _next.push_back(l); _numGradProducersNext += l->isGradProducer(); } void Layer::addPrev(Layer* l) { _prev.push_back(l); } void Layer::postInit() { // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers) { for (int i = 0; i < _prev.size(); i++) { _gradConsumer |= _prev[i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? 
bool Layer::isGradProducer() { return true; } vector<Layer*>& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { assert(_outputs != NULL); return *_outputs; } NVMatrix& Layer::getActsGrad() { assert(_actsGrad != NULL); return *_actsGrad; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { _neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron")); } void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0); } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->activate(*_inputs[0], getActs()); } /* * ======================= * WeightLayer * ======================= */ WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) : Layer(convNet, paramsDict, trans) { MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); float epsB = pyDictGetFloat(paramsDict, "epsB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true); // Epsilons for finite-difference gradient checking operation _wStep = 0.001; _bStep = 0.002; delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) { if (_biases->getEps() > 0) { bpropBiases(v, passType); } for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void WeightLayer::updateWeights() { _weights.update(); _biases->update(); } void WeightLayer::copyToCPU() { _weights.copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights.copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradients() { for (int i = 0; i < _weights.getSize(); i++) { _convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]); } _convNet->checkGradient(_name + " biases", _bStep, *_biases); } 
Weights& WeightLayer::getWeights(int idx) { return _weights[idx]; } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) { _wStep = 0.1; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose(); _prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumRows(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 0, 0, scaleBGrad); } void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumRows(); NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose(); float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; _weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad) : WeightLayer(convNet, paramsDict, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _randSparse = pyDictGetIntV(paramsDict, "randSparse"); _overSample = pyDictGetIntV(paramsDict, "overSample"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = pyDictGetInt(paramsDict, "modules"); // It's a vector on the heap to be consistent with all the others... 
_filterConns = new vector<FilterConns>(); PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns"); for (int i = 0; i < _randSparse->size(); i++) { FilterConns fc; if (_randSparse->at(i)) { fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i)); } _filterConns->push_back(fc); } } void LocalLayer::copyToGPU() { WeightLayer::copyToGPU(); for (int i = 0; i < _prev.size(); i++) { if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity hipMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i)); hipMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i), hipMemcpyHostToDevice); getLastCudaError("hipMemcpy: failed"); } } } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) { _partialSum = pyDictGetInt(paramsDict, "partialSum"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(_biases->getW()); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(_biases->getW()); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, 0, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad(); float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 
1 : 0; if (_randSparse->at(inpIdx)) { convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } else { convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } if (_partialSum > 0) { scaleTargets = _weights[inpIdx].getNumUpdates() > 0; _weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad(); convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (_overSample->at(inpIdx) > 1) { _actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx)); _actGradTmp.sum(0, _prev[inpIdx]->getActsGrad()); _prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols()); } } else { convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); if (_conserveMem) { _weightGradTmp.truncate(); _actGradTmp.truncate(); } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 
1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases if (_randSparse->at(inpIdx)) { localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } else { localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); input.addVector(max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); NVMatrix& sum = getActs().sum(1); getActs().eltwiseDivideByVector(sum); delete &max; delete &sum; } void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg"; if (doLogregGrad) { NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); } else { computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } } /* * ======================= * SliceLayer * ======================= */ SliceLayer::SliceLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _startX = pyDictGetInt(paramsDict, "startX"); _endX = pyDictGetInt(paramsDict, "endX"); } void SliceLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_outputs != NULL) { delete _outputs; // } assert(inpIdx == 0); // only one input can be accepted for now _outputs = &(_inputs[inpIdx]->slice(_startX, _endX + 1, 0, -1)); } SliceLayer::~SliceLayer() { if (_outputs != NULL) { delete _outputs; } } void SliceLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { // previous layer must use act _prev[inpIdx]->getActsGrad().resize(_prev[inpIdx]->getActs()); NVMatrix &sliced_actsgrads = 
(_prev[inpIdx]->getActsGrad()).slice(_startX, _endX + 1,0,-1); // a view, not copy if (scaleTargets == 0) { _prev[inpIdx]->getActsGrad().inRangeInc(1,0); // set all to zero sliced_actsgrads.add(v); } else { sliced_actsgrads.add(v); } delete &sliced_actsgrads; } /* * ======================= * ConcatenationLayer * ======================= */ ConcatenationLayer::ConcatenationLayer(ConvNet* convnet, PyObject* paramsDict) : Layer(convnet, paramsDict,false) { _numOutputs = pyDictGetInt(paramsDict, "outputs"); _copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets"); _copyOffsets->push_back(_numOutputs); } ConcatenationLayer::~ConcatenationLayer() { delete _copyOffsets; } void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols()); _inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0); } void ConcatenationLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view _prev[inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1); delete &copySrc; } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { _inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs()); } else { getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx)); } } void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0 ) { v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad()); } else { assert(&_prev[inpIdx]->getActsGrad() != &v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } } /* * ======================= * EltwiseMulLayer * ======================= */ EltwiseMulLayer::EltwiseMulLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMulLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { // input ==> getActs() _inputs[inpIdx]->copy(getActs()); } else { getActs().eltwiseMult(*_inputs[inpIdx]); } } void EltwiseMulLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { // The number of input is exactly two v.eltwiseMult(_prev[1 - inpIdx]->getActs(), _prev[inpIdx]->getActsGrad()); } else { NVMatrix temp_grads; v.eltwiseMult(_prev[1 - inpIdx]->getActs(), temp_grads); _prev[inpIdx]->getActsGrad().add(temp_grads); } } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { // First input, do nothing _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * ForwardLayer * ======================= */ ForwardLayer::ForwardLayer(ConvNet* 
convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _random_type = pyDictGetString(paramsDict, "randomtype"); _pass_gradients = pyDictGetInt(paramsDict, "passgradients"); if (_pass_gradients != 0) { _pass_gradients= 1; } _add_noise = false; if (_random_type == "gauss") { float mean = pyDictGetFloat(paramsDict, "mean"); float sigma = pyDictGetFloat(paramsDict, "sigma"); _params.push_back(mean); _params.push_back(sigma); _add_noise = true; } else { assert(_random_type == "none"); } } void ForwardLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx==0); _inputs[inpIdx]->copy(getActs()); // only process gaussian noise now if (_random_type == "gauss") { getActs().addGaussianNoise(_params[1]); if (_params[0] != 0) { getActs().addScalar(_params[0]); } } } void ForwardLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _pass_gradients); } void ForwardLayer::set_params(const float* params, int len) { _params.clear(); for (int i = 0; i < len; ++i) { _params.push_back(params[i]); } } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); } void DataLayer::fprop(PASS_TYPE passType) { throw string("No dava given!"); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { } void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) { _outputs = data[_dataIdx]; fpropNext(passType); } bool DataLayer::isGradProducer() { return false; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _channels = pyDictGetInt(paramsDict, "channels"); _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) { string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNet, paramsDict); } else if(_pool == "avg") { return *new AvgPoolLayer(convNet, paramsDict); } throw string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { 
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _hFilter = pyDictGetMatrix(paramsDict, "filter"); } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } // This is here just for completeness' sake. Why would you backpropagate // through a blur filter? void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad(); convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1); convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * 
===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow); } void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); if (_conserveMem) { _denoms.truncate(); } } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow); } void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); if (_conserveMem) { _meanDiffs.truncate(); } } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(PASS_TYPE passType) { if (_coeff != 0) { Layer::bprop(passType); } } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { doublev& v = *new doublev(); v.insert(v.begin(), _costv.begin(), _costv.end()); return v; } CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) { if (type == "cost.logreg") { return *new LogregCostLayer(convNet, paramsDict); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNet, paramsDict); } else if (type == "cost.eltlogreg") { return *new EltwiseLogregCostLayer(convNet, paramsDict); } else if (type == "cost.eltl2svm") { return *new EltwiseL2SVMCostLayer(convNet, 
paramsDict); } else if (type == "cost.loglikegauss") { return *new LoglikeGaussianCostLayer(convNet, paramsDict); } else if (type == "cost.ssvm") { return *new SSVMCostLayer(convNet, paramsDict); } else if (type == "cost.logistic") { return *new LogisticCostLayer(convNet, paramsDict); } else if (type == "cost.const") { return *new ConstCostLayer(convNet, paramsDict); } throw string("Unknown cost layer type ") + type; } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& probs = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax"; if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _inputs[0]->apply(NVMatrixOps::Square(), getActs()); _costv.clear(); _costv.push_back(getActs().sum()); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff); } /* * ===================== * EltwiseLogregCostLayer Note: This is logistic + y_i log x_i layer given input z_i, the output -logprob is -y_i log x_i where x_i = logistic(z_i) Combine these just for numerical consideration * ===================== */ EltwiseLogregCostLayer::EltwiseLogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void EltwiseLogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // Do fprop once if (inpIdx == 0) { NVMatrix& indmap = *_inputs[0]; NVMatrix& predmap = *_inputs[1]; int numCases = indmap.getNumCols(); int numTasks = indmap.getNumRows(); NVMatrix& indlogpred = getActs(), correctprobs; computeEltwiseLogregCost(indmap, predmap, indlogpred, correctprobs); _costv.clear(); _costv.push_back(-indlogpred.sum()); _costv.push_back(numCases - correctprobs.sum() / numTasks); } } void EltwiseLogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& indmap = _prev[0]->getActs(); NVMatrix& predmap = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); bool doWork = _prev[1]->getNext().size() > 0; assert( doWork );// Always do work computeEltwiseLogregGrad(indmap, predmap, target, 
scaleTargets == 1, _coeff); } /* * ===================== * EltwiseL2SVMCostLayer Calculate the following cost function 1/2 * _coeff * max(_a - y * (lables - _b), 0)^2 * ===================== */ EltwiseL2SVMCostLayer::EltwiseL2SVMCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { _a = pyDictGetFloat(paramsDict, "a"); _b = pyDictGetFloat(paramsDict, "b"); } void EltwiseL2SVMCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& y = *_inputs[1]; NVMatrix& pre_grad = getActs(), all_cost; int numCases = labels.getNumElements(); computeEltwiseL2SVMCost(labels, y, pre_grad, all_cost, _a, _b); _costv.clear(); _costv.push_back( all_cost.sum() * 0.5 / numCases); // without multiplied by _coeff } } void EltwiseL2SVMCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& grad = _prev[1]->getActsGrad(); grad.resize(labels); computeEltwiseL2SVMGrad(labels, getActs(), grad, scaleTargets == 1, _b, _coeff); } LoglikeGaussianCostLayer:: LoglikeGaussianCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { _use_ubound =(pyDictGetInt(paramsDict, "use_ubound")!=0); _use_lbound =(pyDictGetInt(paramsDict, "use_lbound")!=0); PyObject* pstring = Py_BuildValue("s", "close_form_freq"); if (PyDict_Contains(paramsDict, pstring)) { _close_form_freq = pyDictGetInt(paramsDict, "close_form_freq"); } else { _close_form_freq = -1; } Py_DECREF(pstring); pstring = Py_BuildValue("s", "use_log_weights"); if (PyDict_Contains(paramsDict, pstring)) { _use_log_weights = pyDictGetInt(paramsDict, "use_log_weights")!=0; } else { _use_log_weights = false; } Py_DECREF(pstring); printf("C close_form_freq = %d\n", _close_form_freq); _close_form_update_count = 0; if (_use_ubound) { floatv &ubound_list = *pyDictGetFloatV(paramsDict, "ubound"); _ubound = ubound_list[0]; delete &ubound_list; printf("Use upper bound, ubound = %.6f\n", _ubound); } else { _ubound = 0; } if (_use_lbound) { floatv &lbound_list = *pyDictGetFloatV(paramsDict, "lbound"); _lbound = lbound_list[0]; printf("Use lower bound, lbound = %.6f\n", _lbound); delete &lbound_list; } else { _lbound = 0; } // printf("Before getting the weights matrix\n"); MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); //I copy from weightlayer, It is not good though for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer assert(0); // This should not happen in principle _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { assert(0); // This should not happen in principle // WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); // Weights* srcWeights = &srcLayer.getWeights(matrixIdx); // 
_weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], false)); } } delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void LoglikeGaussianCostLayer::updateWeights() { // For _use_log_weights WILL NOT AFFECT CLOSE FORM UPDATING if (_close_form_freq <= 0) { _weights.update(); } else { // If we update the sigma in close form, we copy the weights from getInc() // when time is right if (_close_form_update_count >= _close_form_freq) { for (int i = 0; i < _weights.getSize(); ++i) { _weights[i].getInc().copy(_weights[i].getW()); _weights[i].getW().scale(1.0/_close_form_update_count); } _close_form_update_count = 0; } } NVMatrix mask; if (_use_lbound) { _weights[0].getW().biggerThanScalar( _lbound, mask); _weights[0].getW().eltwiseMult(mask); _weights[0].getW().add(mask, -_lbound); _weights[0].getW().addScalar(_lbound); } if (_use_ubound) { _weights[0].getW().smallerThanScalar( _ubound, mask); _weights[0].getW().eltwiseMult(mask); _weights[0].getW().add(mask, -_ubound); _weights[0].getW().addScalar(_ubound); } printf("Max weights = %.6f, min weights = %.6f\n", _weights[0].getW().max(), \ _weights[0].getW().min()); // (Maybe modify nvmatrix.cu later). Fortunately, weights[0] will not be a large matrix // The computation should be acceptable } void LoglikeGaussianCostLayer::copyToCPU() { _weights.copyToCPU(); } void LoglikeGaussianCostLayer::copyToGPU() { _weights.copyToGPU(); } Weights& LoglikeGaussianCostLayer::getWeights(int idx) { assert( idx == 0); return _weights[idx]; } void LoglikeGaussianCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); float sqdiff_part, prior_part, loglike; _inputs[inpIdx]->apply(NVMatrixOps::Square(), getActs()); int num_data = getActs().getNumCols(); float init_act_sum = getActs().sum(); if (_use_log_weights) { NVMatrix exp_neg2weights; _weights[0].getW().scale(-2, exp_neg2weights); exp_neg2weights.apply(NVMatrixOps::Exp()); getActs().eltwiseMultByVector(exp_neg2weights); sqdiff_part = getActs().sum() * 0.5; prior_part = _weights[0].getW().sum() * num_data; } else { // NVMatrix result_mat; NVMatrix square_weight; _weights[0].getW().apply(NVMatrixOps::Square(), square_weight); // printf("\n=====Init act=%.6f\n", getActs().sum()); getActs().eltwiseDivideByVector(square_weight); square_weight.apply(NVMatrixOps::Log()); sqdiff_part = getActs().sum() * 0.5; prior_part = square_weight.sum()* num_data * 0.5; } loglike = sqdiff_part + prior_part; // printf("sqdiff part = %.6f, prior_part = %.6f\n", sqdiff_part, prior_part); // Keep consistent (with squarecost layer), I do the sum instead of mean _costv.clear(); _costv.push_back(loglike); _costv.push_back(init_act_sum); _costv.push_back(sqdiff_part); _costv.push_back(prior_part); } void LoglikeGaussianCostLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) { // v is ? for cost layer? 
// Don't need to use v anyway for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void LoglikeGaussianCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (_use_log_weights) { NVMatrix exp_neg2weights, grads; _weights[0].getW().scale(-2, exp_neg2weights); exp_neg2weights.apply(NVMatrixOps::Exp()); _inputs[0]->eltwiseMultByVector(exp_neg2weights, grads); _prev[inpIdx]->getActsGrad().add(grads, scaleTargets, -_coeff); } else { NVMatrix square_weight, grads; _weights[0].getW().apply(NVMatrixOps::Square(), square_weight); _inputs[0]->eltwiseDivideByVector(square_weight, grads); // 2 is cancelled by 1/2 for each term _prev[inpIdx]->getActsGrad().add(grads, scaleTargets, -_coeff); } } void LoglikeGaussianCostLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { NVMatrix res; assert(inpIdx== 0); int ncases = getActs().getNumCols(); if (_close_form_freq <= 0) { NVMatrix res_avg; float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrads = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / ncases; if (_use_log_weights) { //getActs() == e^(-2t) * || y_i - \hat{y}_i ||^2 getActs().addScalar(-1,1,res); res.sum(1, res_avg); _weights[inpIdx].getInc().add(res_avg, scaleInc, -scaleGrads * _coeff); } else { getActs().addScalar(-1, 1, res); res.sum(1,res_avg); res_avg.eltwiseDivideByVector(_weights[0].getW()); _weights[inpIdx].getInc().add(res_avg, scaleInc, -scaleGrads * _coeff); } } else { //store variance in getInc() NVMatrix abs_inputs; NVMatrix var_inputs; _inputs[inpIdx]->apply(NVMatrixOps::Abs(), abs_inputs); abs_inputs.sum(1, var_inputs); if (_close_form_update_count == 0) { _weights[inpIdx].getInc().add(var_inputs, 0, 1); } else { _weights[inpIdx].getInc().add(var_inputs,1,1); } _close_form_update_count += ncases; printf("abs sum = %.6f\n", var_inputs.sum()); printf("close_form_update_count = %d\n", _close_form_update_count); } } /* * ===================== * SSVMCostLayer Calculate the following cost function max_y \delta(y,y_i) + <\Phi(x_i, y), w > - < \Phi(x_i, y_i),w> * ===================== */ SSVMCostLayer::SSVMCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { _num_groups = pyDictGetInt(paramsDict, "groups"); } SSVMCostLayer::~SSVMCostLayer() { // pass } void SSVMCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // calculate the cost once if (inpIdx == 0) { // Only pick one max at the time NVMatrix& ind = *_inputs[0]; NVMatrix& pred = *_inputs[1]; NVMatrix act; int ndata = ind.getNumCols(); pred.copy(act); act.subtract(ind); _act_max_ind.resize(ind); _act_max_ind.apply(NVMatrixOps::Zero()); _act_max_value.resize(_num_groups, ndata); int num_data = ind.getNumCols(); // do sth here computeSSVMCost(ind, act, _act_max_ind, _act_max_value); _costv.clear(); _costv.push_back(_act_max_value.sum() + _num_groups * num_data); } } void SSVMCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); // because the pred is the second input NVMatrix& ind = *_inputs[0]; _act_max_ind.subtract(ind); _prev[inpIdx]->getActsGrad().add(_act_max_ind, scaleTargets, -_coeff); } /* * ===================== * LogisticCostLayer Calculate the following cost function log ( 1 + e^(ax) )/a * ===================== */ 
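// Gradient sketch for LogisticCostLayer below (assuming SoftReluNeuron implements the
// softplus g(z) = log(1 + e^z) with the usual derivative g'(z) = 1/(1 + e^(-z))):
// with z = a*(x - u), the per-element cost is g(z)/a, and d(cost)/dx = g'(z), i.e. the
// logistic sigmoid of a*(x - u). This is why fpropActs feeds the transformed input
// a*(x - u) to _neuron and divides the summed activations by _a, and why bpropActs can
// delegate the input gradient to _neuron->computeInputGrad seeded with -_coeff.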
LogisticCostLayer::LogisticCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { _a = pyDictGetFloat(paramsDict, "a"); _u = pyDictGetFloat(paramsDict, "u"); _neuron = new SoftReluNeuron(); } LogisticCostLayer::~LogisticCostLayer() { // pass } // Acts will store g = log ( 1 + e^(a(x-u))) // void LogisticCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (_u == 0 && _a == 1) { _neuron->activate( *_inputs[0], getActs()); } else { NVMatrix transformed_input; _inputs[0]->copy(transformed_input); if (_u != 0) { transformed_input.addScalar(-_u); } if (_a != 1) { transformed_input.scale(_a); } // softRelu will never use the input again (its gradient only needs the output), so passing a temporary is safe _neuron->activate(transformed_input, getActs()); } _costv.clear(); _costv.push_back(getActs().sum()/_a); } void LogisticCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); v.resize(_inputs[0]->getNumRows(), _inputs[0]->getNumCols()); v.apply(NVMatrixOps::Zero()); v.addScalar(-_coeff); _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0); } /* * ===================== * ConstCostLayer Calculates the cost function L(x) = x; the gradient is always one * ===================== */ ConstCostLayer::ConstCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { } ConstCostLayer::~ConstCostLayer() { // pass } void ConstCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); _inputs[0]->copy(getActs()); _costv.clear(); _costv.push_back(getActs().sum()); } void ConstCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); v.resize(getActs().getNumRows(), getActs().getNumCols()); v.apply(NVMatrixOps::One()); _prev[0]->getActsGrad().add(v, scaleTargets > 0, -_coeff); }
1e674555121c815fedbd4933e9d2258b03903d22.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <helper_cuda.h> //#include <cutil_inline.h> #include <iostream> #include <layer_kernels.cuh> #include <layer.cuh> #include <data.cuh> #include <util.cuh> #include <cudaconv2.cuh> #include <matrix.h> using namespace std; /* * ======================= * Layer * ======================= */ Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) : _convNet(convNet), _trans(trans), _dropout_mask(trans) { _name = pyDictGetString(paramsDict, "name"); _type = pyDictGetString(paramsDict, "type"); _numGradProducersNext = 0; _foundGradConsumers = false; _gradConsumer = pyDictGetInt(paramsDict, "gradConsumer"); _actsTarget = pyDictGetInt(paramsDict, "actsTarget"); _actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget"); _conserveMem = pyDictGetInt(paramsDict, "conserveMem"); _dropout_prob = pyDictGetFloat(paramsDict, "dropout"); _outputs = _actsTarget < 0 ? new NVMatrix() : NULL; _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL; } void Layer::fpropNext(PASS_TYPE passType) { for (int i = 0; i < _next.size(); i++) { _next[i]->fprop(passType); } } void Layer::truncBwdActs() { // Only truncate actsGrad if I own it if (_conserveMem && _actsGradTarget < 0) { getActsGrad().truncate(); } if (_conserveMem) { getActs().truncate(); } } void Layer::fprop(PASS_TYPE passType) { _rcvdFInputs += 1; if (_rcvdFInputs == _prev.size()) { NVMatrixV v; for (int i = 0; i < _prev.size(); i++) { v.push_back(&_prev[i]->getActs()); } fprop(v, passType); } } void Layer::fprop(NVMatrix& v, PASS_TYPE passType) { NVMatrixV vl; vl.push_back(&v); fprop(vl, passType); } void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) { assert(v.size() == _prev.size()); _inputs.clear(); _inputs.insert(_inputs.begin(), v.begin(), v.end()); _outputs = _actsTarget < 0 ? 
_outputs : _inputs[_actsTarget]; _rcvdFInputs = _prev.size(); for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) { (*it)->transpose(_trans); } getActs().transpose(_trans); // First do fprop on the input whose acts matrix I'm sharing, if any if (_actsTarget >= 0) { fpropActs(_actsTarget, 0, passType); } // Then add the rest of the inputs to that for (int i = 0; i < _prev.size(); i++) { if (i != _actsTarget) { fpropActs(i, _actsTarget >= 0 || i > 0, passType); } } if (passType != PASS_TEST && _dropout_prob < 1.0 ) { _dropout_mask.resize(getActs().getNumRows(), getActs().getNumCols()); _dropout_mask.randomizeUniform(); _dropout_mask.smallerThanScalar(_dropout_prob); getActs().eltwiseMult(_dropout_mask); } if (passType == PASS_TEST && _dropout_prob < 1.0) { getActs().scale(_dropout_prob); } fpropNext(passType); } void Layer::bprop(PASS_TYPE passType) { if (_rcvdBInputs == _numGradProducersNext) { _rcvdBInputs++; // avoid doing bprop computation twice bprop(getActsGrad(), passType); } } void Layer::bprop(NVMatrix& v, PASS_TYPE passType) { v.transpose(_trans); for (int i = 0; i < _prev.size(); i++) { _prev[i]->getActs().transpose(_trans); _prev[i]->getActsGrad().transpose(_trans); } getActs().transpose(_trans); if ( passType != PASS_TEST && _dropout_prob < 1.0) { // passType will never be PASS_TEST Here v.eltwiseMult( _dropout_mask ); } bpropCommon(v, passType); if (isGradProducer()) { // First propagate activity gradient to all layers whose activity // gradient matrix I'm definitely not sharing. for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer() && _actsGradTarget != i) { bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[i]->incRcvdBInputs(); } } // Then propagate activity gradient to the layer whose activity gradient // matrix I'm sharing, if any. if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) { bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType); _prev[_actsGradTarget]->incRcvdBInputs(); } } truncBwdActs(); if (isGradProducer()) { for (int i = 0; i < _prev.size(); i++) { if (_prev[i]->isGradConsumer()) { _prev[i]->bprop(passType); } } } } void Layer::reset() { _rcvdFInputs = 0; _rcvdBInputs = 0; } string& Layer::getName() { return _name; } string& Layer::getType() { return _type; } int Layer::getRcvdFInputs() { return _rcvdFInputs; } int Layer::getRcvdBInputs() { return _rcvdBInputs; } int Layer::incRcvdBInputs() { return ++_rcvdBInputs; } void Layer::addNext(Layer* l) { _next.push_back(l); _numGradProducersNext += l->isGradProducer(); } void Layer::addPrev(Layer* l) { _prev.push_back(l); } void Layer::postInit() { // _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs(); _actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad(); } // Does this layer, or some layer below it, need the gradient // for parameter updates? // Only weight layers should be grad consumers themselves. bool Layer::isGradConsumer() { if (!_foundGradConsumers) { for (int i = 0; i < _prev.size(); i++) { _gradConsumer |= _prev[i]->isGradConsumer(); } _foundGradConsumers = true; } return _gradConsumer; } // Does this layer produce gradient for layers below? 
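// The base implementation answers yes; layers that never produce one override it
// (DataLayer returns false, and CostLayer returns _coeff != 0, so a cost with a zero
// coefficient is skipped entirely during bprop).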
bool Layer::isGradProducer() { return true; } vector<Layer*>& Layer::getPrev() { return _prev; } vector<Layer*>& Layer::getNext() { return _next; } NVMatrix& Layer::getActs() { assert(_outputs != NULL); return *_outputs; } NVMatrix& Layer::getActsGrad() { assert(_actsGrad != NULL); return *_actsGrad; } /* * ======================= * NeuronLayer * ======================= */ NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { _neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron")); } void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0); } void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _neuron->activate(*_inputs[0], getActs()); } /* * ======================= * WeightLayer * ======================= */ WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) : Layer(convNet, paramsDict, trans) { MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases"); Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); float momB = pyDictGetFloat(paramsDict, "momB"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); float epsB = pyDictGetFloat(paramsDict, "epsB"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); Weights* srcWeights = &srcLayer.getWeights(matrixIdx); _weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad)); } } _biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true); // Epsilons for finite-difference gradient checking operation _wStep = 0.001; _bStep = 0.002; delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) { if (_biases->getEps() > 0) { bpropBiases(v, passType); } for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void WeightLayer::updateWeights() { _weights.update(); _biases->update(); } void WeightLayer::copyToCPU() { _weights.copyToCPU(); _biases->copyToCPU(); } void WeightLayer::copyToGPU() { _weights.copyToGPU(); _biases->copyToGPU(); } void WeightLayer::checkGradients() { for (int i = 0; i < _weights.getSize(); i++) { _convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]); } _convNet->checkGradient(_name + " biases", _bStep, *_biases); } 
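// _wStep and _bStep above are the finite-difference step sizes handed to
// ConvNet::checkGradient, which perturbs the weights/biases by that step and compares
// the resulting change in the cost against the analytic gradient computed by bprop
// (the differencing scheme itself is implemented in ConvNet, not in this layer).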
Weights& WeightLayer::getWeights(int idx) { return _weights[idx]; } /* * ======================= * FCLayer * ======================= */ FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) { _wStep = 0.1; _bStep = 0.01; } void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1); if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose(); _prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1); delete &weights_T; } void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumRows(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 0, 0, scaleBGrad); } void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumRows(); NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose(); float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; _weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad); delete &prevActs_T; } /* * ======================= * LocalLayer * ======================= */ LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad) : WeightLayer(convNet, paramsDict, false, useGrad) { _padding = pyDictGetIntV(paramsDict, "padding"); _stride = pyDictGetIntV(paramsDict, "stride"); _filterSize = pyDictGetIntV(paramsDict, "filterSize"); _channels = pyDictGetIntV(paramsDict, "channels"); _imgSize = pyDictGetIntV(paramsDict, "imgSize"); _numFilters = pyDictGetInt(paramsDict, "filters"); _groups = pyDictGetIntV(paramsDict, "groups"); _filterChannels = pyDictGetIntV(paramsDict, "filterChannels"); _randSparse = pyDictGetIntV(paramsDict, "randSparse"); _overSample = pyDictGetIntV(paramsDict, "overSample"); _filterPixels = pyDictGetIntV(paramsDict, "filterPixels"); _imgPixels = pyDictGetIntV(paramsDict, "imgPixels"); _modulesX = pyDictGetInt(paramsDict, "modulesX"); _modules = pyDictGetInt(paramsDict, "modules"); // It's a vector on the heap to be consistent with all the others... 
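// When randSparse is set for an input, the corresponding FilterConns entry holds the
// host- and device-side connectivity tables (hFilterConns / dFilterConns, one int per
// group-and-filterChannel pair) recording which input channels each filter group is
// wired to; copyToGPU below uploads them with cudaMalloc/cudaMemcpy.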
_filterConns = new vector<FilterConns>(); PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns"); for (int i = 0; i < _randSparse->size(); i++) { FilterConns fc; if (_randSparse->at(i)) { fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i)); } _filterConns->push_back(fc); } } void LocalLayer::copyToGPU() { WeightLayer::copyToGPU(); for (int i = 0; i < _prev.size(); i++) { if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity cudaMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i)); cudaMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i), cudaMemcpyHostToDevice); getLastCudaError("cudaMemcpy: failed"); } } } /* * ======================= * ConvLayer * ======================= */ ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) { _partialSum = pyDictGetInt(paramsDict, "partialSum"); _sharedBiases = pyDictGetInt(paramsDict, "sharedBiases"); } void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { if (_sharedBiases) { getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters); getActs().addVector(_biases->getW()); getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules)); } else { getActs().addVector(_biases->getW()); } } } void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases; if (_sharedBiases) { v.reshape(_numFilters, v.getNumElements() / _numFilters); _biases->getGrad().addSum(v, 1, 0, scaleBGrad); v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules)); } else { _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } } void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad(); float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 
1 : 0; if (_randSparse->at(inpIdx)) { convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } else { convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad); } if (_partialSum > 0) { scaleTargets = _weights[inpIdx].getNumUpdates() > 0; _weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters); _weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1); _weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters); } } void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad(); convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); if (_overSample->at(inpIdx) > 1) { _actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx)); _actGradTmp.sum(0, _prev[inpIdx]->getActsGrad()); _prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols()); } } else { convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } void ConvLayer::truncBwdActs() { LocalLayer::truncBwdActs(); if (_conserveMem) { _weightGradTmp.truncate(); _actGradTmp.truncate(); } } /* * ======================= * LocalUnsharedLayer * ======================= */ LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) { } void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } if (scaleTargets == 0) { getActs().addVector(_biases->getW()); } } void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleBGrad = passType == PASS_GC ? 
1 : _biases->getEps() / numCases; _biases->getGrad().addSum(v, 1, 0, scaleBGrad); } void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { int numCases = v.getNumCols(); float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases if (_randSparse->at(inpIdx)) { localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } else { localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad); } } void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_randSparse->at(inpIdx)) { localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } else { localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1); } } /* * ======================= * SoftmaxLayer * ======================= */ SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) { } void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& input = *_inputs[0]; NVMatrix& max = input.max(1); input.addVector(max, -1, getActs()); getActs().apply(NVMatrixOps::Exp()); NVMatrix& sum = getActs().sum(1); getActs().eltwiseDivideByVector(sum); delete &max; delete &sum; } void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg"; if (doLogregGrad) { NVMatrix& labels = _next[0]->getPrev()[0]->getActs(); float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff(); computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff); } else { computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1); } } /* * ======================= * SliceLayer * ======================= */ SliceLayer::SliceLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _startX = pyDictGetInt(paramsDict, "startX"); _endX = pyDictGetInt(paramsDict, "endX"); } void SliceLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (_outputs != NULL) { delete _outputs; // } assert(inpIdx == 0); // only one input can be accepted for now _outputs = &(_inputs[inpIdx]->slice(_startX, _endX + 1, 0, -1)); } SliceLayer::~SliceLayer() { if (_outputs != NULL) { delete _outputs; } } void SliceLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { // previous layer must use act _prev[inpIdx]->getActsGrad().resize(_prev[inpIdx]->getActs()); NVMatrix &sliced_actsgrads = 
(_prev[inpIdx]->getActsGrad()).slice(_startX, _endX + 1,0,-1); // a view, not a copy if (scaleTargets == 0) { _prev[inpIdx]->getActsGrad().inRangeInc(1,0); // set all to zero sliced_actsgrads.add(v); } else { sliced_actsgrads.add(v); } delete &sliced_actsgrads; } /* * ======================= * ConcatenationLayer * ======================= */ ConcatenationLayer::ConcatenationLayer(ConvNet* convnet, PyObject* paramsDict) : Layer(convnet, paramsDict,false) { _numOutputs = pyDictGetInt(paramsDict, "outputs"); _copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets"); _copyOffsets->push_back(_numOutputs); } ConcatenationLayer::~ConcatenationLayer() { delete _copyOffsets; } void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols()); _inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0); } void ConcatenationLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view _prev[inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1); delete &copySrc; } /* * ======================= * EltwiseSumLayer * ======================= */ EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _coeffs = pyDictGetFloatV(paramsDict, "coeffs"); } void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { _inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs()); } else { getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx)); } } void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0 ) { v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad()); } else { assert(&_prev[inpIdx]->getActsGrad() != &v); _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx)); } } /* * ======================= * EltwiseMulLayer * ======================= */ EltwiseMulLayer::EltwiseMulLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMulLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { // input ==> getActs() _inputs[inpIdx]->copy(getActs()); } else { getActs().eltwiseMult(*_inputs[inpIdx]); } } void EltwiseMulLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { if (scaleTargets == 0) { // The number of inputs is exactly two v.eltwiseMult(_prev[1 - inpIdx]->getActs(), _prev[inpIdx]->getActsGrad()); } else { NVMatrix temp_grads; v.eltwiseMult(_prev[1 - inpIdx]->getActs(), temp_grads); _prev[inpIdx]->getActsGrad().add(temp_grads); } } /* * ======================= * EltwiseMaxLayer * ======================= */ EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 1) { // Nothing to do for the first input; on the second, take the elementwise max of inputs 0 and 1 _inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs()); } else if (inpIdx > 1) { getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]); } } void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0); } /* * ======================= * ForwardLayer * ======================= */ ForwardLayer::ForwardLayer(ConvNet* 
convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _random_type = pyDictGetString(paramsDict, "randomtype"); _pass_gradients = pyDictGetInt(paramsDict, "passgradients"); if (_pass_gradients != 0) { _pass_gradients= 1; } _add_noise = false; if (_random_type == "gauss") { float mean = pyDictGetFloat(paramsDict, "mean"); float sigma = pyDictGetFloat(paramsDict, "sigma"); _params.push_back(mean); _params.push_back(sigma); _add_noise = true; } else { assert(_random_type == "none"); } } void ForwardLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx==0); _inputs[inpIdx]->copy(getActs()); // only Gaussian noise is handled for now if (_random_type == "gauss") { getActs().addGaussianNoise(_params[1]); if (_params[0] != 0) { getActs().addScalar(_params[0]); } } } void ForwardLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[inpIdx]->getActsGrad().add(v, scaleTargets, _pass_gradients); } void ForwardLayer::set_params(const float* params, int len) { _params.clear(); for (int i = 0; i < len; ++i) { _params.push_back(params[i]); } } /* * ======================= * DataLayer * ======================= */ DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _dataIdx = pyDictGetInt(paramsDict, "dataIdx"); } void DataLayer::fprop(PASS_TYPE passType) { throw string("No data given!"); } void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { } void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) { _outputs = data[_dataIdx]; fpropNext(passType); } bool DataLayer::isGradProducer() { return false; } /* * ===================== * PoolLayer * ===================== */ PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _channels = pyDictGetInt(paramsDict, "channels"); _sizeX = pyDictGetInt(paramsDict, "sizeX"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _pool = pyDictGetString(paramsDict, "pool"); } PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) { string _pool = pyDictGetString(paramsDict, "pool"); if (_pool == "max") { return *new MaxPoolLayer(convNet, paramsDict); } else if(_pool == "avg") { return *new AvgPoolLayer(convNet, paramsDict); } throw string("Unknown pooling layer type ") + _pool; } /* * ===================== * AvgPoolLayer * ===================== */ AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler()); } void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1); } /* * ===================== * MaxPoolLayer * ===================== */ MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) { } void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler()); } void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { 
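// Max-pooling routes gradient only to the input position that attained the maximum in
// each pooling window, so the undo kernel needs both the pre-pooling activities
// (_prev[0]->getActs()) and the pooled outputs (getActs()) to locate those positions.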
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1); } /* * ===================== * NailbedLayer * ===================== */ NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _start = pyDictGetInt(paramsDict, "start"); _stride = pyDictGetInt(paramsDict, "stride"); _outputsX = pyDictGetInt(paramsDict, "outputsX"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1); } void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1); } /* * ===================== * GaussianBlurLayer * ===================== */ GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _hFilter = pyDictGetMatrix(paramsDict, "filter"); } void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1); convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1); } // This is here just for completeness' sake. Why would you backpropagate // through a blur filter? void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad(); convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1); convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1); } void GaussianBlurLayer::copyToGPU() { _filter.copyFromHost(*_hFilter, true); } /* * ===================== * ResizeLayer * ===================== */ ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _imgSize = pyDictGetInt(paramsDict, "imgSize"); _tgtSize = pyDictGetInt(paramsDict, "tgtSize"); _scale = pyDictGetFloat(paramsDict, "scale"); } void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale); } // Can't do this void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToYUVLayer * ===================== */ RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { } void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToYUV(*_inputs[0], getActs()); } // Can't do this void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * ===================== * RGBToLABLayer * ===================== */ RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _center = pyDictGetInt(paramsDict, "center"); } void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convRGBToLAB(*_inputs[0], getActs(), _center); } // Can't do this void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(false); } /* * 
===================== * ResponseNormLayer * ===================== */ ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) { _channels = pyDictGetInt(paramsDict, "channels"); _size = pyDictGetInt(paramsDict, "size"); _scale = pyDictGetFloat(paramsDict, "scale"); _pow = pyDictGetFloat(paramsDict, "pow"); } void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow); } void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ResponseNormLayer::truncBwdActs() { Layer::truncBwdActs(); if (_conserveMem) { _denoms.truncate(); } } /* * ===================== * CrossMapResponseNormLayer * ===================== */ CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _blocked = pyDictGetInt(paramsDict, "blocked"); } void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked); } void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1); } /* * ===================== * ContrastNormLayer * ===================== */ ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) { _imgSize = pyDictGetInt(paramsDict, "imgSize"); } void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { NVMatrix& images = *_inputs[0]; convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler()); _meanDiffs.add(images, -1, 1); convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow); } void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); } void ContrastNormLayer::truncBwdActs() { ResponseNormLayer::truncBwdActs(); if (_conserveMem) { _meanDiffs.truncate(); } } /* * ===================== * CostLayer * ===================== */ CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans) : Layer(convNet, paramsDict, trans) { _coeff = pyDictGetFloat(paramsDict, "coeff"); } float CostLayer::getCoeff() { return _coeff; } void CostLayer::bprop(PASS_TYPE passType) { if (_coeff != 0) { Layer::bprop(passType); } } bool CostLayer::isGradProducer() { return _coeff != 0; } doublev& CostLayer::getCost() { doublev& v = *new doublev(); v.insert(v.begin(), _costv.begin(), _costv.end()); return v; } CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) { if (type == "cost.logreg") { return *new LogregCostLayer(convNet, paramsDict); } else if (type == "cost.sum2") { return *new SumOfSquaresCostLayer(convNet, paramsDict); } else if (type == "cost.eltlogreg") { return *new EltwiseLogregCostLayer(convNet, paramsDict); } else if (type == "cost.eltl2svm") { return *new EltwiseL2SVMCostLayer(convNet, 
paramsDict); } else if (type == "cost.loglikegauss") { return *new LoglikeGaussianCostLayer(convNet, paramsDict); } else if (type == "cost.ssvm") { return *new SSVMCostLayer(convNet, paramsDict); } else if (type == "cost.logistic") { return *new LogisticCostLayer(convNet, paramsDict); } else if (type == "cost.const") { return *new ConstCostLayer(convNet, paramsDict); } throw string("Unknown cost layer type ") + type; } /* * ===================== * LogregCostLayer * ===================== */ LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // This layer uses its two inputs together if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& probs = *_inputs[1]; int numCases = labels.getNumElements(); NVMatrix& trueLabelLogProbs = getActs(), correctProbs; computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs); _costv.clear(); _costv.push_back(-trueLabelLogProbs.sum()); _costv.push_back(numCases - correctProbs.sum()); } } void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& probs = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); // Numerical stability optimization: if the layer below me is a softmax layer, let it handle // the entire gradient computation to avoid multiplying and dividing by a near-zero quantity. bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax"; if (doWork) { computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff); } } /* * ===================== * SumOfSquaresCostLayer * ===================== */ SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { _inputs[0]->apply(NVMatrixOps::Square(), getActs()); _costv.clear(); _costv.push_back(getActs().sum()); } void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { _prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff); } /* * ===================== * EltwiseLogregCostLayer Note: This is logistic + y_i log x_i layer given input z_i, the output -logprob is -y_i log x_i where x_i = logistic(z_i) Combine these just for numerical consideration * ===================== */ EltwiseLogregCostLayer::EltwiseLogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { } void EltwiseLogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // Do fprop once if (inpIdx == 0) { NVMatrix& indmap = *_inputs[0]; NVMatrix& predmap = *_inputs[1]; int numCases = indmap.getNumCols(); int numTasks = indmap.getNumRows(); NVMatrix& indlogpred = getActs(), correctprobs; computeEltwiseLogregCost(indmap, predmap, indlogpred, correctprobs); _costv.clear(); _costv.push_back(-indlogpred.sum()); _costv.push_back(numCases - correctprobs.sum() / numTasks); } } void EltwiseLogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& indmap = _prev[0]->getActs(); NVMatrix& predmap = _prev[1]->getActs(); NVMatrix& target = _prev[1]->getActsGrad(); bool doWork = _prev[1]->getNext().size() > 0; assert( doWork );// Always do work computeEltwiseLogregGrad(indmap, predmap, target, 
scaleTargets == 1, _coeff); } /* * ===================== * EltwiseL2SVMCostLayer Calculate the following cost function 1/2 * _coeff * max(_a - y * (lables - _b), 0)^2 * ===================== */ EltwiseL2SVMCostLayer::EltwiseL2SVMCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) { _a = pyDictGetFloat(paramsDict, "a"); _b = pyDictGetFloat(paramsDict, "b"); } void EltwiseL2SVMCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { if (inpIdx == 0) { NVMatrix& labels = *_inputs[0]; NVMatrix& y = *_inputs[1]; NVMatrix& pre_grad = getActs(), all_cost; int numCases = labels.getNumElements(); computeEltwiseL2SVMCost(labels, y, pre_grad, all_cost, _a, _b); _costv.clear(); _costv.push_back( all_cost.sum() * 0.5 / numCases); // without multiplied by _coeff } } void EltwiseL2SVMCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); NVMatrix& labels = _prev[0]->getActs(); NVMatrix& grad = _prev[1]->getActsGrad(); grad.resize(labels); computeEltwiseL2SVMGrad(labels, getActs(), grad, scaleTargets == 1, _b, _coeff); } LoglikeGaussianCostLayer:: LoglikeGaussianCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { _use_ubound =(pyDictGetInt(paramsDict, "use_ubound")!=0); _use_lbound =(pyDictGetInt(paramsDict, "use_lbound")!=0); PyObject* pstring = Py_BuildValue("s", "close_form_freq"); if (PyDict_Contains(paramsDict, pstring)) { _close_form_freq = pyDictGetInt(paramsDict, "close_form_freq"); } else { _close_form_freq = -1; } Py_DECREF(pstring); pstring = Py_BuildValue("s", "use_log_weights"); if (PyDict_Contains(paramsDict, pstring)) { _use_log_weights = pyDictGetInt(paramsDict, "use_log_weights")!=0; } else { _use_log_weights = false; } Py_DECREF(pstring); printf("C close_form_freq = %d\n", _close_form_freq); _close_form_update_count = 0; if (_use_ubound) { floatv &ubound_list = *pyDictGetFloatV(paramsDict, "ubound"); _ubound = ubound_list[0]; delete &ubound_list; printf("Use upper bound, ubound = %.6f\n", _ubound); } else { _ubound = 0; } if (_use_lbound) { floatv &lbound_list = *pyDictGetFloatV(paramsDict, "lbound"); _lbound = lbound_list[0]; printf("Use lower bound, lbound = %.6f\n", _lbound); delete &lbound_list; } else { _lbound = 0; } // printf("Before getting the weights matrix\n"); MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights"); MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc"); floatv& momW = *pyDictGetFloatV(paramsDict, "momW"); floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW"); floatv& wc = *pyDictGetFloatV(paramsDict, "wc"); // Source layers for shared weights intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices"); // Weight matrix indices (inside the above source layers) for shared weights intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices"); //I copy from weightlayer, It is not good though for (int i = 0; i < weightSourceLayerIndices.size(); i++) { int srcLayerIdx = weightSourceLayerIndices[i]; int matrixIdx = weightSourceMatrixIndices[i]; if (srcLayerIdx == convNet->getNumLayers()) { // Current layer assert(0); // This should not happen in principle _weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i])); } else if (srcLayerIdx >= 0) { assert(0); // This should not happen in principle // WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx)); // Weights* srcWeights = &srcLayer.getWeights(matrixIdx); // 
_weights.addWeights(*new Weights(*srcWeights, epsW[i])); } else { _weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], false)); } } delete &weightSourceLayerIndices; delete &weightSourceMatrixIndices; delete &hWeights; delete &hWeightsInc; delete &momW; delete &epsW; delete &wc; } void LoglikeGaussianCostLayer::updateWeights() { // For _use_log_weights WILL NOT AFFECT CLOSE FORM UPDATING if (_close_form_freq <= 0) { _weights.update(); } else { // If we update the sigma in close form, we copy the weights from getInc() // when time is right if (_close_form_update_count >= _close_form_freq) { for (int i = 0; i < _weights.getSize(); ++i) { _weights[i].getInc().copy(_weights[i].getW()); _weights[i].getW().scale(1.0/_close_form_update_count); } _close_form_update_count = 0; } } NVMatrix mask; if (_use_lbound) { _weights[0].getW().biggerThanScalar( _lbound, mask); _weights[0].getW().eltwiseMult(mask); _weights[0].getW().add(mask, -_lbound); _weights[0].getW().addScalar(_lbound); } if (_use_ubound) { _weights[0].getW().smallerThanScalar( _ubound, mask); _weights[0].getW().eltwiseMult(mask); _weights[0].getW().add(mask, -_ubound); _weights[0].getW().addScalar(_ubound); } printf("Max weights = %.6f, min weights = %.6f\n", _weights[0].getW().max(), \ _weights[0].getW().min()); // (Maybe modify nvmatrix.cu later). Fortunately, weights[0] will not be a large matrix // The computation should be acceptable } void LoglikeGaussianCostLayer::copyToCPU() { _weights.copyToCPU(); } void LoglikeGaussianCostLayer::copyToGPU() { _weights.copyToGPU(); } Weights& LoglikeGaussianCostLayer::getWeights(int idx) { assert( idx == 0); return _weights[idx]; } void LoglikeGaussianCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); float sqdiff_part, prior_part, loglike; _inputs[inpIdx]->apply(NVMatrixOps::Square(), getActs()); int num_data = getActs().getNumCols(); float init_act_sum = getActs().sum(); if (_use_log_weights) { NVMatrix exp_neg2weights; _weights[0].getW().scale(-2, exp_neg2weights); exp_neg2weights.apply(NVMatrixOps::Exp()); getActs().eltwiseMultByVector(exp_neg2weights); sqdiff_part = getActs().sum() * 0.5; prior_part = _weights[0].getW().sum() * num_data; } else { // NVMatrix result_mat; NVMatrix square_weight; _weights[0].getW().apply(NVMatrixOps::Square(), square_weight); // printf("\n=====Init act=%.6f\n", getActs().sum()); getActs().eltwiseDivideByVector(square_weight); square_weight.apply(NVMatrixOps::Log()); sqdiff_part = getActs().sum() * 0.5; prior_part = square_weight.sum()* num_data * 0.5; } loglike = sqdiff_part + prior_part; // printf("sqdiff part = %.6f, prior_part = %.6f\n", sqdiff_part, prior_part); // Keep consistent (with squarecost layer), I do the sum instead of mean _costv.clear(); _costv.push_back(loglike); _costv.push_back(init_act_sum); _costv.push_back(sqdiff_part); _costv.push_back(prior_part); } void LoglikeGaussianCostLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) { // v is ? for cost layer? 
// Don't need to use v anyway for (int i = 0; i < _weights.getSize(); i++) { if (_weights[i].getEps() > 0) { bpropWeights(v, i, passType); // Increment its number of updates _weights[i].incNumUpdates(); } } } void LoglikeGaussianCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (_use_log_weights) { NVMatrix exp_neg2weights, grads; _weights[0].getW().scale(-2, exp_neg2weights); exp_neg2weights.apply(NVMatrixOps::Exp()); _inputs[0]->eltwiseMultByVector(exp_neg2weights, grads); _prev[inpIdx]->getActsGrad().add(grads, scaleTargets, -_coeff); } else { NVMatrix square_weight, grads; _weights[0].getW().apply(NVMatrixOps::Square(), square_weight); _inputs[0]->eltwiseDivideByVector(square_weight, grads); // 2 is cancelled by 1/2 for each term _prev[inpIdx]->getActsGrad().add(grads, scaleTargets, -_coeff); } } void LoglikeGaussianCostLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) { NVMatrix res; assert(inpIdx== 0); int ncases = getActs().getNumCols(); if (_close_form_freq <= 0) { NVMatrix res_avg; float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom(); float scaleGrads = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / ncases; if (_use_log_weights) { //getActs() == e^(-2t) * || y_i - \hat{y}_i ||^2 getActs().addScalar(-1,1,res); res.sum(1, res_avg); _weights[inpIdx].getInc().add(res_avg, scaleInc, -scaleGrads * _coeff); } else { getActs().addScalar(-1, 1, res); res.sum(1,res_avg); res_avg.eltwiseDivideByVector(_weights[0].getW()); _weights[inpIdx].getInc().add(res_avg, scaleInc, -scaleGrads * _coeff); } } else { //store variance in getInc() NVMatrix abs_inputs; NVMatrix var_inputs; _inputs[inpIdx]->apply(NVMatrixOps::Abs(), abs_inputs); abs_inputs.sum(1, var_inputs); if (_close_form_update_count == 0) { _weights[inpIdx].getInc().add(var_inputs, 0, 1); } else { _weights[inpIdx].getInc().add(var_inputs,1,1); } _close_form_update_count += ncases; printf("abs sum = %.6f\n", var_inputs.sum()); printf("close_form_update_count = %d\n", _close_form_update_count); } } /* * ===================== * SSVMCostLayer Calculate the following cost function max_y \delta(y,y_i) + <\Phi(x_i, y), w > - < \Phi(x_i, y_i),w> * ===================== */ SSVMCostLayer::SSVMCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { _num_groups = pyDictGetInt(paramsDict, "groups"); } SSVMCostLayer::~SSVMCostLayer() { // pass } void SSVMCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { // calculate the cost once if (inpIdx == 0) { // Only pick one max at the time NVMatrix& ind = *_inputs[0]; NVMatrix& pred = *_inputs[1]; NVMatrix act; int ndata = ind.getNumCols(); pred.copy(act); act.subtract(ind); _act_max_ind.resize(ind); _act_max_ind.apply(NVMatrixOps::Zero()); _act_max_value.resize(_num_groups, ndata); int num_data = ind.getNumCols(); // do sth here computeSSVMCost(ind, act, _act_max_ind, _act_max_value); _costv.clear(); _costv.push_back(_act_max_value.sum() + _num_groups * num_data); } } void SSVMCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 1); // because the pred is the second input NVMatrix& ind = *_inputs[0]; _act_max_ind.subtract(ind); _prev[inpIdx]->getActsGrad().add(_act_max_ind, scaleTargets, -_coeff); } /* * ===================== * LogisticCostLayer Calculate the following cost function log ( 1 + e^(ax) )/a * ===================== */ 
LogisticCostLayer::LogisticCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { _a = pyDictGetFloat(paramsDict, "a"); _u = pyDictGetFloat(paramsDict, "u"); _neuron = new SoftReluNeuron(); } LogisticCostLayer::~LogisticCostLayer() { // pass } // Acts will store g = log ( 1 + e^(a(x-u))) // void LogisticCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); if (_u == 0 && _a == 1) { _neuron->activate( *_inputs[0], getActs()); } else { NVMatrix transformed_input; _inputs[0]->copy(transformed_input); if (_u != 0) { transformed_input.addScalar(-_u); } if (_a != 1) { transformed_input.scale(_a); } // I am sure softRelu will neverl use input _neuron->activate(transformed_input, getActs()); } _costv.clear(); _costv.push_back(getActs().sum()/_a); } void LogisticCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); v.resize(_inputs[0]->getNumRows(), _inputs[0]->getNumCols()); v.apply(NVMatrixOps::Zero()); v.addScalar(-_coeff); _neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0); } /* * ===================== * ConstCostlayer Calculate the following cost function L(x) = x The gradient will always be one * ===================== */ ConstCostLayer::ConstCostLayer(ConvNet* convNet, PyObject* paramsDict):CostLayer(convNet, paramsDict, false) { } ConstCostLayer::~ConstCostLayer() { // pass } void ConstCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); _inputs[0]->copy(getActs()); _costv.clear(); _costv.push_back(getActs().sum()); } void ConstCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) { assert(inpIdx == 0); v.resize(getActs().getNumRows(), getActs().getNumCols()); v.apply(NVMatrixOps::One()); _prev[0]->getActsGrad().add(v, scaleTargets > 0, -_coeff); }
e387829a27d3d6c71925eb77c1820161a025b68e.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { template<typename scalar_t, typename accscalar_t> struct MulScalarFunctor { MulScalarFunctor(accscalar_t b_): b(b_) {} __device__ scalar_t operator() (scalar_t a) const { return a * b; } private: accscalar_t b; }; template<typename scalar_t> struct DivFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a / b; } }; template<typename scalar_t> struct MulFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a * b; } }; // Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context] template<> struct MulFunctor<bool> { __device__ bool operator() (bool a, bool b) const { return a && b; } }; void div_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && iter.is_cpu_scalar(2)) { // optimization for floating-point types: if the second operand is a CPU // scalar, compute a * reciprocal(b). Note that this may lose one bit of // precision compared to computing the division. AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto inv_b = accscalar_t(1.0) / iter.scalar_value<accscalar_t>(2); iter.remove_operand(2); MulScalarFunctor<scalar_t, decltype(inv_b)> f(inv_b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { DivFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } void mul_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ true) && (iter.is_cpu_scalar(1) || iter.is_cpu_scalar(2))) { //if common dtype is half the scalar constant can overflow in half precision, and yet the result can //still be representable in the half dtype. Cast scalar to acc_type to have better accuracy AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "mul_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; int scalar_arg = iter.is_cpu_scalar(1) ? 1 : 2; auto b = iter.scalar_value<accscalar_t>(scalar_arg); iter.remove_operand(scalar_arg); const hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(iter.tensor(1))); MulScalarFunctor<scalar_t, decltype(b)> f(b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "mul_cuda", [&]() { MulFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } REGISTER_DISPATCH(div_stub, &div_kernel_cuda); REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda); }} // namespace at::native
e387829a27d3d6c71925eb77c1820161a025b68e.cu
#include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> #include <c10/cuda/CUDAGuard.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { template<typename scalar_t, typename accscalar_t> struct MulScalarFunctor { MulScalarFunctor(accscalar_t b_): b(b_) {} __device__ scalar_t operator() (scalar_t a) const { return a * b; } private: accscalar_t b; }; template<typename scalar_t> struct DivFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a / b; } }; template<typename scalar_t> struct MulFunctor { __device__ scalar_t operator() (scalar_t a, scalar_t b) const { return a * b; } }; // Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context] template<> struct MulFunctor<bool> { __device__ bool operator() (bool a, bool b) const { return a && b; } }; void div_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && iter.is_cpu_scalar(2)) { // optimization for floating-point types: if the second operand is a CPU // scalar, compute a * reciprocal(b). Note that this may lose one bit of // precision compared to computing the division. AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; auto inv_b = accscalar_t(1.0) / iter.scalar_value<accscalar_t>(2); iter.remove_operand(2); MulScalarFunctor<scalar_t, decltype(inv_b)> f(inv_b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() { DivFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } void mul_kernel_cuda(TensorIterator& iter) { if (!isIntegralType(iter.common_dtype(), /*includeBool*/ true) && (iter.is_cpu_scalar(1) || iter.is_cpu_scalar(2))) { //if common dtype is half the scalar constant can overflow in half precision, and yet the result can //still be representable in the half dtype. Cast scalar to acc_type to have better accuracy AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "mul_cuda", [&]() { using accscalar_t = at::acc_type<scalar_t, true>; int scalar_arg = iter.is_cpu_scalar(1) ? 1 : 2; auto b = iter.scalar_value<accscalar_t>(scalar_arg); iter.remove_operand(scalar_arg); const cuda::OptionalCUDAGuard device_guard(device_of(iter.tensor(1))); MulScalarFunctor<scalar_t, decltype(b)> f(b); gpu_kernel(iter, f); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "mul_cuda", [&]() { MulFunctor<scalar_t> f; gpu_kernel_with_scalars(iter, f); }); } } REGISTER_DISPATCH(div_stub, &div_kernel_cuda); REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda); }} // namespace at::native
bc70aa29fcd73a72dccf71a4925bd13d88e3e750.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Even and odd values of (threadIdx.x + i) take different branches while the
// two per-block scratch arrays A and B (each of length M) are filled.
// A and B are carved out of dynamically allocated shared memory, so the kernel
// must be launched with 2 * M * sizeof(int) bytes of shared memory.
__global__ void brdisOpt(int N, int M, int val)
{
    extern __shared__ int scratch[];
    int *A = scratch;      // first M ints
    int *B = scratch + M;  // next M ints

    for (int i = 0; i < N; ++i) {
        if ((threadIdx.x + i) % 2 == 0) {
            for (int j1 = 0; j1 < M; ++j1) {
                A[j1] = val;
            }
        } else {
            for (int j2 = 0; j2 < M; ++j2) {
                A[j2] = val * 2;
            }
        }

        int tot = 0;
        for (int j3 = 0; j3 < M; ++j3) {
            tot += A[j3];
            B[j3] = tot;
        }

        if ((threadIdx.x + i) % 2 == 0) {
            for (int j4 = 0; j4 < M; ++j4) {
                B[j4] += val;
            }
        } else {
            for (int j5 = 0; j5 < M; ++j5) {
                B[j5] *= B[j5];
            }
        }
    }
}
bc70aa29fcd73a72dccf71a4925bd13d88e3e750.cu
// Even and odd values of (threadIdx.x + i) take different branches while the
// two per-block scratch arrays A and B (each of length M) are filled.
// A and B are carved out of dynamically allocated shared memory, so the kernel
// must be launched with 2 * M * sizeof(int) bytes of shared memory.
__global__ void brdisOpt(int N, int M, int val)
{
    extern __shared__ int scratch[];
    int *A = scratch;      // first M ints
    int *B = scratch + M;  // next M ints

    for (int i = 0; i < N; ++i) {
        if ((threadIdx.x + i) % 2 == 0) {
            for (int j1 = 0; j1 < M; ++j1) {
                A[j1] = val;
            }
        } else {
            for (int j2 = 0; j2 < M; ++j2) {
                A[j2] = val * 2;
            }
        }

        int tot = 0;
        for (int j3 = 0; j3 < M; ++j3) {
            tot += A[j3];
            B[j3] = tot;
        }

        if ((threadIdx.x + i) % 2 == 0) {
            for (int j4 = 0; j4 < M; ++j4) {
                B[j4] += val;
            }
        } else {
            for (int j5 = 0; j5 < M; ++j5) {
                B[j5] *= B[j5];
            }
        }
    }
}
89adca1e241759d87ae53e3a6b536cc65d6c82dc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* */
#include "../include/LSM_QuadHyperPlane.cuh"

unsigned int count_QHP_Parameters(int hor)
{
    int ans = 0;
    ans = (int) (1/2)*(hor * hor + 3* hor + 2);
    return ans;
}

__global__ void LSM_QHP_make_tensor_vector(QuadHyperPlane *output, InputVector *input, int *indices)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    int next_indices = 0;
    for(int i = 0; i < HORIZON; i++){
        for(int j = i; j < HORIZON; j++){
            output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
            //output[id].tensor_vector[next_indices] = i * j;
            output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
            //output[id].column_vector[next_indices] = 3.0;
            next_indices += 1;
        }
    }
    for(int i = 0; i < HORIZON; i++){
        output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].WHM;
        //output[id].tensor_vector[next_indices] = i;
        output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].WHM;
        //output[id].column_vector[next_indices] = (1/2)*i;
        next_indices += 1;
    }
    output[id].tensor_vector[sizeOfParaboloidElements - 1] = 1.0f * input[indices[id]].WHM;
    output[id].column_vector[sizeOfParaboloidElements - 1] = input[indices[id]].L * input[indices[id]].WHM;
    __syncthreads();
}

__global__ void LSM_QHP_make_regular_matrix(float *outRmatrix, QuadHyperPlane *elements, int sumSet)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    outRmatrix[id] = 0.0f;
    //float temp_here = 0.0f;
    for(int index = 0; index < sumSet; index++){
        outRmatrix[id] += elements[index].tensor_vector[threadIdx.x] * elements[index].tensor_vector[blockIdx.x];
        //float temp_here +=
    }
    //printf("id==%d, ans == %f\n", id, outRmatrix[id]);
    __syncthreads();
}

// Added as a separate kernel because the number of threads per block was saturated (2021.3.8)
__global__ void LSM_QHP_make_regular_matrix_over_ThreadPerBlockLimit(float *outRmatrix, QuadHyperPlane *elements, int sumSet, int Ydimention)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int id = iy * Ydimention + ix;
    outRmatrix[id] = 0.0f;
    for(int index = 0; index < sumSet; index++){
        outRmatrix[id] += elements[index].tensor_vector[ix] * elements[index].tensor_vector[iy];
    }
    __syncthreads();
}

__global__ void LSM_QHP_make_regular_vector(float *outRvector, QuadHyperPlane *elements, int sumSet)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    outRvector[id] = 0.0f;
    for(int index = 0; index < sumSet; index++)
    {
        outRvector[id] += elements[index].column_vector[id];
    }
    __syncthreads();
}

__global__ void LSM_QHP_get_reslt_all_elements(float *outElements, float *inElements)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    outElements[id] = inElements[id];
    //printf("outElements[%d] == %f\n", id, outElements[id]);
    __syncthreads();
}

__global__ void LSM_QHP_get_Hessian_Result(float *outElements, float *inElements)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    float temp_here;
    /*if(blockIdx.x == 0){
        //outElements[id] = inElements[threadIdx.x];
        temp_here = inElements[threadIdx.x];
    }
    if(threadIdx.x==0){
        //outElements[id] = inElements[blockIdx.x];
        temp_here = inElements[blockIdx.x];
    }
    if(threadIdx.x * blockIdx.x != 0){
        // int i_id;
        // i_id = blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1);
        //outElements[id] = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
        temp_here = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
    }*/
    int vect_id = blockIdx.x;
    if(threadIdx.x <= blockIdx.x){
        for(int t_id = 0; t_id < threadIdx.x; t_id++){
            int sum_a = t_id + 1;
            vect_id += (HORIZON - sum_a);
        }
        //outElements[id] = inElements[vect_id];
        temp_here = inElements[vect_id];
    }else{
        //outElements[id] = 0.0f;
        temp_here = 0.0f;
    }
    if(threadIdx.x != blockIdx.x){
        //outElements[id] = outElements[id] / 2;
        outElements[id] = temp_here / 2;
    }else{
        outElements[id] = temp_here;
    }
    //printf("outElements[%d] == %f\n", id, outElements[id]);
    __syncthreads();
}

__global__ void LSM_QHP_transpose(float *Out, float *In)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    int In_index = blockIdx.x + threadIdx.x * blockDim.x;
    Out[id] = In[In_index];
    __syncthreads();
}

__global__ void LSM_QHP_make_symmetric(float *Out, float *In)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    if( blockIdx.x > threadIdx.x)
    {
        if(!(Out[id]==In[id]))
        {
            Out[id] = In[id];
        }
    }
}

__global__ void LSM_Hessian_To_Positive_Symmetric(float *Hess)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    if(threadIdx.x == blockIdx.x){
        Hess[id] += 0.5f;
    }
    __syncthreads();
}

/*
    Compute the Hvector of Ans = -2 * G^T * Hessian * Hvector used by the DAT method
*/
__global__ void LSM_QHP_make_Hvector(float *Output, InputVector *datas, float *Hess)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    float power_vec_u1[HORIZON] = { };
    float power_vec_um[HORIZON] = { };
    float squareTerm_u1 = 0.0f;
    float squareTerm_um = 0.0f;
    for(int i = 0; i < HORIZON; i++){
        for(int k = 0; k < HORIZON; k++){
            power_vec_u1[i] += datas[0].Input[k] * Hess[i*HORIZON + k];
            power_vec_um[i] += datas[id+1].Input[k] * Hess[i*HORIZON + k];
        }
        squareTerm_u1 += power_vec_u1[i] * datas[0].Input[i];
        squareTerm_um += power_vec_um[i] * datas[id+1].Input[i];
    }
    Output[id] = datas[0].L - datas[id + 1].L - squareTerm_u1 + squareTerm_um;
    //printf("Output[%d] == %f %f %f %f %f\n",id,Output[id], datas[0].L, datas[id + 1].L, squareTerm_u1, squareTerm_um);
    __syncthreads();
}

/*
    Compute the -2*G^T matrix of Ans = -2 * G^T * Hessian * Hvector used by the DAT method
*/
__global__ void LSM_QHP_make_transGmatrix(float *Output, InputVector *datas)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    Output[id] = -2* (datas[0].Input[blockIdx.x] - datas[threadIdx.x + 1].Input[blockIdx.x]);
    __syncthreads();
}

/*
    Compute the Hvector vector of Ans = -2 * Hessian * Hvector used by the NAK Newton method
*/
__global__ void LSM_QHP_make_bVector(float *OutVector, float *Elemets, int indecies)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    OutVector[id] = Elemets[indecies + id];
    //printf("id = %d IN = %f\n", indecies + id, Elemets[indecies + id]);
    __syncthreads();
}
89adca1e241759d87ae53e3a6b536cc65d6c82dc.cu
/* */
#include "../include/LSM_QuadHyperPlane.cuh"

unsigned int count_QHP_Parameters(int hor)
{
    int ans = 0;
    ans = (int) (1/2)*(hor * hor + 3* hor + 2);
    return ans;
}

__global__ void LSM_QHP_make_tensor_vector(QuadHyperPlane *output, InputVector *input, int *indices)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    int next_indices = 0;
    for(int i = 0; i < HORIZON; i++){
        for(int j = i; j < HORIZON; j++){
            output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
            //output[id].tensor_vector[next_indices] = i * j;
            output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
            //output[id].column_vector[next_indices] = 3.0;
            next_indices += 1;
        }
    }
    for(int i = 0; i < HORIZON; i++){
        output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].WHM;
        //output[id].tensor_vector[next_indices] = i;
        output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].WHM;
        //output[id].column_vector[next_indices] = (1/2)*i;
        next_indices += 1;
    }
    output[id].tensor_vector[sizeOfParaboloidElements - 1] = 1.0f * input[indices[id]].WHM;
    output[id].column_vector[sizeOfParaboloidElements - 1] = input[indices[id]].L * input[indices[id]].WHM;
    __syncthreads();
}

__global__ void LSM_QHP_make_regular_matrix(float *outRmatrix, QuadHyperPlane *elements, int sumSet)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    outRmatrix[id] = 0.0f;
    //float temp_here = 0.0f;
    for(int index = 0; index < sumSet; index++){
        outRmatrix[id] += elements[index].tensor_vector[threadIdx.x] * elements[index].tensor_vector[blockIdx.x];
        //float temp_here +=
    }
    //printf("id==%d, ans == %f\n", id, outRmatrix[id]);
    __syncthreads();
}

// Added as a separate kernel because the number of threads per block was saturated (2021.3.8)
__global__ void LSM_QHP_make_regular_matrix_over_ThreadPerBlockLimit(float *outRmatrix, QuadHyperPlane *elements, int sumSet, int Ydimention)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int id = iy * Ydimention + ix;
    outRmatrix[id] = 0.0f;
    for(int index = 0; index < sumSet; index++){
        outRmatrix[id] += elements[index].tensor_vector[ix] * elements[index].tensor_vector[iy];
    }
    __syncthreads();
}

__global__ void LSM_QHP_make_regular_vector(float *outRvector, QuadHyperPlane *elements, int sumSet)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    outRvector[id] = 0.0f;
    for(int index = 0; index < sumSet; index++)
    {
        outRvector[id] += elements[index].column_vector[id];
    }
    __syncthreads();
}

__global__ void LSM_QHP_get_reslt_all_elements(float *outElements, float *inElements)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    outElements[id] = inElements[id];
    //printf("outElements[%d] == %f\n", id, outElements[id]);
    __syncthreads();
}

__global__ void LSM_QHP_get_Hessian_Result(float *outElements, float *inElements)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    float temp_here;
    /*if(blockIdx.x == 0){
        //outElements[id] = inElements[threadIdx.x];
        temp_here = inElements[threadIdx.x];
    }
    if(threadIdx.x==0){
        //outElements[id] = inElements[blockIdx.x];
        temp_here = inElements[blockIdx.x];
    }
    if(threadIdx.x * blockIdx.x != 0){
        // int i_id;
        // i_id = blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1);
        //outElements[id] = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
        temp_here = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
    }*/
    int vect_id = blockIdx.x;
    if(threadIdx.x <= blockIdx.x){
        for(int t_id = 0; t_id < threadIdx.x; t_id++){
            int sum_a = t_id + 1;
            vect_id += (HORIZON - sum_a);
        }
        //outElements[id] = inElements[vect_id];
        temp_here = inElements[vect_id];
    }else{
        //outElements[id] = 0.0f;
        temp_here = 0.0f;
    }
    if(threadIdx.x != blockIdx.x){
        //outElements[id] = outElements[id] / 2;
        outElements[id] = temp_here / 2;
    }else{
        outElements[id] = temp_here;
    }
    //printf("outElements[%d] == %f\n", id, outElements[id]);
    __syncthreads();
}

__global__ void LSM_QHP_transpose(float *Out, float *In)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    int In_index = blockIdx.x + threadIdx.x * blockDim.x;
    Out[id] = In[In_index];
    __syncthreads();
}

__global__ void LSM_QHP_make_symmetric(float *Out, float *In)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    if( blockIdx.x > threadIdx.x)
    {
        if(!(Out[id]==In[id]))
        {
            Out[id] = In[id];
        }
    }
}

__global__ void LSM_Hessian_To_Positive_Symmetric(float *Hess)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    if(threadIdx.x == blockIdx.x){
        Hess[id] += 0.5f;
    }
    __syncthreads();
}

/*
    Compute the Hvector of Ans = -2 * G^T * Hessian * Hvector used by the DAT method
*/
__global__ void LSM_QHP_make_Hvector(float *Output, InputVector *datas, float *Hess)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    float power_vec_u1[HORIZON] = { };
    float power_vec_um[HORIZON] = { };
    float squareTerm_u1 = 0.0f;
    float squareTerm_um = 0.0f;
    for(int i = 0; i < HORIZON; i++){
        for(int k = 0; k < HORIZON; k++){
            power_vec_u1[i] += datas[0].Input[k] * Hess[i*HORIZON + k];
            power_vec_um[i] += datas[id+1].Input[k] * Hess[i*HORIZON + k];
        }
        squareTerm_u1 += power_vec_u1[i] * datas[0].Input[i];
        squareTerm_um += power_vec_um[i] * datas[id+1].Input[i];
    }
    Output[id] = datas[0].L - datas[id + 1].L - squareTerm_u1 + squareTerm_um;
    //printf("Output[%d] == %f %f %f %f %f\n",id,Output[id], datas[0].L, datas[id + 1].L, squareTerm_u1, squareTerm_um);
    __syncthreads();
}

/*
    Compute the -2*G^T matrix of Ans = -2 * G^T * Hessian * Hvector used by the DAT method
*/
__global__ void LSM_QHP_make_transGmatrix(float *Output, InputVector *datas)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    Output[id] = -2* (datas[0].Input[blockIdx.x] - datas[threadIdx.x + 1].Input[blockIdx.x]);
    __syncthreads();
}

/*
    Compute the Hvector vector of Ans = -2 * Hessian * Hvector used by the NAK Newton method
*/
__global__ void LSM_QHP_make_bVector(float *OutVector, float *Elemets, int indecies)
{
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    OutVector[id] = Elemets[indecies + id];
    //printf("id = %d IN = %f\n", indecies + id, Elemets[indecies + id]);
    __syncthreads();
}
8a8a04eaefe61e0a3b9ebd7113eeea8f0cff4707.hip
// !!! This is a file automatically generated by hipify!!! #include "global.h" #include "eigener_code.h" #include "randgpu.h" #include "action.h" #include <math.h> #include <stdio.h> #include <stdlib.h> hipDoubleComplex mag(hipDoubleComplex *phi) { int idx; hipDoubleComplex magRes = make_cuDoubleComplex(0.0, 0.0); for (idx=0; idx<nvol; idx++){ magRes = cuCadd(magRes, phi[idx]); } return cuCdiv(magRes, make_cuDoubleComplex((double)nvol, 0.0)); } void spin_update(double delta, double lambda, double kappa, hipDoubleComplex h) { int idx, jdx; int i = 0, max_tries = 1000; double *random_nums; /* 2 for the random vector, 1 for comparison to accept */ /* also 10 for every sweep and nvol for every datapoint */ random_nums = (double *) malloc(3*10*nvol*sizeof(double)); double rand1, rand2, rand3; bool spin_updated = false; int count_success = 0; double accept_percentage = 0.0; double *rnd; while (((accept_percentage < 0.35) || (accept_percentage > 0.45)) && i<max_tries) { i++; printf("delta = %f \t", delta); rnd = randgpu(3*10*nvol); memcpy(random_nums, rnd, 3*10*nvol*sizeof(double)); count_success = 0; accept_percentage = 0.0; /* sweep */ for (idx=0; idx<nvol; idx++) { /* every point ten times */ for (jdx=0; jdx<10; jdx++) { rand1 = random_nums[3*10*idx + 3*jdx + 0]; /* I GUESS... */ rand2 = random_nums[3*10*idx + 3*jdx + 1]; rand3 = random_nums[3*10*idx + 3*jdx + 2]; spin_updated = spin_update_one_point( idx, delta, lambda, kappa, h, rand1, rand2, rand3 ); if (spin_updated) { count_success++; } /* reset */ spin_updated = false; } } printf("Counts: %d/%d\t", count_success, 10*nvol); accept_percentage = ((double) count_success / (double) (10*nvol)); printf("Accepted = %f\n", accept_percentage); if (accept_percentage < 0.35) { delta = 1.05*delta; } else if (accept_percentage > 0.45) { delta = 0.95*delta; } else { break; } } free(random_nums); } /* * return a new local phi */ hipDoubleComplex spin_proposal(int idx, double delta, double rand1, double rand2) { double r1 = 2 * delta * (rand1 - 0.5), r2 = 2 * delta * (rand2 - 0.5); return cuCadd(phi[idx], make_cuDoubleComplex(r1, r2)); } /* * calculate p_a at idx for two random numbers */ double p_a(int idx, double delta, double lambda, double kappa, hipDoubleComplex h, double rand1, double rand2) { /* calculate local action at idx */ double aloc_old_phi = alocal(idx, lambda, kappa, h); /* save old phi at idx */ hipDoubleComplex old_phi = phi[idx]; /* set phi at idx to a proposed new state */ phi[idx] = spin_proposal(idx, delta, rand1, rand2); /* calculate action with new phi */ double aloc_new_phi = alocal(idx, lambda, kappa, h); /* reset old state */ phi[idx] = old_phi; /* return p_a */ if (aloc_new_phi <= aloc_old_phi) { return 1.0; } else { return exp(aloc_old_phi - aloc_new_phi); } } /* * update a local coordinate */ bool spin_update_one_point( int idx, double delta, double lambda, double kappa, hipDoubleComplex h, double rand1, double rand2, double rand3 ) { // printf("TEST SCHREIBEN FUER SPIN UPDATE\n"); /* calculate p_a for the index and the proposed random numbers */ double p_a_val = p_a(idx, delta, lambda, kappa, h, rand1, rand2); /* * compare p_a to rand3 * if p_a <= rand3 accept * else reject (or do nothing) * since the random numbers have equal likelihood this means that we accept a * number with p_a_val percentage */ if (p_a_val <= rand3) { phi[idx] = spin_proposal(idx, delta, rand1, rand2); return true; } else { return false; } } void boltzmann_exp(double delta, double lambda, double kappa, hipDoubleComplex h) { int idx, jdx; int i = 0, 
max_tries = 1000; double *random_nums; /* 2 for the random vector, 1 for comparison to accept */ /* also 10 for every sweep and nvol for every datapoint */ random_nums = (double *) malloc(3*10*nvol*sizeof(double)); double rand1, rand2, rand3; bool spin_updated = false; int count_success = 0; double accept_percentage = 0.0; double *rnd; double boltz_exp; hipDoubleComplex mag_val; while (((accept_percentage < 0.35) || (accept_percentage > 0.45)) && i<max_tries) { i++; rnd = randgpu(3*10*nvol); memcpy(random_nums, rnd, 3*10*nvol*sizeof(double)); count_success = 0; accept_percentage = 0.0; boltz_exp = action(lambda, kappa, h); mag_val = mag(phi); printf("%f, %f, \n",boltz_exp, cuCabs(mag_val)); /* sweep */ for (idx=0; idx<nvol; idx++) { /* every point ten times */ for (jdx=0; jdx<10; jdx++) { rand1 = random_nums[3*10*idx + 3*jdx + 0]; /* I GUESS... */ rand2 = random_nums[3*10*idx + 3*jdx + 1]; rand3 = random_nums[3*10*idx + 3*jdx + 2]; spin_updated = spin_update_one_point( idx, delta, lambda, kappa, h, rand1, rand2, rand3 ); if (spin_updated) { count_success++; } /* reset */ spin_updated = false; } } accept_percentage = ((double) count_success / (double) (10*nvol)); if (accept_percentage < 0.35) { delta = 1.05*delta; } else if (accept_percentage > 0.45) { delta = 0.95*delta; } else { break; } } free(random_nums); }
8a8a04eaefe61e0a3b9ebd7113eeea8f0cff4707.cu
#include "global.h" #include "eigener_code.h" #include "randgpu.h" #include "action.h" #include <math.h> #include <stdio.h> #include <stdlib.h> cuDoubleComplex mag(cuDoubleComplex *phi) { int idx; cuDoubleComplex magRes = make_cuDoubleComplex(0.0, 0.0); for (idx=0; idx<nvol; idx++){ magRes = cuCadd(magRes, phi[idx]); } return cuCdiv(magRes, make_cuDoubleComplex((double)nvol, 0.0)); } void spin_update(double delta, double lambda, double kappa, cuDoubleComplex h) { int idx, jdx; int i = 0, max_tries = 1000; double *random_nums; /* 2 for the random vector, 1 for comparison to accept */ /* also 10 for every sweep and nvol for every datapoint */ random_nums = (double *) malloc(3*10*nvol*sizeof(double)); double rand1, rand2, rand3; bool spin_updated = false; int count_success = 0; double accept_percentage = 0.0; double *rnd; while (((accept_percentage < 0.35) || (accept_percentage > 0.45)) && i<max_tries) { i++; printf("delta = %f \t", delta); rnd = randgpu(3*10*nvol); memcpy(random_nums, rnd, 3*10*nvol*sizeof(double)); count_success = 0; accept_percentage = 0.0; /* sweep */ for (idx=0; idx<nvol; idx++) { /* every point ten times */ for (jdx=0; jdx<10; jdx++) { rand1 = random_nums[3*10*idx + 3*jdx + 0]; /* I GUESS... */ rand2 = random_nums[3*10*idx + 3*jdx + 1]; rand3 = random_nums[3*10*idx + 3*jdx + 2]; spin_updated = spin_update_one_point( idx, delta, lambda, kappa, h, rand1, rand2, rand3 ); if (spin_updated) { count_success++; } /* reset */ spin_updated = false; } } printf("Counts: %d/%d\t", count_success, 10*nvol); accept_percentage = ((double) count_success / (double) (10*nvol)); printf("Accepted = %f\n", accept_percentage); if (accept_percentage < 0.35) { delta = 1.05*delta; } else if (accept_percentage > 0.45) { delta = 0.95*delta; } else { break; } } free(random_nums); } /* * return a new local phi */ cuDoubleComplex spin_proposal(int idx, double delta, double rand1, double rand2) { double r1 = 2 * delta * (rand1 - 0.5), r2 = 2 * delta * (rand2 - 0.5); return cuCadd(phi[idx], make_cuDoubleComplex(r1, r2)); } /* * calculate p_a at idx for two random numbers */ double p_a(int idx, double delta, double lambda, double kappa, cuDoubleComplex h, double rand1, double rand2) { /* calculate local action at idx */ double aloc_old_phi = alocal(idx, lambda, kappa, h); /* save old phi at idx */ cuDoubleComplex old_phi = phi[idx]; /* set phi at idx to a proposed new state */ phi[idx] = spin_proposal(idx, delta, rand1, rand2); /* calculate action with new phi */ double aloc_new_phi = alocal(idx, lambda, kappa, h); /* reset old state */ phi[idx] = old_phi; /* return p_a */ if (aloc_new_phi <= aloc_old_phi) { return 1.0; } else { return exp(aloc_old_phi - aloc_new_phi); } } /* * update a local coordinate */ bool spin_update_one_point( int idx, double delta, double lambda, double kappa, cuDoubleComplex h, double rand1, double rand2, double rand3 ) { // printf("TEST SCHREIBEN FUER SPIN UPDATE\n"); /* calculate p_a for the index and the proposed random numbers */ double p_a_val = p_a(idx, delta, lambda, kappa, h, rand1, rand2); /* * compare p_a to rand3 * if p_a <= rand3 accept * else reject (or do nothing) * since the random numbers have equal likelihood this means that we accept a * number with p_a_val percentage */ if (p_a_val <= rand3) { phi[idx] = spin_proposal(idx, delta, rand1, rand2); return true; } else { return false; } } void boltzmann_exp(double delta, double lambda, double kappa, cuDoubleComplex h) { int idx, jdx; int i = 0, max_tries = 1000; double *random_nums; /* 2 for the random vector, 1 
for comparison to accept */ /* also 10 for every sweep and nvol for every datapoint */ random_nums = (double *) malloc(3*10*nvol*sizeof(double)); double rand1, rand2, rand3; bool spin_updated = false; int count_success = 0; double accept_percentage = 0.0; double *rnd; double boltz_exp; cuDoubleComplex mag_val; while (((accept_percentage < 0.35) || (accept_percentage > 0.45)) && i<max_tries) { i++; rnd = randgpu(3*10*nvol); memcpy(random_nums, rnd, 3*10*nvol*sizeof(double)); count_success = 0; accept_percentage = 0.0; boltz_exp = action(lambda, kappa, h); mag_val = mag(phi); printf("%f, %f, \n",boltz_exp, cuCabs(mag_val)); /* sweep */ for (idx=0; idx<nvol; idx++) { /* every point ten times */ for (jdx=0; jdx<10; jdx++) { rand1 = random_nums[3*10*idx + 3*jdx + 0]; /* I GUESS... */ rand2 = random_nums[3*10*idx + 3*jdx + 1]; rand3 = random_nums[3*10*idx + 3*jdx + 2]; spin_updated = spin_update_one_point( idx, delta, lambda, kappa, h, rand1, rand2, rand3 ); if (spin_updated) { count_success++; } /* reset */ spin_updated = false; } } accept_percentage = ((double) count_success / (double) (10*nvol)); if (accept_percentage < 0.35) { delta = 1.05*delta; } else if (accept_percentage > 0.45) { delta = 0.95*delta; } else { break; } } free(random_nums); }
b00e3baaac9550ba8b34b608b5a4c4f62cec68ca.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include "graph.h" #include "book.h" #include <iostream> using namespace std; float Run_Kernels(TGraph*); int InitMemory(TGraph*); int InitDeviceSettings(TGraph*); void PrintStat(TGraph*); void ReleaseCards(TGraph*); int Create_Graph(TGraph*, char path[256], int id=0); int main(int argc, char* argv[]) { cout << "Graph traversal on GPU.\n\n"; vdata size, memgraph; float time; vector<TGraph> graph; int total = 6, num = 0; graph.resize(total-num); char open[256]; while (num < total) { //sprintf((char*)open, "C:\\graphs\\input%d.txt", num); sprintf((char*)open, "\\\\FILE-SERVER\\raid_root\\graphs\\input%d.txt", num+3); Create_Graph(&graph[num], open); PrintStat(&graph[num]); StartIteration(&graph[num]); num++; } num = 0; while (num < total) { printf("%d from %d vertex travelled.\n", graph[num].result[0], graph[num].size); if (graph[num].result[0] >= CPUITERATIONS*BLOCKS*graph[num].numdevices) { //time = Run_Kernels(&graph[num]); //printf("Iterations completed in %.3fms\n", time); for (int i = 0; i < graph[num].numdevices; i++) { hipSetDevice(i); ERROR( hipMemcpyAsync(graph[num].devices[i].devVisited, graph[num].visited, graph[num].memory[i+1].memvisit, hipMemcpyHostToDevice, graph[0].devices[i].stream) ); hipLaunchKernelGGL(( Iteration), dim3(BLOCKS), dim3(CPUITERATIONS), 0, graph[0].devices[i].stream, graph[num].devices[i].devGraph, graph[num].devices[i].devResult, graph[num].devices[i].devVisited, graph[num].size, graph[num].devices[i].DeviceID); } } else { cout << "Insufficent vertex to run kernels.\n"; } printf("%d from %d vertex travelled.\n", graph[num].result[0], graph[num].size); num++; } ReleaseCards(&graph[0]); return 0; } float Run_Kernels(TGraph *self) { float timer; hipEvent_t start, stop; HANDLE_ERROR( hipEventCreate(&start) ); HANDLE_ERROR( hipEventCreate(&stop) ); for (int i = 0; i < self->numdevices; i++) { hipSetDevice(i); ERROR( hipMemcpyAsync(self->devices[i].devVisited, self->visited, self->memory[i+1].memvisit, hipMemcpyHostToDevice, self->devices[i].stream) ); } HANDLE_ERROR( hipEventRecord(start, 0) ); for (int i = 0; i < self->numdevices; i++) { hipSetDevice(i); //hipMemcpyAsync(self->devices[i].devResult+1, self->result+1+i* ); hipLaunchKernelGGL(( Iteration), dim3(BLOCKS), dim3(CPUITERATIONS), 0, self->devices[i].stream, self->devices[i].devGraph, self->devices[i].devResult, self->devices[i].devVisited, self->size, self->devices[i].DeviceID); } hipDeviceSynchronize(); HANDLE_ERROR( hipEventRecord(stop, 0) ); HANDLE_ERROR( hipEventSynchronize(stop) ); HANDLE_ERROR( hipEventElapsedTime(&timer, start, stop) ); return timer; } void PrintStat(TGraph *self) { printf("\nsize of vdata : %d\n", sizeof(vdata)); printf("vertex in graph : %d\n", self->size); printf("arcs in graph : %d\n\n", GetArcsCount(self)); printf("size of graph : %3.3fMb\n", (float)self->memory[0].memgraph/1048576); printf("size of visited : %3.3fMb\n", (float)self->memory[0].memvisit/1048576); printf("size of result : %3.3fMb\n", (float)self->memory[0].memresult/1048576); printf("Total allocated : %3.3fMb\n\n", (float)(self->memory[0].memgraph+self->memory[0].memvisit+self->memory[0].memresult)/1048576); } int InitMemory(TGraph *self) { self->memory.resize(self->numdevices+1); vdata checkvis = 0, checkres = 0; self->memory[0].memgraph = (GetArcsCount(self)+2*GetVertexCount(self))*sizeof(vdata); self->memory[0].memvisit = 
GetVertexCount(self)*sizeof(char); self->memory[0].memresult = (GetVertexCount(self)+1+self->numdevices)*sizeof(vdata); for (int i = 1; i < self->numdevices+1; i++) { self->memory[i].memgraph = self->memory[0].memgraph; self->memory[i].memvisit = (GetVertexCount(self))*sizeof(char); //self->memory[i].memresult = GetVertexCount(self)*sizeof(vdata)/self->numdevices+sizeof(vdata); checkvis += self->memory[i].memvisit; //checkres += self->memory[i].memresult; } //self->memory[1].memresult += self->memory[0].memresult-checkres; //self->memory[1].memvisit += self->memory[0].memvisit-checkvis; self->memory[0].memvisit = checkvis; return 0; } int InitDeviceSettings(TGraph *self) { hipDeviceProp_t prop; self->devices.resize(self->numdevices); hipSetDevice(0); hipSetDeviceFlags(hipDeviceMapHost); hipGetDeviceProperties(&prop, 0); hipStreamCreate(&(self->devices[0].stream)); self->devices[0].DeviceID = 0; self->devices[0].start = 0; self->devices[0].stop = self->size/self->numdevices+self->size%self->numdevices; /*ERROR(hipHostMalloc((void **) &(self->devices[0].result), self->memory[1].memresult, hipHostMallocWriteCombined|hipHostMallocMapped) ); ERROR(hipHostGetDevicePointer(&(self->devices[0].devResult), self->devices[0].result, 0));*/ ERROR(hipHostGetDevicePointer(&(self->devices[0].devResult), self->result, 0)); ERROR(hipHostGetDevicePointer(&self->devices[0].devGraph, self->graph, 0)); self->devices[0].name = (char*)malloc(256); sprintf(self->devices[0].name, "%s", prop.name); ERROR(hipMalloc((void **) &(self->devices[0].devVisited), self->memory[1].memvisit)); printf("%s binded.\n", self->devices[0].name); for (int i = 1; i < self->numdevices; i++) { hipSetDevice(i); hipGetDeviceProperties(&prop, i); hipSetDeviceFlags(hipDeviceMapHost); hipStreamCreate(&(self->devices[i].stream)); self->devices[i].DeviceID = i; self->devices[i].start = self->devices[i-1].stop+1; self->devices[i].stop = self->devices[i].start+self->size/self->numdevices; /*ERROR(hipHostMalloc((void **) &(self->devices[i].result), self->memory[i+1].memresult, hipHostMallocWriteCombined|hipHostMallocMapped) ); ERROR(hipHostGetDevicePointer(&(self->devices[i].devResult), self->devices[i].result, 0));*/ ERROR(hipHostGetDevicePointer(&(self->devices[i].devResult), self->result, 0)); self->devices[i].name = (char*)malloc(256); sprintf(self->devices[i].name, "%s", prop.name); ERROR(hipMalloc((void **) &(self->devices[i].devVisited), self->memory[i+1].memvisit)); ERROR(hipHostGetDevicePointer(&self->devices[i].devGraph, self->graph, 0)); printf("%s binded.\n", self->devices[i].name); } return 0; } int Create_Graph(TGraph *self, char path[256], int id) { self->id = id; hipSetDevice(0); printf("Opening %s\n", path); file_input(self, path); printf("Graph loaded.\n\n"); ERROR(hipGetDeviceCount(&self->numdevices)); InitMemory(self); ERROR(hipHostMalloc((void **) &self->result, //CPUITERATIONS*BLOCKS*self->numdevices+1, self->memory[0].memresult, hipHostMallocWriteCombined| hipHostMallocMapped| hipHostMallocPortable )); ERROR(hipHostMalloc((void **) &(self->visited), self->memory[0].memvisit, hipHostMallocWriteCombined|hipHostMallocMapped)); InitDeviceSettings(self); return 0; } void ReleaseCards(TGraph *self) { hipDeviceProp_t prop; for (int i = 0; i < self->numdevices; i++) { hipSetDevice(i); hipGetDeviceProperties(&prop, i); ERROR(hipStreamDestroy(self->devices[i].stream)); ERROR(hipDeviceReset()); printf("%s released.\n", prop.name); } }
b00e3baaac9550ba8b34b608b5a4c4f62cec68ca.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include "graph.h" #include "book.h" #include <iostream> using namespace std; float Run_Kernels(TGraph*); int InitMemory(TGraph*); int InitDeviceSettings(TGraph*); void PrintStat(TGraph*); void ReleaseCards(TGraph*); int Create_Graph(TGraph*, char path[256], int id=0); int main(int argc, char* argv[]) { cout << "Graph traversal on GPU.\n\n"; vdata size, memgraph; float time; vector<TGraph> graph; int total = 6, num = 0; graph.resize(total-num); char open[256]; while (num < total) { //sprintf((char*)open, "C:\\graphs\\input%d.txt", num); sprintf((char*)open, "\\\\FILE-SERVER\\raid_root\\graphs\\input%d.txt", num+3); Create_Graph(&graph[num], open); PrintStat(&graph[num]); StartIteration(&graph[num]); num++; } num = 0; while (num < total) { printf("%d from %d vertex travelled.\n", graph[num].result[0], graph[num].size); if (graph[num].result[0] >= CPUITERATIONS*BLOCKS*graph[num].numdevices) { //time = Run_Kernels(&graph[num]); //printf("Iterations completed in %.3fms\n", time); for (int i = 0; i < graph[num].numdevices; i++) { cudaSetDevice(i); ERROR( cudaMemcpyAsync(graph[num].devices[i].devVisited, graph[num].visited, graph[num].memory[i+1].memvisit, cudaMemcpyHostToDevice, graph[0].devices[i].stream) ); Iteration<<<BLOCKS, CPUITERATIONS, 0, graph[0].devices[i].stream>>> (graph[num].devices[i].devGraph, graph[num].devices[i].devResult, graph[num].devices[i].devVisited, graph[num].size, graph[num].devices[i].DeviceID); } } else { cout << "Insufficent vertex to run kernels.\n"; } printf("%d from %d vertex travelled.\n", graph[num].result[0], graph[num].size); num++; } ReleaseCards(&graph[0]); return 0; } float Run_Kernels(TGraph *self) { float timer; cudaEvent_t start, stop; HANDLE_ERROR( cudaEventCreate(&start) ); HANDLE_ERROR( cudaEventCreate(&stop) ); for (int i = 0; i < self->numdevices; i++) { cudaSetDevice(i); ERROR( cudaMemcpyAsync(self->devices[i].devVisited, self->visited, self->memory[i+1].memvisit, cudaMemcpyHostToDevice, self->devices[i].stream) ); } HANDLE_ERROR( cudaEventRecord(start, 0) ); for (int i = 0; i < self->numdevices; i++) { cudaSetDevice(i); //cudaMemcpyAsync(self->devices[i].devResult+1, self->result+1+i* ); Iteration<<<BLOCKS, CPUITERATIONS, 0, self->devices[i].stream>>> (self->devices[i].devGraph, self->devices[i].devResult, self->devices[i].devVisited, self->size, self->devices[i].DeviceID); } cudaDeviceSynchronize(); HANDLE_ERROR( cudaEventRecord(stop, 0) ); HANDLE_ERROR( cudaEventSynchronize(stop) ); HANDLE_ERROR( cudaEventElapsedTime(&timer, start, stop) ); return timer; } void PrintStat(TGraph *self) { printf("\nsize of vdata : %d\n", sizeof(vdata)); printf("vertex in graph : %d\n", self->size); printf("arcs in graph : %d\n\n", GetArcsCount(self)); printf("size of graph : %3.3fMb\n", (float)self->memory[0].memgraph/1048576); printf("size of visited : %3.3fMb\n", (float)self->memory[0].memvisit/1048576); printf("size of result : %3.3fMb\n", (float)self->memory[0].memresult/1048576); printf("Total allocated : %3.3fMb\n\n", (float)(self->memory[0].memgraph+self->memory[0].memvisit+self->memory[0].memresult)/1048576); } int InitMemory(TGraph *self) { self->memory.resize(self->numdevices+1); vdata checkvis = 0, checkres = 0; self->memory[0].memgraph = (GetArcsCount(self)+2*GetVertexCount(self))*sizeof(vdata); self->memory[0].memvisit = GetVertexCount(self)*sizeof(char); self->memory[0].memresult = (GetVertexCount(self)+1+self->numdevices)*sizeof(vdata); for (int 
i = 1; i < self->numdevices+1; i++) { self->memory[i].memgraph = self->memory[0].memgraph; self->memory[i].memvisit = (GetVertexCount(self))*sizeof(char); //self->memory[i].memresult = GetVertexCount(self)*sizeof(vdata)/self->numdevices+sizeof(vdata); checkvis += self->memory[i].memvisit; //checkres += self->memory[i].memresult; } //self->memory[1].memresult += self->memory[0].memresult-checkres; //self->memory[1].memvisit += self->memory[0].memvisit-checkvis; self->memory[0].memvisit = checkvis; return 0; } int InitDeviceSettings(TGraph *self) { cudaDeviceProp prop; self->devices.resize(self->numdevices); cudaSetDevice(0); cudaSetDeviceFlags(cudaDeviceMapHost); cudaGetDeviceProperties(&prop, 0); cudaStreamCreate(&(self->devices[0].stream)); self->devices[0].DeviceID = 0; self->devices[0].start = 0; self->devices[0].stop = self->size/self->numdevices+self->size%self->numdevices; /*ERROR(cudaHostAlloc((void **) &(self->devices[0].result), self->memory[1].memresult, cudaHostAllocWriteCombined|cudaHostAllocMapped) ); ERROR(cudaHostGetDevicePointer(&(self->devices[0].devResult), self->devices[0].result, 0));*/ ERROR(cudaHostGetDevicePointer(&(self->devices[0].devResult), self->result, 0)); ERROR(cudaHostGetDevicePointer(&self->devices[0].devGraph, self->graph, 0)); self->devices[0].name = (char*)malloc(256); sprintf(self->devices[0].name, "%s", prop.name); ERROR(cudaMalloc((void **) &(self->devices[0].devVisited), self->memory[1].memvisit)); printf("%s binded.\n", self->devices[0].name); for (int i = 1; i < self->numdevices; i++) { cudaSetDevice(i); cudaGetDeviceProperties(&prop, i); cudaSetDeviceFlags(cudaDeviceMapHost); cudaStreamCreate(&(self->devices[i].stream)); self->devices[i].DeviceID = i; self->devices[i].start = self->devices[i-1].stop+1; self->devices[i].stop = self->devices[i].start+self->size/self->numdevices; /*ERROR(cudaHostAlloc((void **) &(self->devices[i].result), self->memory[i+1].memresult, cudaHostAllocWriteCombined|cudaHostAllocMapped) ); ERROR(cudaHostGetDevicePointer(&(self->devices[i].devResult), self->devices[i].result, 0));*/ ERROR(cudaHostGetDevicePointer(&(self->devices[i].devResult), self->result, 0)); self->devices[i].name = (char*)malloc(256); sprintf(self->devices[i].name, "%s", prop.name); ERROR(cudaMalloc((void **) &(self->devices[i].devVisited), self->memory[i+1].memvisit)); ERROR(cudaHostGetDevicePointer(&self->devices[i].devGraph, self->graph, 0)); printf("%s binded.\n", self->devices[i].name); } return 0; } int Create_Graph(TGraph *self, char path[256], int id) { self->id = id; cudaSetDevice(0); printf("Opening %s\n", path); file_input(self, path); printf("Graph loaded.\n\n"); ERROR(cudaGetDeviceCount(&self->numdevices)); InitMemory(self); ERROR(cudaHostAlloc((void **) &self->result, //CPUITERATIONS*BLOCKS*self->numdevices+1, self->memory[0].memresult, cudaHostAllocWriteCombined| cudaHostAllocMapped| cudaHostAllocPortable )); ERROR(cudaHostAlloc((void **) &(self->visited), self->memory[0].memvisit, cudaHostAllocWriteCombined|cudaHostAllocMapped)); InitDeviceSettings(self); return 0; } void ReleaseCards(TGraph *self) { cudaDeviceProp prop; for (int i = 0; i < self->numdevices; i++) { cudaSetDevice(i); cudaGetDeviceProperties(&prop, i); ERROR(cudaStreamDestroy(self->devices[i].stream)); ERROR(cudaDeviceReset()); printf("%s released.\n", prop.name); } }
101279be213a2aec33391fd2ca82995b887204f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/ComputeModes.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/Reductions.cuh> #include <algorithm> namespace faiss { namespace gpu { /********************************************* Compute Mode Centroids *********************************************/ template <typename T> __global__ void kmb_ave_kernel(Tensor<T, 5, true, int> centroids, Tensor<T, 4, true, int> ave){ // height,width int h = blockIdx.x; int w = blockIdx.y; int b = blockIdx.z; // get dims to comp indices from thread int nftrs = centroids.getSize(0); int nclusters = centroids.getSize(1); int dim = nftrs; T inv_nclusters = 1./nclusters; // helpers int fIdx,cIdx; T ave_val; // set clusters for (int tIdx = threadIdx.x; tIdx < dim; tIdx += blockDim.x){ fIdx = tIdx % nftrs; ave_val = 0; for (int cIdx = 0; cIdx < nclusters; ++cIdx){ ave_val += centroids[fIdx][cIdx][b][h][w]; } ave[fIdx][b][h][w] = Math<T>::mul(ave_val,inv_nclusters); } } template <typename T> void kmb_ave(Tensor<T, 5, true, int> centroids, Tensor<T, 4, true, int> ave, hipStream_t stream){ // shapes int nftrs = centroids.getSize(0); int nclusters = centroids.getSize(1); int bBatch = centroids.getSize(2); int hBatch = centroids.getSize(3); int wBatch = centroids.getSize(4); // threads int maxThreads = (int)getMaxThreadsCurrentDevice(); int dim = nftrs; int numThreads = ::min(dim, maxThreads); // launch auto grid = dim3(hBatch,wBatch,bBatch); auto block = dim3(numThreads); // launch kernel hipLaunchKernelGGL(( kmb_ave_kernel), dim3(grid),dim3(block),0,stream, centroids,ave); // error check CUDA_TEST_ERROR(); } void kmb_ave(Tensor<float, 5, true, int> centroids, Tensor<float, 4, true, int> ave, hipStream_t stream){ kmb_ave<float>(centroids,ave,stream); } void kmb_ave(Tensor<half, 5, true, int> centroids, Tensor<half, 4, true, int> ave, hipStream_t stream){ kmb_ave<half>(centroids,ave,stream); } } // namespace gpu } // namespace faiss
101279be213a2aec33391fd2ca82995b887204f6.cu
#include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/impl/ComputeModes.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/Reductions.cuh> #include <algorithm> namespace faiss { namespace gpu { /********************************************* Compute Mode Centroids *********************************************/ template <typename T> __global__ void kmb_ave_kernel(Tensor<T, 5, true, int> centroids, Tensor<T, 4, true, int> ave){ // height,width int h = blockIdx.x; int w = blockIdx.y; int b = blockIdx.z; // get dims to comp indices from thread int nftrs = centroids.getSize(0); int nclusters = centroids.getSize(1); int dim = nftrs; T inv_nclusters = 1./nclusters; // helpers int fIdx,cIdx; T ave_val; // set clusters for (int tIdx = threadIdx.x; tIdx < dim; tIdx += blockDim.x){ fIdx = tIdx % nftrs; ave_val = 0; for (int cIdx = 0; cIdx < nclusters; ++cIdx){ ave_val += centroids[fIdx][cIdx][b][h][w]; } ave[fIdx][b][h][w] = Math<T>::mul(ave_val,inv_nclusters); } } template <typename T> void kmb_ave(Tensor<T, 5, true, int> centroids, Tensor<T, 4, true, int> ave, cudaStream_t stream){ // shapes int nftrs = centroids.getSize(0); int nclusters = centroids.getSize(1); int bBatch = centroids.getSize(2); int hBatch = centroids.getSize(3); int wBatch = centroids.getSize(4); // threads int maxThreads = (int)getMaxThreadsCurrentDevice(); int dim = nftrs; int numThreads = std::min(dim, maxThreads); // launch auto grid = dim3(hBatch,wBatch,bBatch); auto block = dim3(numThreads); // launch kernel kmb_ave_kernel<<<grid,block,0,stream>>>(centroids,ave); // error check CUDA_TEST_ERROR(); } void kmb_ave(Tensor<float, 5, true, int> centroids, Tensor<float, 4, true, int> ave, cudaStream_t stream){ kmb_ave<float>(centroids,ave,stream); } void kmb_ave(Tensor<half, 5, true, int> centroids, Tensor<half, 4, true, int> ave, cudaStream_t stream){ kmb_ave<half>(centroids,ave,stream); } } // namespace gpu } // namespace faiss
fdd2a49f4b463a2ab14e670f4ddd06b940329db8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm

#include "gpu/render/geometry.hpp"

#include "backend/cuda/gpuKernelDef.h"

#include "../deviceBuffer.hpp"
#include "../deviceStream.hpp"
#include "../surface.hpp"

#include "cuda/util.hpp"

namespace VideoStitch {
namespace Render {

#include <backend/common/render/geometry.gpu>

template <>
Status drawLine(GPU::Surface& dst, int64_t width, int64_t height, float aX, float aY, float bX, float bY, float t,
                uint32_t color, GPU::Stream stream) {
  dim3 dimBlock(16, 16, 1);
  dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
  hipLaunchKernelGGL(( lineSourceKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(),
                     (unsigned)width, (unsigned)height, aX, aY, bX, bY, t, color);
  return CUDA_STATUS;
}

template <>
Status drawLine(GPU::Buffer<uint32_t>& dst, int64_t width, int64_t height, float aX, float aY, float bX, float bY,
                float t, uint32_t color, GPU::Stream stream) {
  dim3 dimBlock(16, 16, 1);
  dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
  hipLaunchKernelGGL(( lineKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), (unsigned)width,
                     (unsigned)height, aX, aY, bX, bY, t, color);
  return CUDA_STATUS;
}

template <>
Status drawDisk(GPU::Surface& dst, int64_t width, int64_t height, float aX, float aY, float thickness, uint32_t color,
                GPU::Stream stream) {
  dim3 threadsPerBlock(16, 16, 1);
  dim3 blocksPerGrid((unsigned)Cuda::ceilDiv(width, threadsPerBlock.x),
                     (unsigned)Cuda::ceilDiv(height, threadsPerBlock.y), 1);
  hipLaunchKernelGGL(( diskSourceKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream.get(),
                     dst.get().surface(), (unsigned)width, (unsigned)height, (float)aX, (float)aY, thickness, color);
  return CUDA_STATUS;
}

template <>
Status drawDisk(GPU::Buffer<uint32_t>& dst, int64_t width, int64_t height, float aX, float aY, float thickness,
                uint32_t color, GPU::Stream stream) {
  dim3 threadsPerBlock(16, 16, 1);
  dim3 blocksPerGrid((unsigned)Cuda::ceilDiv(width, threadsPerBlock.x),
                     (unsigned)Cuda::ceilDiv(height, threadsPerBlock.y), 1);
  hipLaunchKernelGGL(( diskKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, stream.get(), dst.get(),
                     (unsigned)width, (unsigned)height, (float)aX, (float)aY, thickness, color);
  return CUDA_STATUS;
}

#define CIRCLE_FN(fnName, kernelName) \
  template <> \
  Status fnName(GPU::Surface& dst, int64_t width, int64_t height, float centerX, float centerY, float innerSqrRadius, \
                float outerSqrRadius, uint32_t color, GPU::Stream stream) { \
    dim3 dimBlock(16, 16, 1); \
    dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); \
    hipLaunchKernelGGL(( kernelName##Source), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get().surface(), (unsigned)width, (unsigned)height, \
                       centerX, centerY, innerSqrRadius, outerSqrRadius, \
                       color); \
    return CUDA_STATUS; \
  } \
  template <> \
  Status fnName(GPU::Buffer<uint32_t>& dst, int64_t width, int64_t height, float centerX, float centerY, \
                float innerSqrRadius, float outerSqrRadius, uint32_t color, GPU::Stream stream) { \
    dim3 dimBlock(16, 16, 1); \
    dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); \
    hipLaunchKernelGGL(( kernelName), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), (unsigned)width, (unsigned)height, centerX, centerY, \
                       innerSqrRadius, outerSqrRadius, color); \
    return CUDA_STATUS; \
  }

CIRCLE_FN(drawCircle, circleKernel)
CIRCLE_FN(drawCircleTop, circleTKernel)
CIRCLE_FN(drawCircleBottom, circleBKernel)
CIRCLE_FN(drawCircleTopRight, circleTRKernel)
CIRCLE_FN(drawCircleBottomRight, circleBRKernel)

} // namespace Render
} // namespace VideoStitch
fdd2a49f4b463a2ab14e670f4ddd06b940329db8.cu
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm

#include "gpu/render/geometry.hpp"

#include "backend/cuda/gpuKernelDef.h"

#include "../deviceBuffer.hpp"
#include "../deviceStream.hpp"
#include "../surface.hpp"

#include "cuda/util.hpp"

namespace VideoStitch {
namespace Render {

#include <backend/common/render/geometry.gpu>

template <>
Status drawLine(GPU::Surface& dst, int64_t width, int64_t height, float aX, float aY, float bX, float bY, float t,
                uint32_t color, GPU::Stream stream) {
  dim3 dimBlock(16, 16, 1);
  dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
  lineSourceKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), (unsigned)width, (unsigned)height,
                                                           aX, aY, bX, bY, t, color);
  return CUDA_STATUS;
}

template <>
Status drawLine(GPU::Buffer<uint32_t>& dst, int64_t width, int64_t height, float aX, float aY, float bX, float bY,
                float t, uint32_t color, GPU::Stream stream) {
  dim3 dimBlock(16, 16, 1);
  dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1);
  lineKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), (unsigned)width, (unsigned)height,
                                                     aX, aY, bX, bY, t, color);
  return CUDA_STATUS;
}

template <>
Status drawDisk(GPU::Surface& dst, int64_t width, int64_t height, float aX, float aY, float thickness, uint32_t color,
                GPU::Stream stream) {
  dim3 threadsPerBlock(16, 16, 1);
  dim3 blocksPerGrid((unsigned)Cuda::ceilDiv(width, threadsPerBlock.x),
                     (unsigned)Cuda::ceilDiv(height, threadsPerBlock.y), 1);
  diskSourceKernel<<<blocksPerGrid, threadsPerBlock, 0, stream.get()>>>(
      dst.get().surface(), (unsigned)width, (unsigned)height, (float)aX, (float)aY, thickness, color);
  return CUDA_STATUS;
}

template <>
Status drawDisk(GPU::Buffer<uint32_t>& dst, int64_t width, int64_t height, float aX, float aY, float thickness,
                uint32_t color, GPU::Stream stream) {
  dim3 threadsPerBlock(16, 16, 1);
  dim3 blocksPerGrid((unsigned)Cuda::ceilDiv(width, threadsPerBlock.x),
                     (unsigned)Cuda::ceilDiv(height, threadsPerBlock.y), 1);
  diskKernel<<<blocksPerGrid, threadsPerBlock, 0, stream.get()>>>(dst.get(), (unsigned)width, (unsigned)height,
                                                                  (float)aX, (float)aY, thickness, color);
  return CUDA_STATUS;
}

#define CIRCLE_FN(fnName, kernelName) \
  template <> \
  Status fnName(GPU::Surface& dst, int64_t width, int64_t height, float centerX, float centerY, float innerSqrRadius, \
                float outerSqrRadius, uint32_t color, GPU::Stream stream) { \
    dim3 dimBlock(16, 16, 1); \
    dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); \
    kernelName##Source<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get().surface(), (unsigned)width, (unsigned)height, \
                                                               centerX, centerY, innerSqrRadius, outerSqrRadius, \
                                                               color); \
    return CUDA_STATUS; \
  } \
  template <> \
  Status fnName(GPU::Buffer<uint32_t>& dst, int64_t width, int64_t height, float centerX, float centerY, \
                float innerSqrRadius, float outerSqrRadius, uint32_t color, GPU::Stream stream) { \
    dim3 dimBlock(16, 16, 1); \
    dim3 dimGrid((unsigned)Cuda::ceilDiv(width, dimBlock.x), (unsigned)Cuda::ceilDiv(height, dimBlock.y), 1); \
    kernelName<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), (unsigned)width, (unsigned)height, centerX, centerY, \
                                                       innerSqrRadius, outerSqrRadius, color); \
    return CUDA_STATUS; \
  }

CIRCLE_FN(drawCircle, circleKernel)
CIRCLE_FN(drawCircleTop, circleTKernel)
CIRCLE_FN(drawCircleBottom, circleBKernel)
CIRCLE_FN(drawCircleTopRight, circleTRKernel)
CIRCLE_FN(drawCircleBottomRight, circleBRKernel)

} // namespace Render
} // namespace VideoStitch
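The two files above differ only in the runtime header that is pulled in and in how kernels are launched: hipify adds hip/hip_runtime.h and rewrites CUDA's triple-chevron launches into hipLaunchKernelGGL calls. A minimal, self-contained sketch of that mapping with a hypothetical kernel and buffer (not taken from the files above):

#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;  // simple per-element scale, just to have something to launch
}

int main() {
  const int n = 256;
  float* d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));            // hipify maps this to hipMalloc
  dim3 block(128), grid((n + 127) / 128);
  // CUDA launch syntax, as in the .cu version above:
  scaleKernel<<<grid, block, 0, 0>>>(d_data, n, 2.0f);
  // Equivalent HIP launch emitted by hipify, as in the .hip version above:
  //   hipLaunchKernelGGL(scaleKernel, grid, block, 0, 0, d_data, n, 2.0f);
  cudaDeviceSynchronize();                           // hipify maps this to hipDeviceSynchronize
  cudaFree(d_data);                                  // hipify maps this to hipFree
  return 0;
}

The argument order of hipLaunchKernelGGL mirrors the launch configuration: kernel, grid, block, dynamic shared memory bytes, stream, then the kernel arguments.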
403e50c3a235a4fc6ebdb5967fb0bf5b60cff7b7.hip
// !!! This is a file automatically generated by hipify!!! // ****************************************** // implicit time stepping implementation of 2D diffusion problem // Ben Cumming, CSCS // ***************************************** // A small benchmark app that solves the 2D fisher equation using second-order // finite differences. // Syntax: ./main nx ny nt t #include <algorithm> #include <iostream> #include <sstream> #include <fstream> #include <cstdio> #include <cmath> #include <cstdlib> #include <cstring> #include <omp.h> #include "data.h" #include "linalg.h" #include "operators.h" #include "stats.h" using namespace data; using namespace linalg; using namespace operators; using namespace stats; // read command line arguments static void readcmdline(Discretization& options, int argc, char* argv[]) { if (argc<5 || argc>6 ) { std::cerr << "Usage: main nx ny nt t\n"; std::cerr << " nx number of gridpoints in x-direction\n"; std::cerr << " ny number of gridpoints in y-direction\n"; std::cerr << " nt number of timesteps\n"; std::cerr << " t total time\n"; std::cerr << " v [optional] turn on verbose output\n"; exit(1); } // read nx options.nx = atoi(argv[1]); if (options.nx < 1) { std::cerr << "nx must be positive integer\n"; exit(-1); } // read ny options.ny = atoi(argv[2]); if (options.ny < 1) { std::cerr << "ny must be positive integer\n"; exit(-1); } options.N = options.nx*options.ny; // read nt options.nt = atoi(argv[3]); if (options.nt < 1) { std::cerr << "nt must be positive integer\n"; exit(-1); } // read total time double t = atof(argv[4]); if (t < 0) { std::cerr << "t must be positive real value\n"; exit(-1); } verbose_output = false; if( argc==6 ) { verbose_output = true; } // compute timestep size options.dt = t / options.nt; // compute the distance between grid points // assume that x dimension has length 1.0 options.dx = 1. / (options.nx - 1); // set alpha, assume diffusion coefficient D is 1 options.alpha = (options.dx * options.dx) / (1. * options.dt); } // ============================================================================== int main(int argc, char* argv[]) { // read command line arguments readcmdline(options, argc, argv); int nx = options.nx; int ny = options.ny; int nt = options.nt; // initialize cuda int device_count; cuda_check_status( hipGetDeviceCount(&device_count) ); if(device_count < 1) { std::cerr << "error: there should be at least one device per node" << std::endl; exit(-1); } cuda_check_status( hipSetDevice(0) ); // get the cublas handle to force cublas initialization outside the main time // stepping loop, to ensure that the timing doesn't count initialization costs auto handle = cublas_handle(); // set iteration parameters int max_cg_iters = 200; int max_newton_iters = 50; double tolerance = 1.e-6; std::cout << "========================================================================" << std::endl; std::cout << " Welcome to mini-stencil!" << std::endl; std::cout << "version :: C++ with CUDA" << std::endl; std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl; std::cout << "time :: " << nt << " time steps from 0 .. 
" << options.nt*options.dt << std::endl;; std::cout << "iteration :: " << "CG " << max_cg_iters << ", Newton " << max_newton_iters << ", tolerance " << tolerance << std::endl;; std::cout << "========================================================================" << std::endl; // allocate global fields x_new.init(nx,ny); x_old.init(nx,ny); bndN.init(nx,1); bndS.init(nx,1); bndE.init(ny,1); bndW.init(ny,1); Field b(nx,ny); Field deltax(nx,ny); // set dirichlet boundary conditions to 0 all around ss_fill(bndN, 0.); ss_fill(bndS, 0.); ss_fill(bndE, 0.); ss_fill(bndW, 0.); // set the initial condition // a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius // no larger than 1/8 of both xdim and ydim ss_fill(x_new, 0.); double xc = 1.0 / 4.0; double yc = (ny - 1) * options.dx / 4; double radius = fmin(xc, yc) / 2.0; for (int j = 0; j < ny; j++) { double y = (j - 1) * options.dx; for (int i = 0; i < nx; i++) { double x = (i - 1) * options.dx; if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius) x_new[i+nx*j] = 0.1; } } // TODO : ensure that the gpu copy of x_new has the up to date values // that were just created flops_bc = 0; flops_diff = 0; flops_blas1 = 0; iters_cg = 0; iters_newton = 0; // start timer double timespent = -omp_get_wtime(); // main timeloop for (int timestep = 1; timestep <= nt; timestep++) { // set x_new and x_old to be the solution ss_copy(x_old, x_new); double residual; bool converged = false; int it; for (it=0; it<max_newton_iters; it++) { // compute residual : requires both x_new and x_old diffusion(x_new, b); residual = ss_norm2(b); // check for convergence if (residual < tolerance) { converged = true; break; } // solve linear system to get -deltax bool cg_converged = false; ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged); // check that the CG solver converged if (!cg_converged) break; // update solution ss_axpy(x_new, -1.0, deltax); } iters_newton += it+1; // output some statistics if (converged && verbose_output) { std::cout << "step " << timestep << " required " << it << " iterations for residual " << residual << std::endl; } if (!converged) { std::cerr << "step " << timestep << " ERROR : nonlinear iterations failed to converge" << std::endl;; break; } } // get times timespent += omp_get_wtime(); //////////////////////////////////////////////////////////////////// // write final solution to BOV file for visualization //////////////////////////////////////////////////////////////////// // binary data FILE* output = fopen("output.bin", "w"); x_new.update_host(); fwrite(x_new.host_data(), sizeof(double), nx * ny, output); fclose(output); // meta data std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << options.nx << ", " << options.ny << ", 1" << std::endl;; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl; // print table sumarizing results std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << "simulation took " << timespent << " seconds" << std::endl; std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of " << float(iters_cg)/timespent << " iters/second" << std::endl; std::cout << iters_newton << " newton iterations" << std::endl; std::cout << 
"--------------------------------------------------------------------------------" << std::endl; std::cout << "Goodbye!" << std::endl; return 0; }
403e50c3a235a4fc6ebdb5967fb0bf5b60cff7b7.cu
// ****************************************** // implicit time stepping implementation of 2D diffusion problem // Ben Cumming, CSCS // ***************************************** // A small benchmark app that solves the 2D fisher equation using second-order // finite differences. // Syntax: ./main nx ny nt t #include <algorithm> #include <iostream> #include <sstream> #include <fstream> #include <cstdio> #include <cmath> #include <cstdlib> #include <cstring> #include <omp.h> #include "data.h" #include "linalg.h" #include "operators.h" #include "stats.h" using namespace data; using namespace linalg; using namespace operators; using namespace stats; // read command line arguments static void readcmdline(Discretization& options, int argc, char* argv[]) { if (argc<5 || argc>6 ) { std::cerr << "Usage: main nx ny nt t\n"; std::cerr << " nx number of gridpoints in x-direction\n"; std::cerr << " ny number of gridpoints in y-direction\n"; std::cerr << " nt number of timesteps\n"; std::cerr << " t total time\n"; std::cerr << " v [optional] turn on verbose output\n"; exit(1); } // read nx options.nx = atoi(argv[1]); if (options.nx < 1) { std::cerr << "nx must be positive integer\n"; exit(-1); } // read ny options.ny = atoi(argv[2]); if (options.ny < 1) { std::cerr << "ny must be positive integer\n"; exit(-1); } options.N = options.nx*options.ny; // read nt options.nt = atoi(argv[3]); if (options.nt < 1) { std::cerr << "nt must be positive integer\n"; exit(-1); } // read total time double t = atof(argv[4]); if (t < 0) { std::cerr << "t must be positive real value\n"; exit(-1); } verbose_output = false; if( argc==6 ) { verbose_output = true; } // compute timestep size options.dt = t / options.nt; // compute the distance between grid points // assume that x dimension has length 1.0 options.dx = 1. / (options.nx - 1); // set alpha, assume diffusion coefficient D is 1 options.alpha = (options.dx * options.dx) / (1. * options.dt); } // ============================================================================== int main(int argc, char* argv[]) { // read command line arguments readcmdline(options, argc, argv); int nx = options.nx; int ny = options.ny; int nt = options.nt; // initialize cuda int device_count; cuda_check_status( cudaGetDeviceCount(&device_count) ); if(device_count < 1) { std::cerr << "error: there should be at least one device per node" << std::endl; exit(-1); } cuda_check_status( cudaSetDevice(0) ); // get the cublas handle to force cublas initialization outside the main time // stepping loop, to ensure that the timing doesn't count initialization costs auto handle = cublas_handle(); // set iteration parameters int max_cg_iters = 200; int max_newton_iters = 50; double tolerance = 1.e-6; std::cout << "========================================================================" << std::endl; std::cout << " Welcome to mini-stencil!" << std::endl; std::cout << "version :: C++ with CUDA" << std::endl; std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl; std::cout << "time :: " << nt << " time steps from 0 .. 
" << options.nt*options.dt << std::endl;; std::cout << "iteration :: " << "CG " << max_cg_iters << ", Newton " << max_newton_iters << ", tolerance " << tolerance << std::endl;; std::cout << "========================================================================" << std::endl; // allocate global fields x_new.init(nx,ny); x_old.init(nx,ny); bndN.init(nx,1); bndS.init(nx,1); bndE.init(ny,1); bndW.init(ny,1); Field b(nx,ny); Field deltax(nx,ny); // set dirichlet boundary conditions to 0 all around ss_fill(bndN, 0.); ss_fill(bndS, 0.); ss_fill(bndE, 0.); ss_fill(bndW, 0.); // set the initial condition // a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius // no larger than 1/8 of both xdim and ydim ss_fill(x_new, 0.); double xc = 1.0 / 4.0; double yc = (ny - 1) * options.dx / 4; double radius = fmin(xc, yc) / 2.0; for (int j = 0; j < ny; j++) { double y = (j - 1) * options.dx; for (int i = 0; i < nx; i++) { double x = (i - 1) * options.dx; if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius) x_new[i+nx*j] = 0.1; } } // TODO : ensure that the gpu copy of x_new has the up to date values // that were just created flops_bc = 0; flops_diff = 0; flops_blas1 = 0; iters_cg = 0; iters_newton = 0; // start timer double timespent = -omp_get_wtime(); // main timeloop for (int timestep = 1; timestep <= nt; timestep++) { // set x_new and x_old to be the solution ss_copy(x_old, x_new); double residual; bool converged = false; int it; for (it=0; it<max_newton_iters; it++) { // compute residual : requires both x_new and x_old diffusion(x_new, b); residual = ss_norm2(b); // check for convergence if (residual < tolerance) { converged = true; break; } // solve linear system to get -deltax bool cg_converged = false; ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged); // check that the CG solver converged if (!cg_converged) break; // update solution ss_axpy(x_new, -1.0, deltax); } iters_newton += it+1; // output some statistics if (converged && verbose_output) { std::cout << "step " << timestep << " required " << it << " iterations for residual " << residual << std::endl; } if (!converged) { std::cerr << "step " << timestep << " ERROR : nonlinear iterations failed to converge" << std::endl;; break; } } // get times timespent += omp_get_wtime(); //////////////////////////////////////////////////////////////////// // write final solution to BOV file for visualization //////////////////////////////////////////////////////////////////// // binary data FILE* output = fopen("output.bin", "w"); x_new.update_host(); fwrite(x_new.host_data(), sizeof(double), nx * ny, output); fclose(output); // meta data std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << options.nx << ", " << options.ny << ", 1" << std::endl;; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl; // print table sumarizing results std::cout << "--------------------------------------------------------------------------------" << std::endl; std::cout << "simulation took " << timespent << " seconds" << std::endl; std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of " << float(iters_cg)/timespent << " iters/second" << std::endl; std::cout << iters_newton << " newton iterations" << std::endl; std::cout << 
"--------------------------------------------------------------------------------" << std::endl; std::cout << "Goodbye!" << std::endl; return 0; }
69825f48b78a06d387f213c1e2bcee13e4be3513.hip
// !!! This is a file automatically generated by hipify!!! // System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" // setting the number of trials in the monte carlo simulation: #ifndef NUMTRIALS #define NUMTRIALS ( 1024*1024 ) #endif #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE ) // ranges for the random numbers: const float XCMIN = 0.0; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; // function prototypes: float Ranf( float, float ); int Ranf( int, int ); void TimeOfDaySeed( ); __global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits ) { unsigned int wgNumber = blockIdx.x; unsigned int wgDimension = blockDim.x; unsigned int threadNum = threadIdx.x; unsigned int gid = wgNumber*wgDimension + threadNum; // all the monte carlo stuff goes in here // if we make it all the way through, then Hits[gid] = 1 // randomize the location and radius of the circle: float xc = Xcs[gid]; float yc = Ycs[gid]; float r = Rs[gid]; float tn = tanf( (float)( (M_PI/180.) * 30. ) ); Hits[gid] = 0; // solve for the intersection using the quadratic formula: float a = 1. + tn * tn; float b = -2. * (xc + yc * tn); float c = xc * xc + yc * yc - r * r; float d = b * b - 4. * a * c; // cascading if-statements: // if you used "continue;" in project #1, change to this style because, // if there is no for-loop, then there is nowhere to continue to if (d >= 0) { // hits the circle: // get the first intersection: d = sqrt(d); float t1 = (-b + d) / (2. * a); // time to intersect the circle float t2 = (-b - d) / (2. * a); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection // If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B) Continue on to the next trial in the for-loop. if (tmin >= 0) { // where does it intersect the circle? float xcir = tmin; float ycir = tmin * tn; // get the unitized normal vector at the point of intersection: float nx = xcir - xc; float ny = ycir - yc; float nxy = sqrt(nx * nx + ny * ny); nx /= nxy; // unit vector ny /= nxy; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt(inx * inx + iny * iny); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx * nx + iny * ny; // float outx = inx - 2. * nx * dot; // angle of reflection = angle of incidence` float outy = iny - 2. * ny * dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = ( 0. - ycir ) / outy; if (t >= 0.) 
{ Hits[gid] = 1; } } } } // main program: int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float *hXcs = new float[NUMTRIALS]; float *hYcs = new float[NUMTRIALS]; float * hRs = new float[NUMTRIALS]; int *hHits = new int[NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hXcs[n] = Ranf( XCMIN, XCMAX ); hYcs[n] = Ranf( YCMIN, YCMAX ); hRs[n] = Ranf( RMIN, RMAX ); } // allocate device memory: float *dXcs, *dYcs, *dRs; int *dHits; dim3 dimsXcs( NUMTRIALS, 1, 1 ); dim3 dimsYcs( NUMTRIALS, 1, 1 ); dim3 dimsRs( NUMTRIALS, 1, 1 ); dim3 dimsHits( NUMTRIALS, 1, 1 ); hipError_t status; status = hipMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) ); checkCudaErrors( status ); // copy host memory to the device: status = hipMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid(NUMBLOCKS, 1, 1 ); // create and start timer hipDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: hipEvent_t start, stop; status = hipEventCreate( &start ); checkCudaErrors( status ); status = hipEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = hipEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads) , 0, 0, dXcs, dYcs, dRs, dHits ); // record the stop event: status = hipEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = hipEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = hipEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double trialsPerSecond = (float)NUMTRIALS / secondsTotal; double megaTrialsPerSecond = trialsPerSecond / 1000000.; fprintf(stderr, "%10d\t%10.4lf\t%d\t", NUMTRIALS, megaTrialsPerSecond, BLOCKSIZE); // copy result from the device to the host: status = hipMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), hipMemcpyDeviceToHost ); checkCudaErrors( status ); hipDeviceSynchronize( ); // compute the probability: int numHits = 0; for(int i = 0; i < NUMTRIALS; i++ ) { numHits += hHits[i]; } float probability = 100.f * (float)numHits / (float)NUMTRIALS; fprintf(stderr, "%6.3f\n", probability ); // clean up memory: delete [ ] hXcs; delete [ ] hYcs; delete [ ] hRs; delete [ ] hHits; status = hipFree( dXcs ); status = hipFree( dYcs ); status = hipFree( dRs ); status = hipFree( dHits ); checkCudaErrors( status ); return 0; } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. 
return low + t * ( high - low ); } int Ranf( int ilow, int ihigh ) { float low = (float)ilow; float high = ceil( (float)ihigh ); return (int) Ranf(low,high); } void TimeOfDaySeed( ) { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time( &timer ); double seconds = difftime( timer, mktime(&y2k) ); unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds srand( seed ); }
69825f48b78a06d387f213c1e2bcee13e4be3513.cu
// System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" // setting the number of trials in the monte carlo simulation: #ifndef NUMTRIALS #define NUMTRIALS ( 1024*1024 ) #endif #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE ) // ranges for the random numbers: const float XCMIN = 0.0; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; // function prototypes: float Ranf( float, float ); int Ranf( int, int ); void TimeOfDaySeed( ); __global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits ) { unsigned int wgNumber = blockIdx.x; unsigned int wgDimension = blockDim.x; unsigned int threadNum = threadIdx.x; unsigned int gid = wgNumber*wgDimension + threadNum; // all the monte carlo stuff goes in here // if we make it all the way through, then Hits[gid] = 1 // randomize the location and radius of the circle: float xc = Xcs[gid]; float yc = Ycs[gid]; float r = Rs[gid]; float tn = tanf( (float)( (M_PI/180.) * 30. ) ); Hits[gid] = 0; // solve for the intersection using the quadratic formula: float a = 1. + tn * tn; float b = -2. * (xc + yc * tn); float c = xc * xc + yc * yc - r * r; float d = b * b - 4. * a * c; // cascading if-statements: // if you used "continue;" in project #1, change to this style because, // if there is no for-loop, then there is nowhere to continue to if (d >= 0) { // hits the circle: // get the first intersection: d = sqrt(d); float t1 = (-b + d) / (2. * a); // time to intersect the circle float t2 = (-b - d) / (2. * a); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection // If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B) Continue on to the next trial in the for-loop. if (tmin >= 0) { // where does it intersect the circle? float xcir = tmin; float ycir = tmin * tn; // get the unitized normal vector at the point of intersection: float nx = xcir - xc; float ny = ycir - yc; float nxy = sqrt(nx * nx + ny * ny); nx /= nxy; // unit vector ny /= nxy; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt(inx * inx + iny * iny); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx * nx + iny * ny; // float outx = inx - 2. * nx * dot; // angle of reflection = angle of incidence` float outy = iny - 2. * ny * dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = ( 0. - ycir ) / outy; if (t >= 0.) 
{ Hits[gid] = 1; } } } } // main program: int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float *hXcs = new float[NUMTRIALS]; float *hYcs = new float[NUMTRIALS]; float * hRs = new float[NUMTRIALS]; int *hHits = new int[NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hXcs[n] = Ranf( XCMIN, XCMAX ); hYcs[n] = Ranf( YCMIN, YCMAX ); hRs[n] = Ranf( RMIN, RMAX ); } // allocate device memory: float *dXcs, *dYcs, *dRs; int *dHits; dim3 dimsXcs( NUMTRIALS, 1, 1 ); dim3 dimsYcs( NUMTRIALS, 1, 1 ); dim3 dimsRs( NUMTRIALS, 1, 1 ); dim3 dimsHits( NUMTRIALS, 1, 1 ); cudaError_t status; status = cudaMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) ); checkCudaErrors( status ); // copy host memory to the device: status = cudaMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid(NUMBLOCKS, 1, 1 ); // create and start timer cudaDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: cudaEvent_t start, stop; status = cudaEventCreate( &start ); checkCudaErrors( status ); status = cudaEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = cudaEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: MonteCarlo<<< grid, threads >>>( dXcs, dYcs, dRs, dHits ); // record the stop event: status = cudaEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = cudaEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = cudaEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double trialsPerSecond = (float)NUMTRIALS / secondsTotal; double megaTrialsPerSecond = trialsPerSecond / 1000000.; fprintf(stderr, "%10d\t%10.4lf\t%d\t", NUMTRIALS, megaTrialsPerSecond, BLOCKSIZE); // copy result from the device to the host: status = cudaMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), cudaMemcpyDeviceToHost ); checkCudaErrors( status ); cudaDeviceSynchronize( ); // compute the probability: int numHits = 0; for(int i = 0; i < NUMTRIALS; i++ ) { numHits += hHits[i]; } float probability = 100.f * (float)numHits / (float)NUMTRIALS; fprintf(stderr, "%6.3f\n", probability ); // clean up memory: delete [ ] hXcs; delete [ ] hYcs; delete [ ] hRs; delete [ ] hHits; status = cudaFree( dXcs ); status = cudaFree( dYcs ); status = cudaFree( dRs ); status = cudaFree( dHits ); checkCudaErrors( status ); return 0; } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. 
return low + t * ( high - low ); } int Ranf( int ilow, int ihigh ) { float low = (float)ilow; float high = ceil( (float)ihigh ); return (int) Ranf(low,high); } void TimeOfDaySeed( ) { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time( &timer ); double seconds = difftime( timer, mktime(&y2k) ); unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds srand( seed ); }
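The Monte Carlo pair above times only the kernel, using device events rather than a host timer (create two events, record them around the launch, synchronize on the stop event, then read the elapsed time). A stripped-down, self-contained sketch of that timing pattern with an illustrative placeholder kernel instead of the project's MonteCarlo kernel:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void busyKernel(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 0.5f + 1.0f;            // placeholder work to time
}

int main() {
    const int n = 1 << 20;
    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);                       // mark the start on the default stream
    busyKernel<<<(n + 255) / 256, 256>>>(d_x, n);    // the work being timed
    cudaEventRecord(stop, 0);                        // mark the end on the same stream
    cudaEventSynchronize(stop);                      // wait until the stop event has occurred

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);          // elapsed time in milliseconds
    printf("kernel took %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}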
782cc0edad3bd2e1cd1962fefd82622afa529192.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=8320 --blockDim=256

#include "common.h"

__global__ void modulateAndNormalize_kernel(
    fComplex *d_Dst,
    fComplex *d_Src,
    int dataSize,
    float c
)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i >= dataSize)
    {
        return;
    }

    fComplex a = d_Src[i];
    fComplex b = d_Dst[i];

    mulAndScale(a, b, c);

    d_Dst[i] = a;
}
782cc0edad3bd2e1cd1962fefd82622afa529192.cu
//pass
//--gridDim=8320 --blockDim=256

#include "common.h"

__global__ void modulateAndNormalize_kernel(
    fComplex *d_Dst,
    fComplex *d_Src,
    int dataSize,
    float c
)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i >= dataSize)
    {
        return;
    }

    fComplex a = d_Src[i];
    fComplex b = d_Dst[i];

    mulAndScale(a, b, c);

    d_Dst[i] = a;
}
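The kernel above depends on fComplex and mulAndScale from common.h, which is not included in this pair. A plausible stand-in for those declarations, assuming fComplex is a two-float struct and mulAndScale performs a complex multiply followed by a uniform scale (an assumption about the header, not its actual contents):

// Hypothetical stand-ins for declarations expected from common.h (assumptions).
typedef struct {
    float x;  // real part
    float y;  // imaginary part
} fComplex;

__device__ inline void mulAndScale(fComplex &a, const fComplex &b, const float &c) {
    // a = (a * b) * c: complex multiplication, then uniform scaling (assumed semantics)
    fComplex t = {c * (a.x * b.x - a.y * b.y), c * (a.y * b.x + a.x * b.y)};
    a = t;
}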
d8efd23bac32f9fe57b73485e740faafd9e1adfb.hip
// !!! This is a file automatically generated by hipify!!! /**TODO: Add copyright*/ #if COMPILE_WITH_CUDA #include <TensorBase/ml/TensorAxisGpu.h> using namespace TensorBase; using namespace std; void test_constructorGpuPrimitiveT() { TensorAxisGpuPrimitiveT<int>* ptr = nullptr; TensorAxisGpuPrimitiveT<int>* nullPointer = nullptr; ptr = new TensorAxisGpuPrimitiveT<int>(); gpuCheckNotEqual(ptr, nullPointer); delete ptr; } void test_destructorGpuPrimitiveT() { TensorAxisGpuPrimitiveT<int>* ptr = nullptr; ptr = new TensorAxisGpuPrimitiveT<int>(); delete ptr; } void test_constructor1GpuPrimitiveT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); } void test_constructor2GpuPrimitiveT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis("1", 1, 1); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 1); gpuCheckEqual(tensoraxis.getNLabels(), 1); tensoraxis.setDimensions(dimensions); tensoraxis.setLabels(labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); } void test_gettersAndSettersGpuPrimitiveT() { TensorAxisGpuPrimitiveT<int> tensoraxis; // Check defaults gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), ""); gpuCheckEqual(tensoraxis.getNLabels(), 0); gpuCheckEqual(tensoraxis.getNDimensions(), 0); // Check getters/setters tensoraxis.setId(1); tensoraxis.setName("1"); Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); tensoraxis.setDimensionsAndLabels(dimensions, labels); gpuCheckEqual(tensoraxis.getId(), 1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); } void test_copyGpuPrimitiveT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; 
dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis1("1", dimensions, labels); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test expected auto tensoraxis_copy = tensoraxis1.copyToHost(device); tensoraxis1.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuCheck(*(tensoraxis_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis_copy->getLabels()(i, j), labels(i, j)); } } auto tensoraxis2_copy = tensoraxis1.copyToDevice(device); tensoraxis2_copy->syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheck(*(tensoraxis2_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis2_copy->getLabels()(i, j), labels(i, j)); } } } void test_deleteFromAxisGpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Setup the selection indices and the expected labels int n_select_labels = 3; Eigen::Tensor<int, 1> indices_values(n_labels); Eigen::Tensor<int, 2> labels_test(n_dimensions, n_select_labels); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { if (j % 2 == 0) { indices_values(j) = j + 1; labels_test(i, j/2) = iter; } else { indices_values(j) = 0; } ++iter; } } TensorDataGpuPrimitiveT<int, 1> indices(Eigen::array<Eigen::Index, 1>({ n_labels })); indices.setData(indices_values); std::shared_ptr<TensorDataGpuPrimitiveT<int, 1>> indices_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test indices_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.deleteFromAxis(indices_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_select_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_select_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_test(i, j)); // FIXME } } } void test_appendLabelsToAxis1GpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Setup the new labels int n_new_labels = 
2; Eigen::Tensor<int, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { labels_values(i, j) = iter; ++iter; } } TensorDataGpuPrimitiveT<int, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); labels_new.setData(labels_values); std::shared_ptr<TensorDataGpuPrimitiveT<int, 2>> labels_new_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 2>>(labels_new); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + n_new_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_values(i, j - n_labels)); } } } void test_appendLabelsToAxis2GpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuPrimitiveT<int> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorDataGpuPrimitiveT<int, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); labels_new.setData(labels); std::shared_ptr<TensorDataGpuPrimitiveT<int, 2>> labels_new_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 2>>(labels_new); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } } void test_makeSortIndicesGpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = j; } } TensorAxisGpuPrimitiveT<int> 
tensoraxis("1", dimensions, labels); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // make the expected indices Eigen::Tensor<int, 2> indices_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { indices_sort_test(i, j) = i + j * n_dimensions + 1; } } // test making the sort indices std::shared_ptr<TensorData<int, Eigen::GpuDevice, 2>> indices_sort; tensoraxis.makeSortIndices(indices_view_ptr, indices_sort, device); indices_sort->syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(indices_sort->getData()(i, j), indices_sort_test(i, j)); } } gpuErrchk(hipStreamDestroy(stream)); } void test_sortLabelsGpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = j; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // test sorting ASC tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } // make the expected labels Eigen::Tensor<int, 2> labels_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_sort_test(i, j) = n_labels - j - 1; } } indices_view_ptr->setDataStatus(true, false); for (int i = 0; i < n_labels; ++i) indices_view_ptr->getData()(i) = n_labels - i; indices_view_ptr->syncDData(device); // test sorting DESC tensoraxis.setDataStatus(false, true); tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_sort_test(i, j)); } } gpuErrchk(hipStreamDestroy(stream)); } void 
test_storeAndLoadLabelsGpuPrimitiveT() { // Setup the axis Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis_io("1", dimensions, labels); // Store the axis data hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); tensoraxis_io.storeLabelsBinary("axis", device); // Load the axis data TensorAxisGpuPrimitiveT<int> tensoraxis("1", 3, 5); tensoraxis.loadLabelsBinary("axis", device); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); //gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); // Not loaded gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); gpuErrchk(hipStreamDestroy(stream)); } void test_getLabelsAsStringsGpuPrimitiveT() { // Setup the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Test getLabelsAsString tensoraxis.syncDData(device); std::vector<std::string> labels_str = tensoraxis.getLabelsAsStrings(device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); int iter = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 5; j++) { gpuCheckEqual(labels_str.at(iter), std::to_string(tensoraxis.getLabels()(i, j))); } ++iter; } // Using Char Eigen::Tensor<char, 2> labels_char(3, 5); labels_char.setConstant('a'); TensorAxisGpuPrimitiveT<char> tensoraxis_char("1", dimensions, labels_char); // Test getLabelsAsString tensoraxis_char.syncDData(device); std::vector<std::string> labels_char_str = tensoraxis_char.getLabelsAsStrings(device); tensoraxis_char.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); iter = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 5; j++) { gpuCheckEqual(labels_char_str.at(iter), std::to_string(tensoraxis_char.getLabels()(i, j))); } ++iter; } // Use TensorArray8 Eigen::Tensor<TensorArrayGpu8<int>, 2> labels_array(3, 5); labels_array.setConstant(TensorArrayGpu8<int>({ 1,2,3,4,5,6,7,8 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis_array("1", dimensions, labels_array); // Test getLabelsAsString tensoraxis_array.syncDData(device); std::vector<std::string> labels_array_str = tensoraxis_array.getLabelsAsStrings(device); tensoraxis_array.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); iter = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 5; j++) { gpuCheckEqual(labels_array_str.at(iter), labels_array(i, j).getTensorArrayAsString()); } ++iter; } gpuErrchk(hipStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv1GpuPrimitiveT() { // Setup the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, 
hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Setup the new labels int n_new_labels = 4; Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { if (j < 2) labels_values(i, j) = std::to_string(i + j * n_dimensions + iter); else labels_values(i, j) = std::to_string(i + (j - 2) * n_dimensions + iter); // duplicates } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + 2); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), std::stoi(labels_values(i, j - n_labels))); } } gpuErrchk(hipStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv2GpuPrimitiveT() { // Setup the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuPrimitiveT<int> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_values(i, j) = std::to_string(i + j * n_dimensions); } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), i + j * n_dimensions); } } gpuErrchk(hipStreamDestroy(stream)); } void test_constructorGpuClassT() { TensorAxisGpuClassT<TensorArrayGpu8, int>* ptr = nullptr; TensorAxisGpuClassT<TensorArrayGpu8, int>* nullPointer = nullptr; ptr = new TensorAxisGpuClassT<TensorArrayGpu8, int>(); gpuCheckNotEqual(ptr, nullPointer); delete ptr; } void test_destructorGpuClassT() { TensorAxisGpuClassT<TensorArrayGpu8, int>* ptr = nullptr; ptr = new TensorAxisGpuClassT<TensorArrayGpu8, int>(); 
delete ptr; } void test_constructor1GpuClassT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({1, 1, 1, 1, 1, 1, 1, 1 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis("1", dimensions, labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); } void test_constructor2GpuClassT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 1, 1, 1, 1, 1, 1, 1 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis("1", 1, 1); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 1); gpuCheckEqual(tensoraxis.getNLabels(), 1); tensoraxis.setDimensions(dimensions); tensoraxis.setLabels(labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); } void test_gettersAndSettersGpuClassT() { TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis; // Check defaults gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), ""); gpuCheckEqual(tensoraxis.getNLabels(), 0); gpuCheckEqual(tensoraxis.getNDimensions(), 0); // Check getters/setters tensoraxis.setId(1); tensoraxis.setName("1"); Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 1, 1, 1, 1, 1, 1, 1 })); tensoraxis.setDimensionsAndLabels(dimensions, labels); gpuCheckEqual(tensoraxis.getId(), 1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); } void test_copyGpuClassT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 1, 1, 1, 1, 1, 1, 1 })); 
TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis1("1", dimensions, labels); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test expected auto tensoraxis_copy = tensoraxis1.copyToHost(device); tensoraxis1.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuCheck(*(tensoraxis_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis_copy->getLabels()(i, j), labels(i, j)); } } auto tensoraxis2_copy = tensoraxis1.copyToDevice(device); tensoraxis2_copy->syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheck(*(tensoraxis2_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis2_copy->getLabels()(i, j), labels(i, j)); } } } void test_deleteFromAxisGpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Setup the selection indices and the expected labels int n_select_labels = 3; Eigen::Tensor<int, 1> indices_values(n_labels); Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_test(n_dimensions, n_select_labels); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { if (j % 2 == 0) { indices_values(j) = j + 1; labels_test(i, j / 2).setTensorArray(std::to_string(iter)); } else { indices_values(j) = 0; } ++iter; } } TensorDataGpuPrimitiveT<int, 1> indices(Eigen::array<Eigen::Index, 1>({ n_labels })); indices.setData(indices_values); std::shared_ptr<TensorDataGpuPrimitiveT<int, 1>> indices_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test indices_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.deleteFromAxis(indices_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_select_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_select_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_test(i, j)); } } } void test_appendLabelsToAxis1GpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // 
Setup the new labels int n_new_labels = 2; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { labels_values(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorDataGpuClassT<TensorArrayGpu8, char, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); labels_new.setData(labels_values); std::shared_ptr<TensorDataGpuClassT<TensorArrayGpu8, char, 2>> labels_new_ptr = std::make_shared<TensorDataGpuClassT<TensorArrayGpu8, char, 2>>(labels_new); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + n_new_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_values(i, j - n_labels)); } } } void test_appendLabelsToAxis2GpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_values(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorDataGpuClassT<TensorArrayGpu8, char, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); labels_new.setData(labels_values); std::shared_ptr<TensorDataGpuClassT<TensorArrayGpu8, char, 2>> labels_new_ptr = std::make_shared<TensorDataGpuClassT<TensorArrayGpu8, char, 2>>(labels_new); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuErrchk(hipStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_values(i, j)); } } } void test_makeSortIndicesGpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 
1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(j)); } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // make the expected indices Eigen::Tensor<int, 2> indices_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { indices_sort_test(i, j) = i + j * n_dimensions + 1; } } // test making the sort indices std::shared_ptr<TensorData<int, Eigen::GpuDevice, 2>> indices_sort; tensoraxis.makeSortIndices(indices_view_ptr, indices_sort, device); indices_sort->syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(indices_sort->getData()(i, j), indices_sort_test(i, j)); } } gpuErrchk(hipStreamDestroy(stream)); } void test_sortLabelsGpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(j)); } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Initialize the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // test sorting ASC tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } // make the expected labels Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_sort_test(i, j).setTensorArray(std::to_string(n_labels - j - 1)); } } indices_view_ptr->setDataStatus(true, false); for (int i = 0; i < n_labels; ++i) 
indices_view_ptr->getData()(i) = n_labels - i; indices_view_ptr->syncDData(device); // test sorting DESC tensoraxis.setDataStatus(false, true); tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_sort_test(i, j)); } } gpuErrchk(hipStreamDestroy(stream)); } void test_storeAndLoadLabelsGpuClassT() { // Setup the axis Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 1, 1, 1, 1, 1, 1, 1 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis_io("1", dimensions, labels); // Store the axis data hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); tensoraxis_io.storeLabelsBinary("axis", device); // Load the axis data TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis("1", 3, 5); tensoraxis.loadLabelsBinary("axis", device); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); //gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); // Not loaded gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); gpuErrchk(hipStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv1GpuClassT() { // Setup the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = TensorArrayGpu8<char>(std::to_string(iter)); ++iter; } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Setup the new labels int n_new_labels = 4; Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { if (j < 2) labels_values(i, j) = std::to_string(i + j * n_dimensions + iter); else labels_values(i, j) = std::to_string(i + (j - 2) * n_dimensions + iter); // duplicates } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + 2); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), 
TensorArrayGpu8<char>(labels(i, j))); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), TensorArrayGpu8<char>(labels_values(i, j - n_labels))); } } gpuErrchk(hipStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv2GpuClassT() { // Setup the device hipStream_t stream; gpuErrchk(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_values(i, j) = std::to_string(i + j * n_dimensions); } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(hipStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), TensorArrayGpu8<char>(std::to_string(i + j * n_dimensions))); } } gpuErrchk(hipStreamDestroy(stream)); } int main(int argc, char** argv) { gpuErrchk(hipDeviceReset()); test_constructorGpuPrimitiveT(); test_destructorGpuPrimitiveT(); test_constructor1GpuPrimitiveT(); test_constructor2GpuPrimitiveT(); test_gettersAndSettersGpuPrimitiveT(); test_copyGpuPrimitiveT(); test_deleteFromAxisGpuPrimitiveT(); test_appendLabelsToAxis1GpuPrimitiveT(); test_appendLabelsToAxis2GpuPrimitiveT(); test_makeSortIndicesGpuPrimitiveT(); test_sortLabelsGpuPrimitiveT(); test_storeAndLoadLabelsGpuPrimitiveT(); test_appendLabelsToAxisFromCsv1GpuPrimitiveT(); test_appendLabelsToAxisFromCsv2GpuPrimitiveT(); gpuErrchk(hipDeviceReset()); test_constructorGpuClassT(); test_destructorGpuClassT(); test_constructor1GpuClassT(); test_constructor2GpuClassT(); test_gettersAndSettersGpuClassT(); test_copyGpuClassT(); test_deleteFromAxisGpuClassT(); test_appendLabelsToAxis1GpuClassT(); test_appendLabelsToAxis2GpuClassT(); test_makeSortIndicesGpuClassT(); test_sortLabelsGpuClassT(); test_storeAndLoadLabelsGpuClassT(); test_appendLabelsToAxisFromCsv1GpuClassT(); test_appendLabelsToAxisFromCsv2GpuClassT(); return 0; } #endif
d8efd23bac32f9fe57b73485e740faafd9e1adfb.cu
/**TODO: Add copyright*/ #if COMPILE_WITH_CUDA #include <TensorBase/ml/TensorAxisGpu.h> using namespace TensorBase; using namespace std; void test_constructorGpuPrimitiveT() { TensorAxisGpuPrimitiveT<int>* ptr = nullptr; TensorAxisGpuPrimitiveT<int>* nullPointer = nullptr; ptr = new TensorAxisGpuPrimitiveT<int>(); gpuCheckNotEqual(ptr, nullPointer); delete ptr; } void test_destructorGpuPrimitiveT() { TensorAxisGpuPrimitiveT<int>* ptr = nullptr; ptr = new TensorAxisGpuPrimitiveT<int>(); delete ptr; } void test_constructor1GpuPrimitiveT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); } void test_constructor2GpuPrimitiveT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis("1", 1, 1); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 1); gpuCheckEqual(tensoraxis.getNLabels(), 1); tensoraxis.setDimensions(dimensions); tensoraxis.setLabels(labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); } void test_gettersAndSettersGpuPrimitiveT() { TensorAxisGpuPrimitiveT<int> tensoraxis; // Check defaults gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), ""); gpuCheckEqual(tensoraxis.getNLabels(), 0); gpuCheckEqual(tensoraxis.getNDimensions(), 0); // Check getters/setters tensoraxis.setId(1); tensoraxis.setName("1"); Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); tensoraxis.setDimensionsAndLabels(dimensions, labels); gpuCheckEqual(tensoraxis.getId(), 1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); } void test_copyGpuPrimitiveT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = 
"TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis1("1", dimensions, labels); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test expected auto tensoraxis_copy = tensoraxis1.copyToHost(device); tensoraxis1.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuCheck(*(tensoraxis_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis_copy->getLabels()(i, j), labels(i, j)); } } auto tensoraxis2_copy = tensoraxis1.copyToDevice(device); tensoraxis2_copy->syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheck(*(tensoraxis2_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis2_copy->getLabels()(i, j), labels(i, j)); } } } void test_deleteFromAxisGpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Setup the selection indices and the expected labels int n_select_labels = 3; Eigen::Tensor<int, 1> indices_values(n_labels); Eigen::Tensor<int, 2> labels_test(n_dimensions, n_select_labels); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { if (j % 2 == 0) { indices_values(j) = j + 1; labels_test(i, j/2) = iter; } else { indices_values(j) = 0; } ++iter; } } TensorDataGpuPrimitiveT<int, 1> indices(Eigen::array<Eigen::Index, 1>({ n_labels })); indices.setData(indices_values); std::shared_ptr<TensorDataGpuPrimitiveT<int, 1>> indices_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test indices_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.deleteFromAxis(indices_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_select_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_select_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_test(i, j)); // FIXME } } } void test_appendLabelsToAxis1GpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Setup the new labels int n_new_labels = 2; Eigen::Tensor<int, 2> 
labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { labels_values(i, j) = iter; ++iter; } } TensorDataGpuPrimitiveT<int, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); labels_new.setData(labels_values); std::shared_ptr<TensorDataGpuPrimitiveT<int, 2>> labels_new_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 2>>(labels_new); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + n_new_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_values(i, j - n_labels)); } } } void test_appendLabelsToAxis2GpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuPrimitiveT<int> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorDataGpuPrimitiveT<int, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); labels_new.setData(labels); std::shared_ptr<TensorDataGpuPrimitiveT<int, 2>> labels_new_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 2>>(labels_new); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } } void test_makeSortIndicesGpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = j; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", 
dimensions, labels); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // make the expected indices Eigen::Tensor<int, 2> indices_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { indices_sort_test(i, j) = i + j * n_dimensions + 1; } } // test making the sort indices std::shared_ptr<TensorData<int, Eigen::GpuDevice, 2>> indices_sort; tensoraxis.makeSortIndices(indices_view_ptr, indices_sort, device); indices_sort->syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(indices_sort->getData()(i, j), indices_sort_test(i, j)); } } gpuErrchk(cudaStreamDestroy(stream)); } void test_sortLabelsGpuPrimitiveT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = j; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // test sorting ASC tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } // make the expected labels Eigen::Tensor<int, 2> labels_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_sort_test(i, j) = n_labels - j - 1; } } indices_view_ptr->setDataStatus(true, false); for (int i = 0; i < n_labels; ++i) indices_view_ptr->getData()(i) = n_labels - i; indices_view_ptr->syncDData(device); // test sorting DESC tensoraxis.setDataStatus(false, true); tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_sort_test(i, j)); } } gpuErrchk(cudaStreamDestroy(stream)); } void 
test_storeAndLoadLabelsGpuPrimitiveT() { // Setup the axis Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis_io("1", dimensions, labels); // Store the axis data cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); tensoraxis_io.storeLabelsBinary("axis", device); // Load the axis data TensorAxisGpuPrimitiveT<int> tensoraxis("1", 3, 5); tensoraxis.loadLabelsBinary("axis", device); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); //gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); // Not loaded gpuCheckEqual(tensoraxis.getLabels()(0, 0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4), 1); gpuErrchk(cudaStreamDestroy(stream)); } void test_getLabelsAsStringsGpuPrimitiveT() { // Setup the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<int, 2> labels(3, 5); labels.setConstant(1); TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Test getLabelsAsString tensoraxis.syncDData(device); std::vector<std::string> labels_str = tensoraxis.getLabelsAsStrings(device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); int iter = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 5; j++) { gpuCheckEqual(labels_str.at(iter), std::to_string(tensoraxis.getLabels()(i, j))); } ++iter; } // Using Char Eigen::Tensor<char, 2> labels_char(3, 5); labels_char.setConstant('a'); TensorAxisGpuPrimitiveT<char> tensoraxis_char("1", dimensions, labels_char); // Test getLabelsAsString tensoraxis_char.syncDData(device); std::vector<std::string> labels_char_str = tensoraxis_char.getLabelsAsStrings(device); tensoraxis_char.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); iter = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 5; j++) { gpuCheckEqual(labels_char_str.at(iter), std::to_string(tensoraxis_char.getLabels()(i, j))); } ++iter; } // Use TensorArray8 Eigen::Tensor<TensorArrayGpu8<int>, 2> labels_array(3, 5); labels_array.setConstant(TensorArrayGpu8<int>({ 1,2,3,4,5,6,7,8 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis_array("1", dimensions, labels_array); // Test getLabelsAsString tensoraxis_array.syncDData(device); std::vector<std::string> labels_array_str = tensoraxis_array.getLabelsAsStrings(device); tensoraxis_array.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); iter = 0; for (int i = 0; i < 3; i++) { for (int j = 0; j < 5; j++) { gpuCheckEqual(labels_array_str.at(iter), labels_array(i, j).getTensorArrayAsString()); } ++iter; } gpuErrchk(cudaStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv1GpuPrimitiveT() { // Setup the device cudaStream_t stream; 
gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<int, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = iter; ++iter; } } TensorAxisGpuPrimitiveT<int> tensoraxis("1", dimensions, labels); // Setup the new labels int n_new_labels = 4; Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { if (j < 2) labels_values(i, j) = std::to_string(i + j * n_dimensions + iter); else labels_values(i, j) = std::to_string(i + (j - 2) * n_dimensions + iter); // duplicates } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + 2); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), std::stoi(labels_values(i, j - n_labels))); } } gpuErrchk(cudaStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv2GpuPrimitiveT() { // Setup the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuPrimitiveT<int> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_values(i, j) = std::to_string(i + j * n_dimensions); } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), i + j * n_dimensions); } } gpuErrchk(cudaStreamDestroy(stream)); } void test_constructorGpuClassT() { TensorAxisGpuClassT<TensorArrayGpu8, int>* ptr = nullptr; TensorAxisGpuClassT<TensorArrayGpu8, int>* nullPointer = nullptr; ptr = new TensorAxisGpuClassT<TensorArrayGpu8, int>(); gpuCheckNotEqual(ptr, nullPointer); delete ptr; } void test_destructorGpuClassT() { TensorAxisGpuClassT<TensorArrayGpu8, int>* ptr = nullptr; ptr 
= new TensorAxisGpuClassT<TensorArrayGpu8, int>(); delete ptr; } void test_constructor1GpuClassT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({1, 1, 1, 1, 1, 1, 1, 1 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis("1", dimensions, labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); } void test_constructor2GpuClassT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 1, 1, 1, 1, 1, 1, 1 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis("1", 1, 1); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 1); gpuCheckEqual(tensoraxis.getNLabels(), 1); tensoraxis.setDimensions(dimensions); tensoraxis.setLabels(labels); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); } void test_gettersAndSettersGpuClassT() { TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis; // Check defaults gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), ""); gpuCheckEqual(tensoraxis.getNLabels(), 0); gpuCheckEqual(tensoraxis.getNDimensions(), 0); // Check getters/setters tensoraxis.setId(1); tensoraxis.setName("1"); Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 1, 1, 1, 1, 1, 1, 1 })); tensoraxis.setDimensionsAndLabels(dimensions, labels); gpuCheckEqual(tensoraxis.getId(), 1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); } void test_copyGpuClassT() { Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 
1, 1, 1, 1, 1, 1, 1 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis1("1", dimensions, labels); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test expected auto tensoraxis_copy = tensoraxis1.copyToHost(device); tensoraxis1.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuCheck(*(tensoraxis_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis_copy->getLabels()(i, j), labels(i, j)); } } auto tensoraxis2_copy = tensoraxis1.copyToDevice(device); tensoraxis2_copy->syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheck(*(tensoraxis2_copy.get()) == tensoraxis1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 5; ++j) { gpuCheckEqual(tensoraxis2_copy->getLabels()(i, j), labels(i, j)); } } } void test_deleteFromAxisGpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Setup the selection indices and the expected labels int n_select_labels = 3; Eigen::Tensor<int, 1> indices_values(n_labels); Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_test(n_dimensions, n_select_labels); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { if (j % 2 == 0) { indices_values(j) = j + 1; labels_test(i, j / 2).setTensorArray(std::to_string(iter)); } else { indices_values(j) = 0; } ++iter; } } TensorDataGpuPrimitiveT<int, 1> indices(Eigen::array<Eigen::Index, 1>({ n_labels })); indices.setData(indices_values); std::shared_ptr<TensorDataGpuPrimitiveT<int, 1>> indices_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test indices_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.deleteFromAxis(indices_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_select_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_select_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_test(i, j)); } } } void test_appendLabelsToAxis1GpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorAxisGpuClassT<TensorArrayGpu8, char> 
tensoraxis("1", dimensions, labels); // Setup the new labels int n_new_labels = 2; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { labels_values(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorDataGpuClassT<TensorArrayGpu8, char, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); labels_new.setData(labels_values); std::shared_ptr<TensorDataGpuClassT<TensorArrayGpu8, char, 2>> labels_new_ptr = std::make_shared<TensorDataGpuClassT<TensorArrayGpu8, char, 2>>(labels_new); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + n_new_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_values(i, j - n_labels)); } } } void test_appendLabelsToAxis2GpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_values(i, j).setTensorArray(std::to_string(iter)); ++iter; } } TensorDataGpuClassT<TensorArrayGpu8, char, 2> labels_new(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); labels_new.setData(labels_values); std::shared_ptr<TensorDataGpuClassT<TensorArrayGpu8, char, 2>> labels_new_ptr = std::make_shared<TensorDataGpuClassT<TensorArrayGpu8, char, 2>>(labels_new); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Test labels_new_ptr->syncDData(device); tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxis(labels_new_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuErrchk(cudaStreamDestroy(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_values(i, j)); } } } void test_makeSortIndicesGpuClassT() { // Setup the axis int 
n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(j)); } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // make the expected indices Eigen::Tensor<int, 2> indices_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { indices_sort_test(i, j) = i + j * n_dimensions + 1; } } // test making the sort indices std::shared_ptr<TensorData<int, Eigen::GpuDevice, 2>> indices_sort; tensoraxis.makeSortIndices(indices_view_ptr, indices_sort, device); indices_sort->syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(indices_sort->getData()(i, j), indices_sort_test(i, j)); } } gpuErrchk(cudaStreamDestroy(stream)); } void test_sortLabelsGpuClassT() { // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j).setTensorArray(std::to_string(j)); } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Initialize the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // setup the sort indices Eigen::Tensor<int, 1> indices_view_values(n_labels); for (int i = 0; i < n_labels; ++i) indices_view_values(i) = i + 1; TensorDataGpuPrimitiveT<int, 1> indices_view(Eigen::array<Eigen::Index, 1>({ n_labels })); indices_view.setData(indices_view_values); std::shared_ptr<TensorData<int, Eigen::GpuDevice, 1>> indices_view_ptr = std::make_shared<TensorDataGpuPrimitiveT<int, 1>>(indices_view); indices_view_ptr->syncDData(device); tensoraxis.syncDData(device); // test sorting ASC tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels(i, j)); } } // make the expected labels Eigen::Tensor<TensorArrayGpu8<char>, 2> labels_sort_test(n_dimensions, n_labels); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_sort_test(i, j).setTensorArray(std::to_string(n_labels - j - 1)); } } 
indices_view_ptr->setDataStatus(true, false); for (int i = 0; i < n_labels; ++i) indices_view_ptr->getData()(i) = n_labels - i; indices_view_ptr->syncDData(device); // test sorting DESC tensoraxis.setDataStatus(false, true); tensoraxis.sortLabels(indices_view_ptr, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), labels_sort_test(i, j)); } } gpuErrchk(cudaStreamDestroy(stream)); } void test_storeAndLoadLabelsGpuClassT() { // Setup the axis Eigen::Tensor<std::string, 1> dimensions(3); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; dimensions(2) = "TensorDimension3"; Eigen::Tensor<TensorArrayGpu8<int>, 2> labels(3, 5); labels.setConstant(TensorArrayGpu8<int>({ 1, 1, 1, 1, 1, 1, 1, 1 })); TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis_io("1", dimensions, labels); // Store the axis data cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); tensoraxis_io.storeLabelsBinary("axis", device); // Load the axis data TensorAxisGpuClassT<TensorArrayGpu8, int> tensoraxis("1", 3, 5); tensoraxis.loadLabelsBinary("axis", device); gpuCheckEqual(tensoraxis.getId(), -1); gpuCheckEqual(tensoraxis.getName(), "1"); gpuCheckEqual(tensoraxis.getNDimensions(), 3); gpuCheckEqual(tensoraxis.getNLabels(), 5); //gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); // Not loaded //gpuCheckEqual(tensoraxis.getDimensions()(2), "TensorDimension3"); // Not loaded gpuCheckEqual(tensoraxis.getLabels()(0, 0).getTensorArray()(0), 1); gpuCheckEqual(tensoraxis.getLabels()(2, 4).getTensorArray()(0), 1); gpuErrchk(cudaStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv1GpuClassT() { // Setup the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; Eigen::Tensor<TensorArrayGpu8<char>, 2> labels(n_dimensions, n_labels); int iter = 0; for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels(i, j) = TensorArrayGpu8<char>(std::to_string(iter)); ++iter; } } TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", dimensions, labels); // Setup the new labels int n_new_labels = 4; Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_new_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_new_labels; ++j) { if (j < 2) labels_values(i, j) = std::to_string(i + j * n_dimensions + iter); else labels_values(i, j) = std::to_string(i + (j - 2) * n_dimensions + iter); // duplicates } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels + 2); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for 
(int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), TensorArrayGpu8<char>(labels(i, j))); } } for (int i = 0; i < n_dimensions; ++i) { for (int j = n_labels; j < tensoraxis.getNLabels(); ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), TensorArrayGpu8<char>(labels_values(i, j - n_labels))); } } gpuErrchk(cudaStreamDestroy(stream)); } void test_appendLabelsToAxisFromCsv2GpuClassT() { // Setup the device cudaStream_t stream; gpuErrchk(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); Eigen::GpuStreamDevice stream_device(&stream, 0); Eigen::GpuDevice device(&stream_device); // Setup the axis int n_dimensions = 2, n_labels = 5; Eigen::Tensor<std::string, 1> dimensions(n_dimensions); dimensions(0) = "TensorDimension1"; dimensions(1) = "TensorDimension2"; TensorAxisGpuClassT<TensorArrayGpu8, char> tensoraxis("1", n_dimensions, 0); tensoraxis.setDimensions(dimensions); // Setup the new labels Eigen::Tensor<std::string, 2> labels_values(Eigen::array<Eigen::Index, 2>({ n_dimensions, n_labels })); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { labels_values(i, j) = std::to_string(i + j * n_dimensions); } } // Test tensoraxis.syncDData(device); tensoraxis.appendLabelsToAxisFromCsv(labels_values, device); tensoraxis.syncHData(device); gpuErrchk(cudaStreamSynchronize(stream)); gpuCheckEqual(tensoraxis.getNDimensions(), n_dimensions); gpuCheckEqual(tensoraxis.getNLabels(), n_labels); gpuCheckEqual(tensoraxis.getDimensions()(0), "TensorDimension1"); gpuCheckEqual(tensoraxis.getDimensions()(1), "TensorDimension2"); for (int i = 0; i < n_dimensions; ++i) { for (int j = 0; j < n_labels; ++j) { gpuCheckEqual(tensoraxis.getLabels()(i, j), TensorArrayGpu8<char>(std::to_string(i + j * n_dimensions))); } } gpuErrchk(cudaStreamDestroy(stream)); } int main(int argc, char** argv) { gpuErrchk(cudaDeviceReset()); test_constructorGpuPrimitiveT(); test_destructorGpuPrimitiveT(); test_constructor1GpuPrimitiveT(); test_constructor2GpuPrimitiveT(); test_gettersAndSettersGpuPrimitiveT(); test_copyGpuPrimitiveT(); test_deleteFromAxisGpuPrimitiveT(); test_appendLabelsToAxis1GpuPrimitiveT(); test_appendLabelsToAxis2GpuPrimitiveT(); test_makeSortIndicesGpuPrimitiveT(); test_sortLabelsGpuPrimitiveT(); test_storeAndLoadLabelsGpuPrimitiveT(); test_appendLabelsToAxisFromCsv1GpuPrimitiveT(); test_appendLabelsToAxisFromCsv2GpuPrimitiveT(); gpuErrchk(cudaDeviceReset()); test_constructorGpuClassT(); test_destructorGpuClassT(); test_constructor1GpuClassT(); test_constructor2GpuClassT(); test_gettersAndSettersGpuClassT(); test_copyGpuClassT(); test_deleteFromAxisGpuClassT(); test_appendLabelsToAxis1GpuClassT(); test_appendLabelsToAxis2GpuClassT(); test_makeSortIndicesGpuClassT(); test_sortLabelsGpuClassT(); test_storeAndLoadLabelsGpuClassT(); test_appendLabelsToAxisFromCsv1GpuClassT(); test_appendLabelsToAxisFromCsv2GpuClassT(); return 0; } #endif
7fdc7082a42609a7adef39e20eceeed3c89591e5.hip
// !!! This is a file automatically generated by hipify!!!
#ifdef _WIN32
# define NOMINMAX
#endif

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <ctime>

// includes, project
// includes, kernels
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#define MAX_TILE_SIZE 1024

////////////////////////////////////////////////////////////////////////////////
// declaration, forward
double* read_array(const char* filename, int len) {
  double *x = (double*) malloc(len * sizeof(double));
  FILE *fp = fopen(filename, "r");
  for (int i = 0; i < len; i++) {
    fscanf(fp, "%lf", &x[i]);
  }
  fclose(fp);
  return x;
}

__global__ void computeOnDevice(double* dA, double* dB, double* dC, int nRows, int tileSize, float* incTime)
{
  __shared__ float ds_M[MAX_TILE_SIZE];
  __shared__ float ds_N[MAX_TILE_SIZE];
  int bx = blockIdx.x, by = blockIdx.y,
      tx = threadIdx.x, ty = threadIdx.y,
      Row = by * tileSize + ty,
      Col = bx * tileSize + tx;
  double Pvalue = 0;

  for (int m = 0; m < (nRows-1)/tileSize+1; ++m) {
    if (Row < nRows && m*tileSize+tx < nRows)
      ds_M[ty * tileSize + tx] = dA[Row*nRows + m*tileSize+tx];
    else
      ds_M[ty * tileSize + tx] = 0;
    if (Col < nRows && m*tileSize+ty < nRows)
      ds_N[ty * tileSize + tx] = dB[(m*tileSize+ty)*nRows+Col];
    else
      ds_N[ty * tileSize + tx] = 0;
    __syncthreads();
    for (int k = 0; k < tileSize; ++k)
      Pvalue += ds_M[ty * tileSize + k] * ds_N[k * tileSize + tx];
    __syncthreads();
  }
  if (Row < nRows && Col < nRows)
    dC[Row*nRows+Col] = Pvalue;
  return;//Placeholder
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
  if(argc!=2) {
    printf("Usage: ./problem2 N\n");
    return 0;
  }

  int nRows = 1024;
  int num_elements = nRows*nRows;
  int tileSize = atoi(argv[1]); //change this for scaling analysis
  float incTime=0; // Time for GPU

  double* hA = read_array("inputA.inp",num_elements);
  double* hB = read_array("inputB.inp",num_elements);
  double* hC = (double*) malloc(num_elements * sizeof(double));

  dim3 dimGrid((nRows - 1) / tileSize + 1, (nRows - 1) / tileSize + 1, 1);
  dim3 dimBlock(tileSize, tileSize, 1);

  double * dA, *dB, *dC;
  hipError_t error = hipMalloc((void**)&dA, sizeof(double)*num_elements);
  error = hipMalloc((void**)&dB, sizeof(double)*num_elements);
  error = hipMalloc((void**)&dC, sizeof(double)*num_elements);

  hipMemcpy(dA, hA, sizeof(double)*num_elements, hipMemcpyHostToDevice);
  hipMemcpy(dB, hB, sizeof(double)*num_elements, hipMemcpyHostToDevice);

  hipEvent_t startEvent_inc, stopEvent_inc;
  hipEventCreate(&startEvent_inc);
  hipEventCreate(&stopEvent_inc);
  hipEventRecord(startEvent_inc,0); // starting timing for inclusive

  // **===-------- Modify the body of this function -----------===**
  hipLaunchKernelGGL(( computeOnDevice), dim3(dimGrid), dim3(dimBlock), 0, 0, dA, dB, dC, nRows, tileSize, &incTime);
  // **===-----------------------------------------------------------===**
  hipDeviceSynchronize();

  hipMemcpy(hC, dC, sizeof(double)*num_elements, hipMemcpyDeviceToHost);

  hipEventRecord(stopEvent_inc,0); //ending timing for inclusive
  hipEventSynchronize(stopEvent_inc);
  hipEventElapsedTime(&incTime, startEvent_inc, stopEvent_inc);

  printf("%lf\n%f\n%d\n",hC[num_elements - 1],incTime,tileSize);

  // cleanup memory
  free(hA);
  free(hB);
  free(hC);

  return 0;
}
7fdc7082a42609a7adef39e20eceeed3c89591e5.cu
#ifdef _WIN32
# define NOMINMAX
#endif

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <ctime>

// includes, project
// includes, kernels
#include <cuda.h>
#include <cuda_runtime.h>

#define MAX_TILE_SIZE 1024

////////////////////////////////////////////////////////////////////////////////
// declaration, forward

double* read_array(const char* filename, int len) {
  double *x = (double*) malloc(len * sizeof(double));
  FILE *fp = fopen(filename, "r");
  for (int i = 0; i < len; i++) {
    fscanf(fp, "%lf", &x[i]);
  }
  fclose(fp);
  return x;
}

__global__ void computeOnDevice(double* dA, double* dB, double* dC, int nRows, int tileSize, float* incTime)
{
  __shared__ float ds_M[MAX_TILE_SIZE];
  __shared__ float ds_N[MAX_TILE_SIZE];

  int bx = blockIdx.x, by = blockIdx.y,
      tx = threadIdx.x, ty = threadIdx.y,
      Row = by * tileSize + ty,
      Col = bx * tileSize + tx;
  double Pvalue = 0;

  for (int m = 0; m < (nRows-1)/tileSize+1; ++m) {
    if (Row < nRows && m*tileSize+tx < nRows)
      ds_M[ty * tileSize + tx] = dA[Row*nRows + m*tileSize+tx];
    else
      ds_M[ty * tileSize + tx] = 0;

    if (Col < nRows && m*tileSize+ty < nRows)
      ds_N[ty * tileSize + tx] = dB[(m*tileSize+ty)*nRows+Col];
    else
      ds_N[ty * tileSize + tx] = 0;

    __syncthreads();
    for (int k = 0; k < tileSize; ++k)
      Pvalue += ds_M[ty * tileSize + k] * ds_N[k * tileSize + tx];
    __syncthreads();
  }

  if (Row < nRows && Col < nRows)
    dC[Row*nRows+Col] = Pvalue;

  return; //Placeholder
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
  if(argc!=2) {
    printf("Usage: ./problem2 N\n");
    return 0;
  }

  int nRows = 1024;
  int num_elements = nRows*nRows;
  int tileSize = atoi(argv[1]); //change this for scaling analysis
  float incTime=0; // Time for GPU

  double* hA = read_array("inputA.inp",num_elements);
  double* hB = read_array("inputB.inp",num_elements);
  double* hC = (double*) malloc(num_elements * sizeof(double));

  dim3 dimGrid((nRows - 1) / tileSize + 1, (nRows - 1) / tileSize + 1, 1);
  dim3 dimBlock(tileSize, tileSize, 1);

  double * dA, *dB, *dC;
  cudaError error = cudaMalloc((void**)&dA, sizeof(double)*num_elements);
  error = cudaMalloc((void**)&dB, sizeof(double)*num_elements);
  error = cudaMalloc((void**)&dC, sizeof(double)*num_elements);

  cudaMemcpy(dA, hA, sizeof(double)*num_elements, cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, sizeof(double)*num_elements, cudaMemcpyHostToDevice);

  cudaEvent_t startEvent_inc, stopEvent_inc;
  cudaEventCreate(&startEvent_inc);
  cudaEventCreate(&stopEvent_inc);
  cudaEventRecord(startEvent_inc,0); // starting timing for inclusive

  // **===-------- Modify the body of this function -----------===**
  computeOnDevice<<<dimGrid, dimBlock>>>(dA, dB, dC, nRows, tileSize, &incTime);
  // **===-----------------------------------------------------------===**
  cudaThreadSynchronize();

  cudaMemcpy(hC, dC, sizeof(double)*num_elements, cudaMemcpyDeviceToHost);

  cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
  cudaEventSynchronize(stopEvent_inc);
  cudaEventElapsedTime(&incTime, startEvent_inc, stopEvent_inc);

  printf("%lf\n%f\n%d\n",hC[num_elements - 1],incTime,tileSize);

  // cleanup memory
  free(hA);
  free(hB);
  free(hC);

  return 0;
}
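The pair above is a shared-memory tiled matrix multiply: it reads two 1024x1024 double matrices from inputA.inp and inputB.inp, takes the tile size as a command-line argument (at most 32, since each tile is addressed as ty * tileSize + tx inside a MAX_TILE_SIZE = 1024 element shared array), and prints only the last element of the product plus the elapsed time. A minimal host-side reference check is sketched below; it is not part of the original files, and cpuMatMul/maxAbsDiff are illustrative names. The reference product computed here is what hC, copied back from the device, would be compared against.

// Sketch: CPU reference product for spot-checking the GPU result (assumed helper, not in the original files).
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Straightforward O(n^3) reference product, C = A * B, all matrices n x n row-major.
static void cpuMatMul(const double* A, const double* B, double* C, int n) {
  for (int r = 0; r < n; ++r) {
    for (int c = 0; c < n; ++c) {
      double acc = 0.0;
      for (int k = 0; k < n; ++k) acc += A[r * n + k] * B[k * n + c];
      C[r * n + c] = acc;
    }
  }
}

// Largest absolute element-wise difference between two length-len buffers.
static double maxAbsDiff(const double* x, const double* y, int len) {
  double m = 0.0;
  for (int i = 0; i < len; ++i) m = fmax(m, fabs(x[i] - y[i]));
  return m;
}

int main(void) {
  const int n = 64;  // small size so the cubic reference stays cheap
  double* A = (double*)malloc(n * n * sizeof(double));
  double* B = (double*)malloc(n * n * sizeof(double));
  double* C = (double*)malloc(n * n * sizeof(double));
  for (int i = 0; i < n * n; ++i) { A[i] = drand48(); B[i] = drand48(); }
  cpuMatMul(A, B, C, n);
  // In the programs above, C would be compared against hC with maxAbsDiff(C, hC, n * n).
  printf("reference C[0] = %f, C[n*n-1] = %f, self diff = %f\n",
         C[0], C[n * n - 1], maxAbsDiff(C, C, n * n));
  free(A); free(B); free(C);
  return 0;
}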
9611f460df774392adf8ab618e2fa4955acf6bb9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime_api.h>

#define Tile_Width 16

// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != hipSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
    assert(result == hipSuccess);
  }
#endif
  return result;
}

__global__ void matMul(float* Pd, float* Md, float* Nd, int Width)
{
  float Pvalue = 0.0;
  int j = blockIdx.x * Tile_Width + threadIdx.x;
  int i = blockIdx.y * Tile_Width + threadIdx.y;

  for (int k = 0; k < Width; ++k) {
    Pvalue += Md[j * Width + k] * Nd[k * Width + i];
  }
  Pd[j * Width + i] = Pvalue;
}

// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
  for (int k = 0; k < size; ++k) {
    data[k] = (float)drand48();
  }
}

int main(int argc, char* argv[])
{
  if (argc != 3) {
    fprintf(stderr, "Syntax: %s <matrix size Width> <device id>\n", argv[0]);
    return EXIT_FAILURE;
  }

  int Width = atoi(argv[1]);
  int devId = atoi(argv[2]);

  checkCuda( hipSetDevice(devId) );
  hipDeviceReset();

  // allocate host memory for matrices M and N
  printf("Allocate host memory for matrices M and N...\n");
  float* M = (float*) malloc(Width * Width * sizeof(float));
  float* N = (float*) malloc(Width * Width * sizeof(float));
  float* P = (float*) malloc(Width * Width * sizeof(float));

  // set seed for drand48()
  srand48(42);

  // initialize host matrices
  printf("Initialize host matrices...\n");
  randomInit(M, Width*Width);
  randomInit(N, Width*Width);

  // allocate device matrices (linearized)
  printf("Allocate device matrices (linearized)...\n");
  float* Md = NULL;
  float* Nd = NULL;
  float* Pd = NULL;
  checkCuda( hipMalloc((void**) &Md, Width * Width * sizeof(float)) );
  checkCuda( hipMalloc((void**) &Nd, Width * Width * sizeof(float)) );
  checkCuda( hipMalloc((void**) &Pd, Width * Width * sizeof(float)) );

  // copy host memory to device
  checkCuda( hipMemcpy(Md, M, Width*Width*sizeof(float), hipMemcpyHostToDevice) );
  checkCuda( hipMemcpy(Nd, N, Width*Width*sizeof(float), hipMemcpyHostToDevice) );

  // execute the kernel
  printf("Execute the kernel...\n");
  int GridSize = (Width + Tile_Width-1) / Tile_Width;
  dim3 gridDim(GridSize, GridSize);
  dim3 blockDim(Tile_Width, Tile_Width);
  hipProfilerStart();
  hipLaunchKernelGGL(( matMul), dim3(gridDim), dim3(blockDim) , 0, 0, Pd, Md, Nd, Width);
  hipProfilerStop();

  // copy result from device to host
  checkCuda( hipMemcpy( P, Pd, Width * Width * sizeof(float),hipMemcpyDeviceToHost) );

  hipDeviceProp_t prop;
  checkCuda( hipGetDeviceProperties(&prop, devId) );
  printf("Device: %s\n", prop.name);

  /* print result
  FILE *ptr_file;
  ptr_file =fopen("matMul_gpu_globalmem_uncoalesced.out", "w");
  if (!ptr_file)
    return 1;
  for (int i=0; i < Width; i++){
    for (int j=0; j < Width; j++)
      fprintf(ptr_file,"%6.2f ", P[i * Width + j]);
    fprintf(ptr_file,"\n");
  }
  fclose(ptr_file);*/

  // clean up memory
  free(M);
  free(N);
  free(P);
  checkCuda( hipFree(Md) );
  checkCuda( hipFree(Nd) );
  checkCuda( hipFree(Pd) );

  return 0;
}
9611f460df774392adf8ab618e2fa4955acf6bb9.cu
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_profiler_api.h>

#define Tile_Width 16

// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
#endif
  return result;
}

__global__ void matMul(float* Pd, float* Md, float* Nd, int Width)
{
  float Pvalue = 0.0;
  int j = blockIdx.x * Tile_Width + threadIdx.x;
  int i = blockIdx.y * Tile_Width + threadIdx.y;

  for (int k = 0; k < Width; ++k) {
    Pvalue += Md[j * Width + k] * Nd[k * Width + i];
  }
  Pd[j * Width + i] = Pvalue;
}

// Allocates a matrix with random float entries.
void randomInit(float* data, int size)
{
  for (int k = 0; k < size; ++k) {
    data[k] = (float)drand48();
  }
}

int main(int argc, char* argv[])
{
  if (argc != 3) {
    fprintf(stderr, "Syntax: %s <matrix size Width> <device id>\n", argv[0]);
    return EXIT_FAILURE;
  }

  int Width = atoi(argv[1]);
  int devId = atoi(argv[2]);

  checkCuda( cudaSetDevice(devId) );
  cudaDeviceReset();

  // allocate host memory for matrices M and N
  printf("Allocate host memory for matrices M and N...\n");
  float* M = (float*) malloc(Width * Width * sizeof(float));
  float* N = (float*) malloc(Width * Width * sizeof(float));
  float* P = (float*) malloc(Width * Width * sizeof(float));

  // set seed for drand48()
  srand48(42);

  // initialize host matrices
  printf("Initialize host matrices...\n");
  randomInit(M, Width*Width);
  randomInit(N, Width*Width);

  // allocate device matrices (linearized)
  printf("Allocate device matrices (linearized)...\n");
  float* Md = NULL;
  float* Nd = NULL;
  float* Pd = NULL;
  checkCuda( cudaMalloc((void**) &Md, Width * Width * sizeof(float)) );
  checkCuda( cudaMalloc((void**) &Nd, Width * Width * sizeof(float)) );
  checkCuda( cudaMalloc((void**) &Pd, Width * Width * sizeof(float)) );

  // copy host memory to device
  checkCuda( cudaMemcpy(Md, M, Width*Width*sizeof(float), cudaMemcpyHostToDevice) );
  checkCuda( cudaMemcpy(Nd, N, Width*Width*sizeof(float), cudaMemcpyHostToDevice) );

  // execute the kernel
  printf("Execute the kernel...\n");
  int GridSize = (Width + Tile_Width-1) / Tile_Width;
  dim3 gridDim(GridSize, GridSize);
  dim3 blockDim(Tile_Width, Tile_Width);
  cudaProfilerStart();
  matMul<<< gridDim, blockDim >>>(Pd, Md, Nd, Width);
  cudaProfilerStop();

  // copy result from device to host
  checkCuda( cudaMemcpy( P, Pd, Width * Width * sizeof(float),cudaMemcpyDeviceToHost) );

  cudaDeviceProp prop;
  checkCuda( cudaGetDeviceProperties(&prop, devId) );
  printf("Device: %s\n", prop.name);

  /* print result
  FILE *ptr_file;
  ptr_file =fopen("matMul_gpu_globalmem_uncoalesced.out", "w");
  if (!ptr_file)
    return 1;
  for (int i=0; i < Width; i++){
    for (int j=0; j < Width; j++)
      fprintf(ptr_file,"%6.2f ", P[i * Width + j]);
    fprintf(ptr_file,"\n");
  }
  fclose(ptr_file);*/

  // clean up memory
  free(M);
  free(N);
  free(P);
  checkCuda( cudaFree(Md) );
  checkCuda( cudaFree(Nd) );
  checkCuda( cudaFree(Pd) );

  return 0;
}
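As the matMul_gpu_globalmem_uncoalesced.out name in the commented-out print block suggests, the kernel above drives the row index j from threadIdx.x, so consecutive threads of a warp read Md and write Pd at addresses Width elements apart and those accesses do not coalesce. A sketch of the coalesced mapping follows; it is an illustration only, not part of the original pair, and simply lets the fast-varying thread coordinate drive the column index instead.

// Sketch: coalesced variant of the same naive multiply (assumed kernel, not in the original files).
__global__ void matMulCoalesced(const float* Md, const float* Nd, float* Pd, int Width)
{
  int row = blockIdx.y * blockDim.y + threadIdx.y;  // slow-varying across a warp
  int col = blockIdx.x * blockDim.x + threadIdx.x;  // fast-varying across a warp
  if (row < Width && col < Width) {
    float acc = 0.0f;
    for (int k = 0; k < Width; ++k) {
      // Nd[k * Width + col]: adjacent threads read adjacent addresses -> coalesced loads.
      acc += Md[row * Width + k] * Nd[k * Width + col];
    }
    Pd[row * Width + col] = acc;  // adjacent threads write adjacent addresses -> coalesced stores.
  }
}
// Launch shape is unchanged, e.g. matMulCoalesced<<<gridDim, blockDim>>>(Md, Nd, Pd, Width);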
1b5bee6132c1dfaddb5e6994d4b575e85fca0411.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <cstdio>

// TODO: see if there's a better way to do all of this
// TODO: consider inlining the following two functions

__device__ static double add(double x, double y) {
    return x + y;
}

__device__ static double max_wrapper(double x, double y) {
    return fmax(x, y);
}

template <typename F>
__device__ static void parallel_reduce(double* __restrict__ arr, double* __restrict__ result,
                                       unsigned n, F func) {
    unsigned idx = threadIdx.x;

    __shared__ double* prefix_arr;
    if(idx == 0) {
        prefix_arr = new double[n];
        memcpy(prefix_arr, arr, sizeof(double) * n);
    }
    __syncthreads();

    for(unsigned i = 1; i < n; i *= 2) {
        if(idx >= i) {
            //prefix_arr[idx] += prefix_arr[idx-i];
            prefix_arr[idx] = func(prefix_arr[idx], prefix_arr[idx-i]);
        }
        __syncthreads();
    }

    if(idx == 0) {
        *result = prefix_arr[n-1];
        delete[] prefix_arr;
    }
}

// works for single block with multiple threads
__device__ void prefix_sum(double* __restrict__ arr, double* __restrict__ result, unsigned n) {
    parallel_reduce(arr, result, n, add);
}

// works for single block with multiple threads
__device__ void parallel_max(double* __restrict__ arr, double* __restrict__ result, unsigned n) {
    parallel_reduce(arr, result, n, max_wrapper);
}
1b5bee6132c1dfaddb5e6994d4b575e85fca0411.cu
#include "utils.h"
#include <cstdio>

// TODO: see if there's a better way to do all of this
// TODO: consider inlining the following two functions

__device__ static double add(double x, double y) {
    return x + y;
}

__device__ static double max_wrapper(double x, double y) {
    return fmax(x, y);
}

template <typename F>
__device__ static void parallel_reduce(double* __restrict__ arr, double* __restrict__ result,
                                       unsigned n, F func) {
    unsigned idx = threadIdx.x;

    __shared__ double* prefix_arr;
    if(idx == 0) {
        prefix_arr = new double[n];
        memcpy(prefix_arr, arr, sizeof(double) * n);
    }
    __syncthreads();

    for(unsigned i = 1; i < n; i *= 2) {
        if(idx >= i) {
            //prefix_arr[idx] += prefix_arr[idx-i];
            prefix_arr[idx] = func(prefix_arr[idx], prefix_arr[idx-i]);
        }
        __syncthreads();
    }

    if(idx == 0) {
        *result = prefix_arr[n-1];
        delete[] prefix_arr;
    }
}

// works for single block with multiple threads
__device__ void prefix_sum(double* __restrict__ arr, double* __restrict__ result, unsigned n) {
    parallel_reduce(arr, result, n, add);
}

// works for single block with multiple threads
__device__ void parallel_max(double* __restrict__ arr, double* __restrict__ result, unsigned n) {
    parallel_reduce(arr, result, n, max_wrapper);
}
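parallel_reduce above is a single-block, one-element-per-thread Hillis-Steele inclusive scan over a buffer that thread 0 allocates with device-side new; note that within one stride step a thread reads prefix_arr[idx-i] while the thread that owns that slot may be updating it, because there is no barrier between the read and the write. The standalone sketch below is not part of the original files (inclusiveScan and SCAN_N are assumed names); it runs the same scan with a static shared array and copies the neighbour value into a register before a barrier, which avoids that hazard.

// Sketch: self-contained single-block inclusive scan (assumed example, not in the original files).
#include <stdio.h>

#define SCAN_N 256  // one block, one element per thread

__global__ void inclusiveScan(const double* in, double* out) {
    __shared__ double buf[SCAN_N];
    unsigned t = threadIdx.x;
    buf[t] = in[t];
    __syncthreads();
    for (unsigned stride = 1; stride < SCAN_N; stride *= 2) {
        double neighbour = (t >= stride) ? buf[t - stride] : 0.0;  // read before anyone overwrites it
        __syncthreads();
        buf[t] += neighbour;
        __syncthreads();
    }
    out[t] = buf[t];  // out[t] = in[0] + ... + in[t]
}

int main(void) {
    double h_in[SCAN_N], h_out[SCAN_N];
    for (int i = 0; i < SCAN_N; ++i) h_in[i] = 1.0;  // prefix sums should be 1, 2, 3, ...
    double *d_in = NULL, *d_out = NULL;
    cudaMalloc((void**)&d_in, sizeof(h_in));
    cudaMalloc((void**)&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    inclusiveScan<<<1, SCAN_N>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("last prefix sum = %f (expected %d)\n", h_out[SCAN_N - 1], SCAN_N);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}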
b220cc33c79fb27767adb2978c6ea46b543d1309.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/native/TensorTransformations.h" #include "ATen/hip/detail/IndexUtils.cuh" #include "ATen/NativeFunctions.h" #include "ATen/hip/HIPApplyUtils.cuh" #include "ATen/hip/HIPContext.h" #include <cstddef> #include <vector> namespace at { namespace native { #define AT_APPLY_THREADS_PER_BLOCK 32 * 16 #define AT_APPLY_BLOCKS_PER_SM 4 template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__ __launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) #endif __global__ void kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> __global__ void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } // Flip tensor given a list of dims Tensor flip_cuda(const Tensor& self, IntList dims) { auto in_tensor = self; const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel(); flip_check_errors(total_dims, flip_dims_size, dims); int64_t block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } auto flip_dims = dims.vec(); wrap_all_dims(flip_dims, total_dims); // use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor); auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor); int flip_dim = in_tensor_info.collapseDims(flip_dims[0]); out_tensor_info.collapseDims(flip_dims[0]); hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>) , dim3(dim_grid), dim3(dim_block), 0, 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor_info, out_tensor_info, N, flip_dim, total_dims); }); return out_tensor; } auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}); auto shape = in_tensor.sizes().vec(); auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())}); auto strides = in_tensor.strides().vec(); auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())}); // stride_contiguous is the stride of non-contiguous tensor after calling contiguous(), // it is used to compute indices for each element in non-contiguous tensor Tensor stride_contiguous = at::zeros({total_dims}, kLong); int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>(); for (int64_t i = total_dims - 1; i >= 0; i--) { if (i == total_dims - 1) { stride_contiguous_d[i] = 1; } else { stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1]; } } AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims); }); return out_tensor; } template <typename scalar_t> __global__ void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t roll_dim, int64_t shift, int64_t start, int64_t size, int64_t stride, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } // roll dim idx is the index of linear_index along the rolling dimension. int64_t roll_dim_idx = linear_index % (stride * size) / stride; // index into the source data to find appropriate value. int64_t source_idx = 0; if( roll_dim_idx >= (size - start) ) { source_idx = linear_index - (shift * stride); } else { source_idx = linear_index + (start * stride); } out_tensor[linear_index] = in_tensor[source_idx]; } // Roll a tensor along a dimension Tensor roll_cuda(const Tensor& self, IntList shifts, IntList dims) { if (dims.size() == 0 && shifts.size() == 1) { auto flattened = self.contiguous().view(self.numel()); return roll_cuda(flattened, shifts[0], 0).view(self.sizes()); } AT_CHECK(shifts.size() == dims.size(), "shifts and dimensions must align"); // todo: support rolling along multiple dimensions as in numpy.roll. AT_CHECK(dims.size() == 1, "only single dimension roll currently supported"); auto in_tensor = self; if(!self.is_contiguous()) { in_tensor = self.contiguous(); } auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } const int64_t N = in_tensor.numel(); const int64_t dim = dims[0]; const int64_t size = in_tensor.size(dim); int64_t start = (size - shifts[0]) % size; // Behavior of % is different in C++ vs Python for negative numbers. This // corrects the difference. 
if( start < 0 ) start = start + size; dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; AT_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid"); auto total_dims = in_tensor.dim(); AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "roll_cuda", [&] { hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, dim, shifts[0], start, size, in_tensor.stride(dim), total_dims); }); return out_tensor; } }} // namespace at::native
b220cc33c79fb27767adb2978c6ea46b543d1309.cu
#include "ATen/native/TensorTransformations.h" #include "ATen/cuda/detail/IndexUtils.cuh" #include "ATen/NativeFunctions.h" #include "ATen/cuda/CUDAApplyUtils.cuh" #include "ATen/cuda/CUDAContext.h" #include <cstddef> #include <vector> namespace at { namespace native { #define AT_APPLY_THREADS_PER_BLOCK 32 * 16 #define AT_APPLY_BLOCKS_PER_SM 4 template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__ __launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) #endif __global__ void kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> __global__ void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } // Flip tensor given a list of dims Tensor flip_cuda(const Tensor& self, IntList dims) { auto in_tensor = self; const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel(); flip_check_errors(total_dims, flip_dims_size, dims); int64_t block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } auto flip_dims = dims.vec(); wrap_all_dims(flip_dims, total_dims); // use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor); auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor); int flip_dim = in_tensor_info.collapseDims(flip_dims[0]); out_tensor_info.collapseDims(flip_dims[0]); kernel_pointwise_flip_apply2<scalar_t, int64_t> <<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor_info, out_tensor_info, N, flip_dim, total_dims); }); return out_tensor; } auto 
flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}); auto shape = in_tensor.sizes().vec(); auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())}); auto strides = in_tensor.strides().vec(); auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())}); // stride_contiguous is the stride of non-contiguous tensor after calling contiguous(), // it is used to compute indices for each element in non-contiguous tensor Tensor stride_contiguous = at::zeros({total_dims}, kLong); int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>(); for (int64_t i = total_dims - 1; i >= 0; i--) { if (i == total_dims - 1) { stride_contiguous_d[i] = 1; } else { stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1]; } } AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { flip_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims); }); return out_tensor; } template <typename scalar_t> __global__ void roll_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t roll_dim, int64_t shift, int64_t start, int64_t size, int64_t stride, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } // roll dim idx is the index of linear_index along the rolling dimension. int64_t roll_dim_idx = linear_index % (stride * size) / stride; // index into the source data to find appropriate value. int64_t source_idx = 0; if( roll_dim_idx >= (size - start) ) { source_idx = linear_index - (shift * stride); } else { source_idx = linear_index + (start * stride); } out_tensor[linear_index] = in_tensor[source_idx]; } // Roll a tensor along a dimension Tensor roll_cuda(const Tensor& self, IntList shifts, IntList dims) { if (dims.size() == 0 && shifts.size() == 1) { auto flattened = self.contiguous().view(self.numel()); return roll_cuda(flattened, shifts[0], 0).view(self.sizes()); } AT_CHECK(shifts.size() == dims.size(), "shifts and dimensions must align"); // todo: support rolling along multiple dimensions as in numpy.roll. AT_CHECK(dims.size() == 1, "only single dimension roll currently supported"); auto in_tensor = self; if(!self.is_contiguous()) { in_tensor = self.contiguous(); } auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } const int64_t N = in_tensor.numel(); const int64_t dim = dims[0]; const int64_t size = in_tensor.size(dim); int64_t start = (size - shifts[0]) % size; // Behavior of % is different in C++ vs Python for negative numbers. This // corrects the difference. if( start < 0 ) start = start + size; dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; AT_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid"); auto total_dims = in_tensor.dim(); AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "roll_cuda", [&] { roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, dim, shifts[0], start, size, in_tensor.stride(dim), total_dims); }); return out_tensor; } }} // namespace at::native
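flip_cuda_kernel above decomposes each output element's contiguous linear index into per-dimension coordinates using strides_contiguous, mirrors the coordinates of the dimensions listed in flip_dims, and rebuilds the source offset with the input tensor's own strides. The host-side sketch below walks through the same arithmetic for a small 2x3 row-major example; it is plain C++ with no ATen dependency, and flippedOffset is an illustrative name rather than anything from the original files.

// Sketch: the flip index arithmetic on the host (assumed example, not in the original files).
#include <stdio.h>
#include <vector>

static long flippedOffset(long linear,
                          const std::vector<long>& shape,
                          const std::vector<long>& strides,        // strides of the source tensor
                          const std::vector<long>& contigStrides,  // strides of a contiguous tensor of the same shape
                          const std::vector<int>& flipDims) {
  long rem = linear, offset = 0;
  for (size_t d = 0; d < shape.size(); ++d) {
    long coord = rem / contigStrides[d];   // coordinate along dimension d
    rem -= coord * contigStrides[d];
    for (int f : flipDims) {
      if ((size_t)f == d) coord = shape[d] - 1 - coord;  // mirror flipped dimensions
    }
    offset += coord * strides[d];          // rebuild the source offset
  }
  return offset;
}

int main(void) {
  std::vector<long> shape = {2, 3}, strides = {3, 1}, contig = {3, 1};
  std::vector<int> flip = {1};  // flip the last dimension
  for (long i = 0; i < 6; ++i)
    printf("out[%ld] <- in[%ld]\n", i, flippedOffset(i, shape, strides, contig, flip));
  return 0;
}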
fcec56474746339d9690a774faa945c858ce9fd2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { using namespace at::cuda::detail; namespace { template <typename scalar_t, typename accscalar_t> __device__ inline int64_t get_intervals( accscalar_t sample, int64_t index, int64_t inputSize, int64_t outputSize, int64_t poolSize) { accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) / static_cast<accscalar_t>(outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return static_cast<int64_t>((index + sample) * alpha) - \ static_cast<int64_t>(sample * alpha); } } template <typename scalar_t> __global__ void fractional_max_pool3d_out_frame( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, PackedTensorAccessor64<int64_t, 5> indices, PackedTensorAccessor64<scalar_t, 3> samples, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; // Output (t, h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.size(2) * output.size(3) * output.size(4)){ int64_t outputT = ourOutputPoint / (output.size(3) * output.size(4)); int64_t outputH = (ourOutputPoint / output.size(4)) % output.size(3); int64_t outputW = ourOutputPoint % output.size(4); int64_t poolT = get_intervals<scalar_t,accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][0]), outputT, input.size(2), output.size(2), poolSizeT); int64_t poolH = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][1]), outputH, input.size(3), output.size(3), poolSizeH); int64_t poolW = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][2]), outputW, input.size(4), output.size(4), poolSizeW); scalar_t maxVal = at::numeric_limits<scalar_t>::lowest(); int64_t maxIndex = -1; for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) { for (int64_t h = poolH; h < poolH + poolSizeH; ++h) { if(poolSizeW < 2 || poolSizeW > 7) { for (int64_t w = poolW; w < poolW + poolSizeW; ++w) { scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } else { for (int64_t i = 0; i < poolSizeW; ++i) { int64_t w = i + poolW; scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } } } assert(maxVal != at::numeric_limits<scalar_t>::lowest()); assert(maxIndex != -1); indices[batch][plane][outputT][outputH][outputW] = maxIndex; output[batch][plane][outputT][outputH][outputW] = maxVal; } } template <typename scalar_t> __global__ void fractional_max_pool3d_backward_out_frame( PackedTensorAccessor64<scalar_t, 5> gradInput, 
PackedTensorAccessor64<scalar_t, 5> gradOutput, PackedTensorAccessor64<int64_t, 5> indices) { // Output (h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4)) { int64_t outputW = ourOutputPoint % gradOutput.size(4); int64_t outputH = (ourOutputPoint / gradOutput.size(4)) % gradOutput.size(3); int64_t outputT = ourOutputPoint / (gradOutput.size(3) * gradOutput.size(4)); int64_t index = indices[batch][plane][outputT][outputH][outputW]; assert(index >= 0); int64_t inputW = index % gradInput.size(4); int64_t inputH = (index / gradInput.size(4)) % gradInput.size(3); int64_t inputT = index / (gradInput.size(3) * gradInput.size(4)); assert(inputT < gradInput.size(2)); gpuAtomicAdd( &gradInput[batch][plane][inputT][inputH][inputW], gradOutput[batch][plane][outputT][outputH][outputW] ); } } void fractional_max_pool3d_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const Tensor& randomSamples) { int64_t planeDim = 0; int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t numBatch = 1; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t poolSizeT = pool_size[0]; int64_t poolSizeH = pool_size[1]; int64_t poolSizeW = pool_size[2]; int64_t ndims = input.ndimension(); TORCH_CHECK( input.numel() != 0 && (ndims == 4 || ndims == 5), "fractional_max_pool3d_out_cuda_template(): ", "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", ndims); if (ndims == 5) { numBatch = input.size(0); planeDim++; dimt++; dimh++; dimw++; } /* sizes */ int64_t numPlanes = input.size(planeDim); int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT + poolSizeT - 1 < inputT, "fractional_max_pool3d_out_cuda_template(): ", "pool time (", poolSizeT, ") too large relative to input time (", inputT, ")"); TORCH_CHECK( outputH + poolSizeH - 1 < inputH, "fractional_max_pool3d_out_cuda_template(): ", "pool height (", poolSizeH, ") too large relative to input height (", inputH, ")"); TORCH_CHECK( outputW + poolSizeW - 1 < inputW, "fractional_max_pool3d_out_cuda_template(): ", "pool width (", poolSizeW, ") too large relative to input width (", inputW, ")"); if (ndims == 4) { /* resize output */ output.resize_({numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numPlanes, outputT, outputH, outputW}); } else { /* resize output */ output.resize_({numBatch, numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numBatch, numPlanes, outputT, outputH, outputW}); } auto output_ = output; auto indices_ = indices; auto input_ = input; if(ndims == 4) { output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW}); indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW}); input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW}); } // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = output_.size(2) * output_.size(3) * output_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) input_.size(1), input_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "fractional_max_pool3d_out_frame", [&]{ hipLaunchKernelGGL(( fractional_max_pool3d_out_frame<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_.packed_accessor64<scalar_t, 5>(), output_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>(), randomSamples.packed_accessor64<scalar_t, 3>(), poolSizeT, poolSizeH, poolSizeW ); } ); TORCH_CHECK(hipGetLastError() == hipSuccess, "fractional_max_pool2d_out_cuda_template failed with error code ", hipGetLastError()); } void fractional_max_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef pool_size /* unused */, IntArrayRef output_size, const Tensor& indices) { int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t ndims = input.ndimension(); if (ndims == 5) { dimt++; dimh++; dimw++; } /* sizes */ int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT == gradOutput.size(dimt), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput time unexpected" ); TORCH_CHECK( outputH == gradOutput.size(dimh), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput height unexpected" ); TORCH_CHECK( outputW == gradOutput.size(dimw), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput width unexpected" ); /* resize */ gradInput.resize_as_(input); gradInput.zero_(); auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; auto indices_ = indices; if(ndims == 4) { gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT, inputH, inputW}); gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT, outputH, outputW}); indices_ = indices_.reshape({1, indices.size(0), outputT, outputH, outputW}); } /* backprop */ // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = gradOutput_.size(2) * gradOutput_.size(3) * gradOutput_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) gradInput_.size(1), gradInput_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( gradOutput.scalar_type(), "fractional_max_pool3d_backward_out_frame", [&] { hipLaunchKernelGGL(( fractional_max_pool3d_backward_out_frame<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_.packed_accessor64<scalar_t, 5>(), gradOutput_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>() ); } ); TORCH_CHECK(hipGetLastError() == hipSuccess, "fractional_max_pool2d_out_cuda_template failed with error code ", hipGetLastError()); } }// namespace std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda( at::Tensor& output, at::Tensor& indices, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda( const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& fractional_max_pool3d_backward_out_cuda( at::Tensor& gradInput, const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices ); return gradInput; } Tensor fractional_max_pool3d_backward_cuda( const at::Tensor& gradOutput, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { Tensor gradInput = at::empty({0}, input.options()); fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput, input, pool_size, output_size, indices ); return gradInput; } }// native }// at
fcec56474746339d9690a774faa945c858ce9fd2.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCAtomics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { using namespace at::cuda::detail; namespace { template <typename scalar_t, typename accscalar_t> __device__ inline int64_t get_intervals( accscalar_t sample, int64_t index, int64_t inputSize, int64_t outputSize, int64_t poolSize) { accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) / static_cast<accscalar_t>(outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return static_cast<int64_t>((index + sample) * alpha) - \ static_cast<int64_t>(sample * alpha); } } template <typename scalar_t> __global__ void fractional_max_pool3d_out_frame( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, PackedTensorAccessor64<int64_t, 5> indices, PackedTensorAccessor64<scalar_t, 3> samples, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; // Output (t, h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.size(2) * output.size(3) * output.size(4)){ int64_t outputT = ourOutputPoint / (output.size(3) * output.size(4)); int64_t outputH = (ourOutputPoint / output.size(4)) % output.size(3); int64_t outputW = ourOutputPoint % output.size(4); int64_t poolT = get_intervals<scalar_t,accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][0]), outputT, input.size(2), output.size(2), poolSizeT); int64_t poolH = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][1]), outputH, input.size(3), output.size(3), poolSizeH); int64_t poolW = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][2]), outputW, input.size(4), output.size(4), poolSizeW); scalar_t maxVal = at::numeric_limits<scalar_t>::lowest(); int64_t maxIndex = -1; for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) { for (int64_t h = poolH; h < poolH + poolSizeH; ++h) { if(poolSizeW < 2 || poolSizeW > 7) { for (int64_t w = poolW; w < poolW + poolSizeW; ++w) { scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } else { for (int64_t i = 0; i < poolSizeW; ++i) { int64_t w = i + poolW; scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } } } assert(maxVal != at::numeric_limits<scalar_t>::lowest()); assert(maxIndex != -1); indices[batch][plane][outputT][outputH][outputW] = maxIndex; output[batch][plane][outputT][outputH][outputW] = maxVal; } } template <typename scalar_t> __global__ void fractional_max_pool3d_backward_out_frame( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, PackedTensorAccessor64<int64_t, 5> indices) { // 
Output (h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4)) { int64_t outputW = ourOutputPoint % gradOutput.size(4); int64_t outputH = (ourOutputPoint / gradOutput.size(4)) % gradOutput.size(3); int64_t outputT = ourOutputPoint / (gradOutput.size(3) * gradOutput.size(4)); int64_t index = indices[batch][plane][outputT][outputH][outputW]; assert(index >= 0); int64_t inputW = index % gradInput.size(4); int64_t inputH = (index / gradInput.size(4)) % gradInput.size(3); int64_t inputT = index / (gradInput.size(3) * gradInput.size(4)); assert(inputT < gradInput.size(2)); gpuAtomicAdd( &gradInput[batch][plane][inputT][inputH][inputW], gradOutput[batch][plane][outputT][outputH][outputW] ); } } void fractional_max_pool3d_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const Tensor& randomSamples) { int64_t planeDim = 0; int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t numBatch = 1; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t poolSizeT = pool_size[0]; int64_t poolSizeH = pool_size[1]; int64_t poolSizeW = pool_size[2]; int64_t ndims = input.ndimension(); TORCH_CHECK( input.numel() != 0 && (ndims == 4 || ndims == 5), "fractional_max_pool3d_out_cuda_template(): ", "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", ndims); if (ndims == 5) { numBatch = input.size(0); planeDim++; dimt++; dimh++; dimw++; } /* sizes */ int64_t numPlanes = input.size(planeDim); int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT + poolSizeT - 1 < inputT, "fractional_max_pool3d_out_cuda_template(): ", "pool time (", poolSizeT, ") too large relative to input time (", inputT, ")"); TORCH_CHECK( outputH + poolSizeH - 1 < inputH, "fractional_max_pool3d_out_cuda_template(): ", "pool height (", poolSizeH, ") too large relative to input height (", inputH, ")"); TORCH_CHECK( outputW + poolSizeW - 1 < inputW, "fractional_max_pool3d_out_cuda_template(): ", "pool width (", poolSizeW, ") too large relative to input width (", inputW, ")"); if (ndims == 4) { /* resize output */ output.resize_({numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numPlanes, outputT, outputH, outputW}); } else { /* resize output */ output.resize_({numBatch, numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numBatch, numPlanes, outputT, outputH, outputW}); } auto output_ = output; auto indices_ = indices; auto input_ = input; if(ndims == 4) { output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW}); indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW}); input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW}); } // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = output_.size(2) * output_.size(3) * output_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) input_.size(1), input_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "fractional_max_pool3d_out_frame", [&]{ fractional_max_pool3d_out_frame<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( input_.packed_accessor64<scalar_t, 5>(), output_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>(), randomSamples.packed_accessor64<scalar_t, 3>(), poolSizeT, poolSizeH, poolSizeW ); } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "fractional_max_pool2d_out_cuda_template failed with error code ", cudaGetLastError()); } void fractional_max_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef pool_size /* unused */, IntArrayRef output_size, const Tensor& indices) { int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t ndims = input.ndimension(); if (ndims == 5) { dimt++; dimh++; dimw++; } /* sizes */ int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT == gradOutput.size(dimt), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput time unexpected" ); TORCH_CHECK( outputH == gradOutput.size(dimh), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput height unexpected" ); TORCH_CHECK( outputW == gradOutput.size(dimw), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput width unexpected" ); /* resize */ gradInput.resize_as_(input); gradInput.zero_(); auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; auto indices_ = indices; if(ndims == 4) { gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT, inputH, inputW}); gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT, outputH, outputW}); indices_ = indices_.reshape({1, indices.size(0), outputT, outputH, outputW}); } /* backprop */ // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = gradOutput_.size(2) * gradOutput_.size(3) * gradOutput_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) gradInput_.size(1), gradInput_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( gradOutput.scalar_type(), "fractional_max_pool3d_backward_out_frame", [&] { fractional_max_pool3d_backward_out_frame<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_.packed_accessor64<scalar_t, 5>(), gradOutput_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>() ); } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "fractional_max_pool2d_out_cuda_template failed with error code ", cudaGetLastError()); } }// namespace std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda( at::Tensor& output, at::Tensor& indices, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda( const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& fractional_max_pool3d_backward_out_cuda( at::Tensor& gradInput, const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices ); return gradInput; } Tensor fractional_max_pool3d_backward_cuda( const at::Tensor& gradOutput, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { Tensor gradInput = at::empty({0}, input.options()); fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput, input, pool_size, output_size, indices ); return gradInput; } }// native }// at
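get_intervals above places each fractional pooling window at floor((index + sample) * alpha) - floor(sample * alpha) with alpha = (inputSize - poolSize) / (outputSize - 1), and pins the last window to the end of the input. The short host-side sketch below evaluates that placement for one sample value so the window starts can be inspected; intervalStart is an illustrative name and the sketch is not part of the original files.

// Sketch: window placement used by fractional max pooling (assumed example, not in the original files).
#include <stdio.h>
#include <stdint.h>

static int64_t intervalStart(double sample, int64_t index, int64_t inputSize,
                             int64_t outputSize, int64_t poolSize) {
  double alpha = (double)(inputSize - poolSize) / (double)(outputSize - 1);
  if (index == outputSize - 1) return inputSize - poolSize;  // last window hugs the end of the input
  return (int64_t)((index + sample) * alpha) - (int64_t)(sample * alpha);
}

int main(void) {
  int64_t inputSize = 11, outputSize = 4, poolSize = 3;
  double sample = 0.37;  // normally drawn uniformly in [0, 1) per (batch, plane)
  for (int64_t i = 0; i < outputSize; ++i) {
    int64_t start = intervalStart(sample, i, inputSize, outputSize, poolSize);
    printf("output %lld pools input [%lld, %lld)\n",
           (long long)i, (long long)start, (long long)(start + poolSize));
  }
  return 0;
}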
7e36591f11d134097c5833cab738f540e80ec451.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void add(int *a, int *b, int *c) {
  c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

#define N 16

void check(hipError_t err) {
  if (err != hipSuccess)
    printf("The error is %s.\n", hipGetErrorString(err));
}

void print_array(int *arr, int len) {
  for (int row = 0; row < 16; row++) {
    for (int i = 0; i < len; i++)
      printf("%d ", arr[i]);
    printf("\n");
  }
}

void random_ints(int *a, int n) {
  int i;
  for (i = 0; i < n; i++)
    a[i] = (int)(rand() / (RAND_MAX / 1.5));
}

int main(void) {
  int *a, *b, *c;        // host copies of a, b, c
  int *d_a, *d_b, *d_c;  // device copies of a, b, c
  int size = N * sizeof(int);

  // Allocate space for device copies of a, b, c
  check((hipError_t)hipMalloc((void **)&d_a, size));
  check((hipError_t)hipMalloc((void **)&d_b, size));
  check((hipError_t)hipMalloc((void **)&d_c, size));

  // Allocate space for host copies of a, b, c and setup input values
  a = (int*)malloc(size); random_ints(a, N);
  b = (int*)malloc(size); random_ints(b, N);
  c = (int*)malloc(size);

  // Copy inputs to device
  check((hipError_t)hipMemcpy(d_a, a, size, hipMemcpyHostToDevice));
  check((hipError_t)hipMemcpy(d_b, b, size, hipMemcpyHostToDevice));

  // Launch add() kernel on GPU
  hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, d_a, d_b, d_c);

  // Copy result back to host
  check((hipError_t)hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost));

  // Print results
  printf("Array a:\n");
  print_array(a, N);
  printf("Array b:\n");
  print_array(b, N);
  printf("Sum of a and b:\n");
  print_array(c, N);

  // Cleanup
  free(a);
  free(b);
  free(c);
  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);

  return 0;
}
7e36591f11d134097c5833cab738f540e80ec451.cu
#include <stdio.h>

__global__ void add(int *a, int *b, int *c) {
  c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

#define N 16

void check(cudaError_t err) {
  if (err != cudaSuccess)
    printf("The error is %s.\n", cudaGetErrorString(err));
}

void print_array(int *arr, int len) {
  for (int row = 0; row < 16; row++) {
    for (int i = 0; i < len; i++)
      printf("%d ", arr[i]);
    printf("\n");
  }
}

void random_ints(int *a, int n) {
  int i;
  for (i = 0; i < n; i++)
    a[i] = (int)(rand() / (RAND_MAX / 1.5));
}

int main(void) {
  int *a, *b, *c;        // host copies of a, b, c
  int *d_a, *d_b, *d_c;  // device copies of a, b, c
  int size = N * sizeof(int);

  // Allocate space for device copies of a, b, c
  check((cudaError_t)cudaMalloc((void **)&d_a, size));
  check((cudaError_t)cudaMalloc((void **)&d_b, size));
  check((cudaError_t)cudaMalloc((void **)&d_c, size));

  // Allocate space for host copies of a, b, c and setup input values
  a = (int*)malloc(size); random_ints(a, N);
  b = (int*)malloc(size); random_ints(b, N);
  c = (int*)malloc(size);

  // Copy inputs to device
  check((cudaError_t)cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
  check((cudaError_t)cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice));

  // Launch add() kernel on GPU
  add<<<N,1>>>(d_a, d_b, d_c);

  // Copy result back to host
  check((cudaError_t)cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost));

  // Print results
  printf("Array a:\n");
  print_array(a, N);
  printf("Array b:\n");
  print_array(b, N);
  printf("Sum of a and b:\n");
  print_array(c, N);

  // Cleanup
  free(a);
  free(b);
  free(c);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  return 0;
}
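The add kernel above launches N blocks of one thread each and indexes purely by blockIdx.x, which is fine for N = 16 but leaves all but one thread of every block idle. A common variant, sketched below as an assumption rather than part of the original pair, uses several threads per block with a bounds check so the grid size no longer has to match the element count exactly.

// Sketch: element-wise add with multiple threads per block (assumed kernel, not in the original files).
__global__ void addStrided(const int* a, const int* b, int* c, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // global element index
  if (i < n) c[i] = a[i] + b[i];                  // guard the tail when n is not a multiple of blockDim.x
}
// Launch example: addStrided<<<(N + 255) / 256, 256>>>(d_a, d_b, d_c, N);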