text: string, lengths 2.5k to 6.39M
kind: string, 3 values
#include <thrust/device_vector.h> #include <thrust/tuple.h> #include <thrust/execution_policy.h> #include <thrust/copy.h> #include <thrust/sort.h> #include <thrust/binary_search.h> #include <thrust/unique.h> #include <thrust/sequence.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include "thrust_rmm_allocator.h" #include <iostream> #include <vector> #include <tuple> #include <iterator> #include <cassert> #include <type_traits> #include <gdf/gdf.h> #include <gdf/utils.h> #include <gdf/errorutils.h> #include "sqls_rtti_comp.hpp" extern gdf_error gdf_filter(size_t nrows, gdf_column* cols, size_t ncols, void** d_cols,//device-side data slicing of gdf_column array (host) int* d_types, //device-side dtype slicing of gdf_column array (host) void** d_vals, size_t* d_indx, size_t* new_sz); // Vector set to use rmmAlloc and rmmFree. template <typename T> using Vector = thrust::device_vector<T, rmm_allocator<T>>; ///using IndexT = int;//okay... using IndexT = size_t; template<typename T, typename Allocator, template<typename, typename> class Vector> __host__ __device__ void print_v(const Vector<T, Allocator>& v, std::ostream& os) { thrust::copy(v.begin(), v.end(), std::ostream_iterator<T>(os,",")); os<<"\n"; } template<typename T, typename Allocator, template<typename, typename> class Vector> __host__ __device__ void print_v(const Vector<T, Allocator>& v, typename Vector<T, Allocator>::const_iterator pos, std::ostream& os) { thrust::copy(v.begin(), pos, std::ostream_iterator<T>(os,","));//okay os<<"\n"; } template<typename T, typename Allocator, template<typename, typename> class Vector> __host__ __device__ void print_v(const Vector<T, Allocator>& v, size_t n, std::ostream& os) { thrust::copy_n(v.begin(), n, std::ostream_iterator<T>(os,","));//okay os<<"\n"; } void f_test_multi_filter(void) { std::vector<int> vc1{1,1,1,1,1,1}; std::vector<int> vi1{1,3,3,5,5,5}; std::vector<double> vd1{12., 13., 13., 17., 17., 17}; Vector<int> dc1 = vc1; Vector<int> di1 = vi1; Vector<double> dd1 = vd1; size_t nrows = dc1.size(); assert( nrows == di1.size() ); assert( nrows == dd1.size() ); ///thrust::tuple<int*, int*, double*> tptrs{dc1.data().get(), di1.data().get(), dd1.data().get()}; int i = 1; int j = 3; double d = 13.; vc1.resize(1); vi1.resize(1); vd1.resize(1); vc1[0] = i; vi1[0] = j; vd1[0] = d; Vector<int> d_ci = vc1; Vector<int> d_cj = vi1; Vector<double> d_cd = vd1; ///thrust::tuple<int, int, double> tvals{i,j,d}; size_t new_sz = 0; Vector<size_t> d_indices(nrows, 0); // thrust::tuple<int*, int*, int*, int*, double*, double*> // tpairs{dc1.data().get(), d_ci.data().get(), // di1.data().get(), d_cj.data().get(), // dd1.data().get(), d_cd.data().get()}; ///new_sz = multi_col_filter(nrows, tpairs, d_indices.data().get());//ok size_t ncols = 3; Vector<void*> d_cols(ncols, nullptr); Vector<int> d_types(ncols, 0); Vector<size_t> d_indx(nrows, 0); std::vector<gdf_column> v_gdf_cols(ncols); v_gdf_cols[0].data = static_cast<void*>(dc1.data().get()); v_gdf_cols[0].size = sizeof(int); v_gdf_cols[0].dtype = GDF_INT32; v_gdf_cols[1].data = static_cast<void*>(di1.data().get()); v_gdf_cols[1].size = sizeof(int); v_gdf_cols[1].dtype = GDF_INT32; v_gdf_cols[2].data = static_cast<void*>(dd1.data().get()); v_gdf_cols[2].size = sizeof(double); v_gdf_cols[2].dtype = GDF_FLOAT64; std::vector<void*> v_vals{static_cast<void*>(d_ci.data().get()), static_cast<void*>(d_cj.data().get()), static_cast<void*>(d_cd.data().get())}; 
Vector<void*> d_vals = v_vals; gdf_column* h_columns = &v_gdf_cols[0]; void** d_col_data = d_cols.data().get(); int* d_col_types = d_types.data().get(); size_t* ptr_d_indx = d_indices.data().get(); void** ptr_d_vals = d_vals.data().get(); gdf_filter(nrows, h_columns, ncols, d_col_data, d_col_types, ptr_d_vals, ptr_d_indx, &new_sz); bool res = (new_sz > 0); if( res ) { std::cout<<"filtered size: "<<new_sz<<"; filtered indices:\n"; print_v(d_indices, d_indices.begin()+new_sz, std::cout); } else std::cout << "table unfiltered.\n"; } extern gdf_error gdf_order_by(size_t nrows, //in: # rows gdf_column* cols, //in: host-side array of gdf_columns size_t ncols, //in: # cols void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host) int* d_types, //out: pre-allocated device-side array to be filled with gdf_colum::dtype for each column; slicing of gdf_column array (host) size_t* d_indx); //out: device-side array of re-rdered row indices extern gdf_error gdf_filter(size_t nrows, //in: # rows gdf_column* cols, //in: host-side array of gdf_columns size_t ncols, //in: # cols void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host) int* d_types, //out: pre-allocated device-side array to be filled with gdf_colum::dtype for each column; slicing of gdf_column array (host) void** d_vals, //in: device-side array of values to filter against (type-erased) size_t* d_indx, //out: device-side array of row indices that remain after filtering size_t* new_sz); //out: host-side # rows that remain after filtering void test_gb_sum_api_2(const std::vector<int>& vc1, const std::vector<int>& vi1, const std::vector<double>& vd1) { Vector<int> dc1 = vc1; Vector<int> di1 = vi1; Vector<double> dd1 = vd1; size_t sz = dc1.size(); assert( sz == di1.size() ); assert( sz == dd1.size() ); Vector<IndexT> d_indx(sz, 0); Vector<IndexT> d_keys(sz, 0); Vector<IndexT> d_vals(sz, 0); size_t ncols = 3; size_t& nrows = sz; Vector<void*> d_cols(ncols, nullptr); Vector<int> d_types(ncols, 0); std::vector<gdf_column> v_gdf_cols(ncols); v_gdf_cols[0].data = static_cast<void*>(dc1.data().get()); v_gdf_cols[0].size = nrows; v_gdf_cols[0].dtype = GDF_INT32; v_gdf_cols[1].data = static_cast<void*>(di1.data().get()); v_gdf_cols[1].size = nrows; v_gdf_cols[1].dtype = GDF_INT32; v_gdf_cols[2].data = static_cast<void*>(dd1.data().get()); v_gdf_cols[2].size = nrows; v_gdf_cols[2].dtype = GDF_FLOAT64; gdf_column c_agg; gdf_column c_vout; Vector<double> d_outd(sz, 0); c_agg.dtype = GDF_FLOAT64; c_agg.data = dd1.data().get(); c_agg.size = nrows; c_vout.dtype = GDF_FLOAT64; c_vout.data = d_outd.data().get(); c_vout.size = nrows; size_t n_group = 0; int flag_sorted = 0; std::cout<<"aggregate = sum on column:\n"; print_v(dd1, std::cout); //input //{ gdf_context ctxt{0, GDF_SORT, 0}; std::vector<gdf_column*> v_pcols(ncols); for(int i = 0; i < ncols; ++i) { v_pcols[i] = &v_gdf_cols[i]; } gdf_column** cols = &v_pcols[0];//pointer semantic (2); //} //output: //{ Vector<int32_t> d_vc_out(nrows); Vector<int32_t> d_vi_out(nrows); Vector<double> d_vd_out(nrows); std::vector<gdf_column> v_gdf_cols_out(ncols); v_gdf_cols_out[0].data = d_vc_out.data().get(); v_gdf_cols_out[0].dtype = GDF_INT32; v_gdf_cols_out[0].size = nrows; v_gdf_cols_out[1].data = d_vi_out.data().get(); v_gdf_cols_out[1].dtype = GDF_INT32; v_gdf_cols_out[1].size = nrows; v_gdf_cols_out[2].data = d_vd_out.data().get(); 
v_gdf_cols_out[2].dtype = GDF_FLOAT64; v_gdf_cols_out[2].size = nrows; std::vector<gdf_column*> h_cols_out(ncols); for(int i=0; i<ncols; ++i) h_cols_out[i] = &v_gdf_cols_out[i];// gdf_column** cols_out = &h_cols_out[0];//pointer semantics (2) d_keys.assign(nrows, 0); gdf_column c_indx; c_indx.data = d_keys.data().get(); c_indx.size = nrows; c_indx.dtype = GDF_INT32; //} gdf_group_by_sum((int)ncols, // # columns cols, //input cols &c_agg, //column to aggregate on &c_indx, //if not null return indices of re-ordered rows cols_out, //if not null return the grouped-by columns &c_vout, //aggregation result &ctxt); //struct with additional info; n_group = c_vout.size; std::cout<<"selected rows:\n"; print_v(d_vc_out, n_group, std::cout); print_v(d_vi_out, n_group, std::cout); print_v(d_vd_out, n_group, std::cout); size_t n_keys = n_group; size_t n_vals = n_group; std::cout<<"multi-col generic group-by (sum):\n"; std::cout<<"indices of grouped-by, new keys end position: "<<n_keys<<";\n"; std::cout<<"indices of grouped-by, new vals end position: "<<n_vals<<";\n"; std::cout<<"grouped-by keys:\n"; print_v(d_keys, n_keys, std::cout); std::cout<<"grouped-by vals:\n"; print_v(d_outd, n_vals, std::cout); } void test_gb_count_api_2(const std::vector<int>& vc1, const std::vector<int>& vi1, const std::vector<double>& vd1) { Vector<int> dc1 = vc1; Vector<int> di1 = vi1; Vector<double> dd1 = vd1; size_t sz = dc1.size(); assert( sz == di1.size() ); assert( sz == dd1.size() ); Vector<IndexT> d_indx(sz, 0); Vector<IndexT> d_keys(sz, 0); Vector<IndexT> d_vals(sz, 0); size_t ncols = 3; size_t& nrows = sz; Vector<void*> d_cols(ncols, nullptr); Vector<int> d_types(ncols, 0); std::vector<gdf_column> v_gdf_cols(ncols); v_gdf_cols[0].data = static_cast<void*>(dc1.data().get()); v_gdf_cols[0].size = nrows; v_gdf_cols[0].dtype = GDF_INT32; v_gdf_cols[1].data = static_cast<void*>(di1.data().get()); v_gdf_cols[1].size = nrows; v_gdf_cols[1].dtype = GDF_INT32; v_gdf_cols[2].data = static_cast<void*>(dd1.data().get()); v_gdf_cols[2].size = nrows; v_gdf_cols[2].dtype = GDF_FLOAT64; gdf_column c_agg; gdf_column c_vout; Vector<double> d_outd(sz, 0); c_agg.dtype = GDF_FLOAT64; c_agg.data = dd1.data().get(); c_agg.size = nrows; c_vout.dtype = GDF_INT32; c_vout.data = d_vals.data().get(); c_vout.size = nrows; size_t n_group = 0; int flag_sorted = 0; std::cout<<"aggregate = count on column:\n"; print_v(dd1, std::cout); //input //{ gdf_context ctxt{0, GDF_SORT, 0}; std::vector<gdf_column*> v_pcols(ncols); for(int i = 0; i < ncols; ++i) { v_pcols[i] = &v_gdf_cols[i]; } gdf_column** cols = &v_pcols[0];//pointer semantic (2); //} //output: //{ Vector<int32_t> d_vc_out(nrows); Vector<int32_t> d_vi_out(nrows); Vector<double> d_vd_out(nrows); std::vector<gdf_column> v_gdf_cols_out(ncols); v_gdf_cols_out[0].data = d_vc_out.data().get(); v_gdf_cols_out[0].dtype = GDF_INT32; v_gdf_cols_out[0].size = nrows; v_gdf_cols_out[1].data = d_vi_out.data().get(); v_gdf_cols_out[1].dtype = GDF_INT32; v_gdf_cols_out[1].size = nrows; v_gdf_cols_out[2].data = d_vd_out.data().get(); v_gdf_cols_out[2].dtype = GDF_FLOAT64; v_gdf_cols_out[2].size = nrows; std::vector<gdf_column*> h_cols_out(ncols); for(int i=0; i<ncols; ++i) h_cols_out[i] = &v_gdf_cols_out[i];// gdf_column** cols_out = &h_cols_out[0];//pointer semantics (2) d_keys.assign(nrows, 0); gdf_column c_indx; c_indx.data = d_keys.data().get(); c_indx.size = nrows; c_indx.dtype = GDF_INT32; //} gdf_group_by_count((int)ncols, // # columns cols, //input cols &c_agg, //column to aggregate on &c_indx, //if 
not null return indices of re-ordered rows cols_out, //if not null return the grouped-by columns &c_vout, //aggregation result &ctxt); //struct with additional info; n_group = c_vout.size; std::cout<<"selected rows:\n"; print_v(d_vc_out, n_group, std::cout); print_v(d_vi_out, n_group, std::cout); print_v(d_vd_out, n_group, std::cout); size_t n_keys = n_group; size_t n_vals = n_group; std::cout<<"multi-col count group-by:\n"; std::cout<<"indices of grouped-by, new keys end position: "<<n_keys<<";\n"; std::cout<<"indices of grouped-by, new vals end position: "<<n_vals<<";\n"; std::cout<<"grouped-by keys:\n"; print_v(d_keys, n_keys, std::cout); std::cout<<"grouped-by vals:\n"; print_v(d_vals, n_vals, std::cout); } void test_gb_min_api_2(const std::vector<int>& vc1, const std::vector<int>& vi1, const std::vector<double>& vd1) { Vector<int> dc1 = vc1; Vector<int> di1 = vi1; Vector<double> dd1 = vd1; size_t sz = dc1.size(); assert( sz == di1.size() ); assert( sz == dd1.size() ); Vector<IndexT> d_indx(sz, 0); Vector<IndexT> d_keys(sz, 0); Vector<IndexT> d_vals(sz, 0); size_t ncols = 3; size_t& nrows = sz; Vector<void*> d_cols(ncols, nullptr); Vector<int> d_types(ncols, 0); std::vector<gdf_column> v_gdf_cols(ncols); v_gdf_cols[0].data = static_cast<void*>(dc1.data().get()); v_gdf_cols[0].size = nrows; v_gdf_cols[0].dtype = GDF_INT32; v_gdf_cols[1].data = static_cast<void*>(di1.data().get()); v_gdf_cols[1].size = nrows; v_gdf_cols[1].dtype = GDF_INT32; v_gdf_cols[2].data = static_cast<void*>(dd1.data().get()); v_gdf_cols[2].size = nrows; v_gdf_cols[2].dtype = GDF_FLOAT64; gdf_column c_agg; gdf_column c_vout; Vector<double> d_outd(sz, 0); c_agg.dtype = GDF_FLOAT64; ///c_agg.data = dd1.data().get(); c_agg.size = nrows; c_vout.dtype = GDF_FLOAT64; c_vout.data = d_outd.data().get(); c_vout.size = nrows; size_t n_group = 0; int flag_sorted = 0; std::vector<double> v_col{2., 4., 5., 7., 11., 3.}; Vector<double> d_col = v_col; std::cout<<"aggregate = min on column:\n"; print_v(d_col, std::cout); c_agg.dtype = GDF_FLOAT64; c_agg.data = d_col.data().get(); //input //{ gdf_context ctxt{0, GDF_SORT, 0}; std::vector<gdf_column*> v_pcols(ncols); for(int i = 0; i < ncols; ++i) { v_pcols[i] = &v_gdf_cols[i]; } gdf_column** cols = &v_pcols[0];//pointer semantic (2); //} //output: //{ Vector<int32_t> d_vc_out(nrows); Vector<int32_t> d_vi_out(nrows); Vector<double> d_vd_out(nrows); std::vector<gdf_column> v_gdf_cols_out(ncols); v_gdf_cols_out[0].data = d_vc_out.data().get(); v_gdf_cols_out[0].dtype = GDF_INT32; v_gdf_cols_out[0].size = nrows; v_gdf_cols_out[1].data = d_vi_out.data().get(); v_gdf_cols_out[1].dtype = GDF_INT32; v_gdf_cols_out[1].size = nrows; v_gdf_cols_out[2].data = d_vd_out.data().get(); v_gdf_cols_out[2].dtype = GDF_FLOAT64; v_gdf_cols_out[2].size = nrows; std::vector<gdf_column*> h_cols_out(ncols); for(int i=0; i<ncols; ++i) h_cols_out[i] = &v_gdf_cols_out[i];// gdf_column** cols_out = &h_cols_out[0];//pointer semantics (2) d_keys.assign(nrows, 0); gdf_column c_indx; c_indx.data = d_keys.data().get(); c_indx.size = nrows; c_indx.dtype = GDF_INT32; //} gdf_group_by_min((int)ncols, // # columns cols, //input cols &c_agg, //column to aggregate on &c_indx, //if not null return indices of re-ordered rows cols_out, //if not null return the grouped-by columns &c_vout, //aggregation result &ctxt); //struct with additional info; n_group = c_vout.size; std::cout<<"selected rows:\n"; print_v(d_vc_out, n_group, std::cout); print_v(d_vi_out, n_group, std::cout); print_v(d_vd_out, n_group, std::cout); size_t 
n_keys = n_group; size_t n_vals = n_group; std::cout<<"multi-col generic group-by (min):\n"; std::cout<<"indices of grouped-by, new keys end position: "<<n_keys<<";\n"; std::cout<<"indices of grouped-by, new vals end position: "<<n_vals<<";\n"; std::cout<<"grouped-by keys:\n"; print_v(d_keys, n_keys, std::cout); std::cout<<"grouped-by vals:\n"; print_v(d_outd, n_vals, std::cout); } void test_gb_max_api_2(const std::vector<int>& vc1, const std::vector<int>& vi1, const std::vector<double>& vd1) { Vector<int> dc1 = vc1; Vector<int> di1 = vi1; Vector<double> dd1 = vd1; size_t sz = dc1.size(); assert( sz == di1.size() ); assert( sz == dd1.size() ); Vector<IndexT> d_indx(sz, 0); Vector<IndexT> d_keys(sz, 0); Vector<IndexT> d_vals(sz, 0); size_t ncols = 3; size_t& nrows = sz; Vector<void*> d_cols(ncols, nullptr); Vector<int> d_types(ncols, 0); std::vector<gdf_column> v_gdf_cols(ncols); v_gdf_cols[0].data = static_cast<void*>(dc1.data().get()); v_gdf_cols[0].size = nrows; v_gdf_cols[0].dtype = GDF_INT32; v_gdf_cols[1].data = static_cast<void*>(di1.data().get()); v_gdf_cols[1].size = nrows; v_gdf_cols[1].dtype = GDF_INT32; v_gdf_cols[2].data = static_cast<void*>(dd1.data().get()); v_gdf_cols[2].size = nrows; v_gdf_cols[2].dtype = GDF_FLOAT64; gdf_column c_agg; gdf_column c_vout; Vector<double> d_outd(sz, 0); c_agg.dtype = GDF_FLOAT64; ///c_agg.data = dd1.data().get(); c_agg.size = nrows; c_vout.dtype = GDF_FLOAT64; c_vout.data = d_outd.data().get(); c_vout.size = nrows; size_t n_group = 0; int flag_sorted = 0; std::vector<double> v_col{2., 4., 5., 7., 11., 3.}; Vector<double> d_col = v_col; std::cout<<"aggregate = max on column:\n"; print_v(d_col, std::cout); c_agg.dtype = GDF_FLOAT64; c_agg.data = d_col.data().get(); //input //{ gdf_context ctxt{0, GDF_SORT, 0}; std::vector<gdf_column*> v_pcols(ncols); for(int i = 0; i < ncols; ++i) { v_pcols[i] = &v_gdf_cols[i]; } gdf_column** cols = &v_pcols[0];//pointer semantic (2); //} //output: //{ Vector<int32_t> d_vc_out(nrows); Vector<int32_t> d_vi_out(nrows); Vector<double> d_vd_out(nrows); std::vector<gdf_column> v_gdf_cols_out(ncols); v_gdf_cols_out[0].data = d_vc_out.data().get(); v_gdf_cols_out[0].dtype = GDF_INT32; v_gdf_cols_out[0].size = nrows; v_gdf_cols_out[1].data = d_vi_out.data().get(); v_gdf_cols_out[1].dtype = GDF_INT32; v_gdf_cols_out[1].size = nrows; v_gdf_cols_out[2].data = d_vd_out.data().get(); v_gdf_cols_out[2].dtype = GDF_FLOAT64; v_gdf_cols_out[2].size = nrows; std::vector<gdf_column*> h_cols_out(ncols); for(int i=0; i<ncols; ++i) h_cols_out[i] = &v_gdf_cols_out[i];// gdf_column** cols_out = &h_cols_out[0];//pointer semantics (2) d_keys.assign(nrows, 0); gdf_column c_indx; c_indx.data = d_keys.data().get(); c_indx.size = nrows; c_indx.dtype = GDF_INT32; //} gdf_group_by_max((int)ncols, // # columns cols, //input cols &c_agg, //column to aggregate on &c_indx, //if not null return indices of re-ordered rows cols_out, //if not null return the grouped-by columns &c_vout, //aggregation result &ctxt); //struct with additional info; n_group = c_vout.size; std::cout<<"selected rows:\n"; print_v(d_vc_out, n_group, std::cout); print_v(d_vi_out, n_group, std::cout); print_v(d_vd_out, n_group, std::cout); size_t n_keys = n_group; size_t n_vals = n_group; std::cout<<"multi-col generic group-by (max):\n"; std::cout<<"indices of grouped-by, new keys end position: "<<n_keys<<";\n"; std::cout<<"indices of grouped-by, new vals end position: "<<n_vals<<";\n"; std::cout<<"grouped-by keys:\n"; print_v(d_keys, n_keys, std::cout); std::cout<<"grouped-by 
vals:\n"; print_v(d_outd, n_vals, std::cout); } void test_gb_avg_api_2(const std::vector<int>& vc1, const std::vector<int>& vi1, const std::vector<double>& vd1) { Vector<int> dc1 = vc1; Vector<int> di1 = vi1; Vector<double> dd1 = vd1; size_t sz = dc1.size(); assert( sz == di1.size() ); assert( sz == dd1.size() ); Vector<IndexT> d_indx(sz, 0); Vector<IndexT> d_keys(sz, 0); Vector<IndexT> d_vals(sz, 0); size_t ncols = 3; size_t& nrows = sz; Vector<void*> d_cols(ncols, nullptr); Vector<int> d_types(ncols, 0); std::vector<gdf_column> v_gdf_cols(ncols); v_gdf_cols[0].data = static_cast<void*>(dc1.data().get()); v_gdf_cols[0].size = nrows; v_gdf_cols[0].dtype = GDF_INT32; v_gdf_cols[1].data = static_cast<void*>(di1.data().get()); v_gdf_cols[1].size = nrows; v_gdf_cols[1].dtype = GDF_INT32; v_gdf_cols[2].data = static_cast<void*>(dd1.data().get()); v_gdf_cols[2].size = nrows; v_gdf_cols[2].dtype = GDF_FLOAT64; gdf_column c_agg; gdf_column c_vout; Vector<double> d_outd(sz, 0); c_agg.dtype = GDF_FLOAT64; c_agg.data = dd1.data().get(); c_agg.size = nrows; c_vout.dtype = GDF_FLOAT64; c_vout.data = d_outd.data().get(); c_vout.size = nrows; size_t n_group = 0; int flag_sorted = 0; std::cout<<"aggregate = avg on column:\n"; print_v(dd1, std::cout); //input //{ gdf_context ctxt{0, GDF_SORT, 0}; std::vector<gdf_column*> v_pcols(ncols); for(int i = 0; i < ncols; ++i) { v_pcols[i] = &v_gdf_cols[i]; } gdf_column** cols = &v_pcols[0];//pointer semantic (2); //} //output: //{ Vector<int32_t> d_vc_out(nrows); Vector<int32_t> d_vi_out(nrows); Vector<double> d_vd_out(nrows); std::vector<gdf_column> v_gdf_cols_out(ncols); v_gdf_cols_out[0].data = d_vc_out.data().get(); v_gdf_cols_out[0].dtype = GDF_INT32; v_gdf_cols_out[0].size = nrows; v_gdf_cols_out[1].data = d_vi_out.data().get(); v_gdf_cols_out[1].dtype = GDF_INT32; v_gdf_cols_out[1].size = nrows; v_gdf_cols_out[2].data = d_vd_out.data().get(); v_gdf_cols_out[2].dtype = GDF_FLOAT64; v_gdf_cols_out[2].size = nrows; std::vector<gdf_column*> h_cols_out(ncols); for(int i=0; i<ncols; ++i) h_cols_out[i] = &v_gdf_cols_out[i];// gdf_column** cols_out = &h_cols_out[0];//pointer semantics (2) d_keys.assign(nrows, 0); gdf_column c_indx; c_indx.data = d_keys.data().get(); c_indx.size = nrows; c_indx.dtype = GDF_INT32; //} gdf_group_by_avg((int)ncols, // # columns cols, //input cols &c_agg, //column to aggregate on &c_indx, //if not null return indices of re-ordered rows cols_out, //if not null return the grouped-by columns &c_vout, //aggregation result &ctxt); //struct with additional info; n_group = c_vout.size; std::cout<<"selected rows:\n"; print_v(d_vc_out, n_group, std::cout); print_v(d_vi_out, n_group, std::cout); print_v(d_vd_out, n_group, std::cout); size_t n_keys = n_group; size_t n_vals = n_group; std::cout<<"multi-col generic group-by (average):\n"; std::cout<<"indices of grouped-by, new keys end position: "<<n_keys<<";\n"; std::cout<<"indices of grouped-by, new vals end position: "<<n_vals<<";\n"; std::cout<<"grouped-by keys:\n"; print_v(d_keys, n_keys, std::cout); std::cout<<"grouped-by vals:\n"; print_v(d_outd, n_vals, std::cout); } int main(void) { { //okay: // std::vector<int> vc1{1,1,1}; std::vector<int> vi1{1,1,0}; std::vector<double> vd1{12., 11., 17.}; Vector<int> dc1 = vc1; Vector<int> di1 = vi1; Vector<double> dd1 = vd1; size_t nrows = dc1.size(); assert( nrows == di1.size() ); assert( nrows == dd1.size() ); Vector<int> dv(nrows, 0); ///multi_col_order_by(nrows, tv1, dv);//okay size_t ncols = 3; std::vector<void*> 
v_cols{static_cast<void*>(dc1.data().get()), static_cast<void*>(di1.data().get()), static_cast<void*>(dd1.data().get())}; std::vector<int> v_types{static_cast<int>(GDF_INT32), static_cast<int>(GDF_INT32), static_cast<int>(GDF_FLOAT64)}; Vector<void*> d_cols(ncols, nullptr); Vector<int> d_types(ncols, 0); Vector<size_t> d_indx(nrows, 0); std::vector<gdf_column> v_gdf_cols(ncols); v_gdf_cols[0].data = static_cast<void*>(dc1.data().get()); v_gdf_cols[0].size = nrows; v_gdf_cols[0].dtype = GDF_INT32; v_gdf_cols[1].data = static_cast<void*>(di1.data().get()); v_gdf_cols[1].size = nrows; v_gdf_cols[1].dtype = GDF_INT32; v_gdf_cols[2].data = static_cast<void*>(dd1.data().get()); v_gdf_cols[2].size = nrows; v_gdf_cols[2].dtype = GDF_FLOAT64; gdf_column* h_columns = &v_gdf_cols[0]; void** d_col_data = d_cols.data().get(); int* d_col_types = d_types.data().get(); size_t* ptr_dv = d_indx.data().get(); gdf_order_by(nrows, h_columns, ncols, d_col_data, d_col_types, ptr_dv); std::cout<<"multisort order:\n"; print_v(d_indx, std::cout); //should return: //multisort order: //2,1,0, } { //okay: // std::vector<int> vc1{1,1,1,1,1,1}; std::vector<int> vi1{1,3,3,5,5,0}; std::vector<double> vd1{12., 13., 13., 17., 17., 17}; std::cout<<"multi-column group-by experiments:\n"; test_gb_count_api_2(vc1, vi1, vd1);//okay test_gb_sum_api_2(vc1, vi1, vd1);//okay test_gb_avg_api_2(vc1, vi1, vd1);//okay test_gb_min_api_2(vc1, vi1, vd1);//okay test_gb_max_api_2(vc1, vi1, vd1);//okay //} } { std::cout<<"Filtering experiments:\n"; f_test_multi_filter(); } std::cout << "Done!" << std::endl; return 0; }
the_stack
#include <vector> #include <math.h> #include "geometry/grid_3d.h" #include "geometry/SE3.h" #include "util/mirrored_memory.h" namespace dart { inline __device__ __host__ bool intersectBox(float3 rayDir, float3 origin, float3 boxmin, float3 boxmax, float2 * t) { //double3 rayDir = make_double3(r.x,r.y,r.z); // compute intersection of ray with all six bbox planes //trying a new fix for axis =0. float3 invR = 1.0/rayDir;//rayDir / (rayDir*rayDir); //float3 invR = make_float3(invR_.x,invR_.y,invR_.z); float3 tbot = invR * (boxmin - origin); float3 ttop = invR * (boxmax - origin); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmax(fmax(tmin.x, tmin.y), tmin.z); float smallest_tmax = fmin(fmin(tmax.x, tmax.y), tmax.z); //float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); //float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); t->x = largest_tmin; t->y = smallest_tmax; return smallest_tmax > largest_tmin; } // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- __global__ void gpu_raycastSDF(float2 fl, float2 pp, const int width, const int height, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, float4 * points, const float levelSet) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const SE3 T_cm = dart::SE3Invert(T_mc); if (x < width && y < height) { const float2 invfl = 1.0f/(fl); const float2 uv = make_float2(x + 0.5,y + 0.5); const float3 origin = make_float3(0,0,0); const float3 raydir = normalize( make_float3( (uv-pp)*invfl,1) ); float closestT = 10000; for (int s=0; s < nSdfs; ++s) { const int f = sdfFrames[s]; const Grid3D<float>& sdf = sdfs[s]; float2 t; float3 origin_f = make_float3(T_fms[f]*T_mc*make_float4(origin,1)); float3 raydir_f = make_float3(T_fms[f]*T_mc*make_float4(raydir,0)); float3 raydirNorm_f = normalize(raydir_f); float3 sdfMin = sdf.offset; float3 sdfMax = sdf.offset + sdf.resolution*make_float3(sdf.dim.x,sdf.dim.y,sdf.dim.z); bool intersects = intersectBox(raydirNorm_f,origin_f, sdfMin, sdfMax, &t); if (intersects) { float3 x_f = raydirNorm_f*t.x + origin_f; float3 x_g = fmaxf(make_float3(1,1,1), fminf((x_f - sdf.offset) / sdf.resolution, make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); float sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; float sdfValPrev = sdfVal; float tPrev = t.x; while (t.x < t.y && t.x < closestT) { x_f = raydirNorm_f*t.x + origin_f; x_g = fmaxf(make_float3(1,1,1) , fminf((x_f - sdf.offset) / sdf.resolution, make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; if (sdfValPrev > levelSet && sdfVal <= levelSet) { const float tHit = (t.x - tPrev)*(levelSet - sdfValPrev)/(sdfVal - sdfValPrev) + tPrev; float3 xHit_f = raydirNorm_f*tHit + origin_f; float4 xHit_c = T_cm*T_mfs[f]*make_float4(xHit_f,1); points[x + y*width] = xHit_c; closestT = tHit; } sdfValPrev = sdfVal; tPrev = t.x; t.x += fmax(sdf.resolution / 10, fabs(sdfVal)); } } } } } __global__ void gpu_raycastPrediction(float2 fl, float2 pp, const int width, const int height, const int modelNum, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, float4 * prediction, const float levelSet) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const 
int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; if (modelNum == 0) { prediction[index].z = 0; prediction[index].w = -1; } const SE3 T_cm = dart::SE3Invert(T_mc); if (x < width && y < height) { const float2 invfl = 1.0f/(fl); const float2 uv = make_float2(x + 0.5,y + 0.5); const float3 origin = make_float3(0,0,0); const float3 raydir = normalize( make_float3( (uv-pp)*invfl,1) ); float closestT = 10000; for (int s=0; s < nSdfs; ++s) { const int f = sdfFrames[s]; const Grid3D<float>& sdf = sdfs[s]; float2 t; float3 origin_f = make_float3(T_fms[f]*T_mc*make_float4(origin,1)); float3 raydir_f = make_float3(T_fms[f]*T_mc*make_float4(raydir,0)); float3 raydirNorm_f = normalize(raydir_f); float3 sdfMin = sdf.offset; float3 sdfMax = sdf.offset + sdf.resolution*make_float3(sdf.dim.x,sdf.dim.y,sdf.dim.z); bool intersects = intersectBox(raydirNorm_f,origin_f, sdfMin, sdfMax, &t); if (intersects) { float3 x_f = raydirNorm_f*t.x + origin_f; float3 x_g = fmaxf(make_float3(1,1,1), fminf((x_f - sdf.offset) / sdf.resolution, make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); float sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; float sdfValPrev = sdfVal; float tPrev = t.x; while (t.x < t.y && t.x < closestT) { x_f = raydirNorm_f*t.x + origin_f; x_g = fmaxf(make_float3(1,1,1) , fminf((x_f - sdf.offset) / sdf.resolution, make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; if (sdfValPrev > levelSet && sdfVal <= levelSet) { const float tHit = (t.x - tPrev)*(levelSet - sdfValPrev)/(sdfVal - sdfValPrev) + tPrev; float3 xHit_f = raydirNorm_f*tHit + origin_f; float4 xHit_c = T_cm*T_mfs[f]*make_float4(xHit_f,1); float currentZ = prediction[index].z; if (currentZ == 0 || xHit_c.z < currentZ) { int id = (modelNum << 16) + s; xHit_c.w = id; prediction[index] = xHit_c; } closestT = tHit; } sdfValPrev = sdfVal; tPrev = t.x; t.x += fmax(sdf.resolution / 10, fabs(sdfVal)); } } } } } __global__ void gpu_raycastPredictionDebug(float2 fl, float2 pp, const int width, const int height, const int modelNum, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int* sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, float4 * prediction, const float levelSet, unsigned char * boxIntersections) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; if (modelNum == 0) { prediction[index].z = 0; boxIntersections[index] = 0; } const SE3 T_cm = dart::SE3Invert(T_mc); if (x < width && y < height) { const float2 invfl = 1.0f/(fl); const float2 uv = make_float2(x + 0.5,y + 0.5); const float3 origin = make_float3(0,0,0); const float3 raydir = normalize( make_float3( (uv-pp)*invfl,1) ); float closestT = 10000; for (int s=0; s < nSdfs; ++s) { const int f = sdfFrames[s]; const Grid3D<float>& sdf = sdfs[s]; float2 t; float3 origin_f = make_float3(T_fms[f]*T_mc*make_float4(origin,1)); float3 raydir_f = make_float3(T_fms[f]*T_mc*make_float4(raydir,0)); float3 raydirNorm_f = normalize(raydir_f); float3 sdfMin = sdf.offset; float3 sdfMax = sdf.offset + sdf.resolution*make_float3(sdf.dim.x,sdf.dim.y,sdf.dim.z); bool intersects = intersectBox(raydirNorm_f,origin_f, sdfMin, sdfMax, &t); if (intersects) { boxIntersections[index] = 1; float3 x_f = raydirNorm_f*t.x + origin_f; float3 x_g = fmaxf(make_float3(1,1,1), fminf((x_f - sdf.offset) / sdf.resolution, 
make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); float sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; float sdfValPrev = sdfVal; float tPrev = t.x; while (t.x < t.y && t.x < closestT) { x_f = raydirNorm_f*t.x + origin_f; x_g = fmaxf(make_float3(1,1,1) , fminf((x_f - sdf.offset) / sdf.resolution, make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; if (sdfValPrev > levelSet && sdfVal <= levelSet) { const float tHit = (t.x - tPrev)*(levelSet - sdfValPrev)/(sdfVal - sdfValPrev) + tPrev; float3 xHit_f = raydirNorm_f*tHit + origin_f; float4 xHit_c = T_cm*T_mfs[f]*make_float4(xHit_f,1); float currentZ = prediction[index].z; if (currentZ == 0 || xHit_c.z < currentZ) { int id = (modelNum << 16) + s; xHit_c.w = id; prediction[index] = xHit_c; } closestT = tHit; } sdfValPrev = sdfVal; tPrev = t.x; t.x += fmax(sdf.resolution / 10, fabs(sdfVal)); } } } } } __global__ void gpu_raycastPredictionDebugRay(float2 fl, float2 pp, const int x, const int y, const int width, const int modelNum, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, float4 * prediction, const float levelSet, float3 * boxIntersects, float2 * raySteps, const int maxRaySteps) { const int index = x + y*width; if (modelNum == 0) { prediction[index].z = 0; } printf("-=-=-=-=-=-=-=-=-\nmodel %d\n-=-=-=-=-=-=-=-=-\n",modelNum); const SE3 T_cm = dart::SE3Invert(T_mc); const float2 invfl = 1.0f/(fl); const float2 uv = make_float2(x + 0.5,y + 0.5); const float3 origin = make_float3(0,0,0); const float3 raydir = normalize( make_float3( (uv-pp)*invfl,1) ); int step = 0; float closestT = 10000; for (int s=0; s < nSdfs; ++s) { printf("sdf %d\n",s); const int f = sdfFrames[s]; const Grid3D<float>& sdf = sdfs[s]; float2 t; float3 origin_f = make_float3(T_fms[f]*T_mc*make_float4(origin,1)); float3 raydir_f = make_float3(T_fms[f]*T_mc*make_float4(raydir,0)); float3 raydirNorm_f = normalize(raydir_f); float3 sdfMin = sdf.offset; float3 sdfMax = sdf.offset + sdf.resolution*make_float3(sdf.dim.x,sdf.dim.y,sdf.dim.z); bool intersects = intersectBox(raydirNorm_f,origin_f, sdfMin, sdfMax, &t); if (intersects) { boxIntersects[s] = make_float3(t.x,t.y,1); printf("\tsdf is intersected\n"); float3 x_f = raydirNorm_f*t.x + origin_f; float3 x_g = fmaxf(make_float3(1,1,1), fminf((x_f - sdf.offset) / sdf.resolution, make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); float sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; float sdfValPrev = sdfVal; float tPrev = t.x; while (t.x < t.y && t.x < closestT) { x_f = raydirNorm_f*t.x + origin_f; x_g = fmaxf(make_float3(1,1,1) , fminf((x_f - sdf.offset) / sdf.resolution, make_float3(sdf.dim.x-2,sdf.dim.y-2,sdf.dim.z-2))); sdfVal = sdf.getValueInterpolated(x_g)*sdf.resolution; if (step < maxRaySteps) { raySteps[step] = make_float2(t.x,sdfVal); ++step; } printf("\tt.x=%f\n",t.x); printf("\tsdfVal=%f\n",sdfVal); if (sdfValPrev > levelSet && sdfVal <= levelSet) { printf("\tcrossed\n"); const float tHit = (t.x - tPrev)*(levelSet - sdfValPrev)/(sdfVal - sdfValPrev) + tPrev; float3 xHit_f = raydirNorm_f*tHit + origin_f; float4 xHit_c = T_cm*T_mfs[f]*make_float4(xHit_f,1); float currentZ = prediction[index].z; if (currentZ == 0 || xHit_c.z < currentZ) { int id = (modelNum << 16) + s; xHit_c.w = id; prediction[index] = xHit_c; } closestT = tHit; printf("\tclosestT now %f\n",closestT); } sdfValPrev = sdfVal; tPrev = t.x; t.x += fmax(sdf.resolution / 10, fabs(sdfVal)); } } else { 
boxIntersects[s] = make_float3(5,5,5); } } printf("took %d steps\n",step); } // -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=- void raycastPrediction(float2 fl, float2 pp, const int width, const int height, const int modelNum, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, float4 * prediction, const float levelSet, cudaStream_t stream) { dim3 block = dim3(8,8,1); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); gpu_raycastPrediction<<<grid,block,0,stream>>>(fl,pp,width,height,modelNum, T_mc,T_fms,T_mfs,sdfFrames,sdfs,nSdfs, prediction, levelSet); // gpu_raycastPredictionDebug<<<grid,block>>>(fl,pp,width,height,modelNum, // T_mc,T_fms,T_mfs,sdfFrames,sdfs,nSdfs, // prediction, levelSet, boxIntersect); } void raycastPredictionDebugRay(float2 fl, float2 pp, const int x, const int y, const int width, const int modelNum, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, float4 * prediction, const float levelSet, float3 * boxIntersects, float2 * raySteps, const int maxRaySteps) { dim3 block(1,1,1); dim3 grid(1,1,1); gpu_raycastPredictionDebugRay<<<grid,block>>>(fl,pp,x,y,width,modelNum,T_mc,T_fms,T_mfs,sdfFrames,sdfs,nSdfs,prediction,levelSet,boxIntersects,raySteps,maxRaySteps); } }
the_stack
#include "debugging.h" #include "hash_compacting.h" #include "hash_functions.h" #include "hash_table.cuh" #include <cudpp.h> #include "cuda_util.h" #include <set> namespace CudaHT { namespace CuckooHashing { /* -------------------------------------------------------------------------- Retrieval functions. -------------------------------------------------------------------------- */ //! Answers a single query from a compacting hash table. /*! @ingroup PublicInterface * @param[in] key Query key * @param[in] table_size Size of the hash table * @param[in] table The contents of the hash table * @param[in] constants The hash functions used to build the table * @param[in] stash_constants Constants used by the stash hash function * @param[in] stash_count Number of items contained in the stash * @param[out] num_probes_required Debug only: The number of probes required to resolve the query. * * @returns The ID of the query key is returned if the key exists in the table. Otherwise, \ref kNotFound will be returned. */ template <unsigned kNumHashFunctions> __device__ unsigned retrieve_compacting(const unsigned query_key, const unsigned table_size, const Entry *table, const Functions<kNumHashFunctions> constants, const uint2 stash_constants, const unsigned stash_count, unsigned *num_probes_required = NULL) { // Identify all of the locations that the key can be located in. unsigned locations[kNumHashFunctions]; KeyLocations(constants, table_size, query_key, locations); // Check each location until the key is found. // Short-circuiting is disabled because the duplicate removal step breaks // it. unsigned num_probes = 1; Entry entry = table[locations[0]]; #pragma unroll for (unsigned i = 1; i < kNumHashFunctions; ++i) { if (get_key(entry) != query_key) { num_probes++; entry = table[locations[i]]; } } // Check the stash. if (stash_count && get_key(entry) != query_key) { num_probes++; const Entry *stash = table + table_size; unsigned slot = stash_hash_function(stash_constants, query_key); entry = stash[slot]; } #ifdef TRACK_ITERATIONS if (num_probes_required) { *num_probes_required = num_probes; } #endif if (get_key(entry) == query_key) { return get_value(entry); } else { return kNotFound; } } //! Returns the unique identifier for every query key. Each thread manages a single query. /*! @param[in] n_queries Number of query keys * @param[in] keys_in Query keys * @param[in] table_size Size of the hash table * @param[in] table The contents of the hash table * @param[in] constants The hash functions used to build the table * @param[in] stash_constants Constants used by the stash hash function * @param[in] stash_count Number of items contained in the stash * @param[out] values_out The unique identifiers for each query key * @param[out] num_probes_required Debug only: The number of probes required to resolve the query. * * The ID of the query key is written out if the key exists in the table. * Otherwise, \ref kNotFound will be. */ template <unsigned kNumHashFunctions> __global__ void hash_retrieve_compacting(const unsigned n_queries, const unsigned *keys_in, const unsigned table_size, const Entry *table, const Functions<kNumHashFunctions> constants, const uint2 stash_constants, const unsigned stash_count, unsigned *values_out, unsigned *num_probes_required = NULL) { // Get the key. 
unsigned thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= n_queries) return; unsigned key = keys_in[thread_index]; values_out[thread_index] = retrieve_compacting<kNumHashFunctions> (key, table_size, table, constants, stash_constants, stash_count, (num_probes_required ? num_probes_required + thread_index : NULL)); } /*! @name Internal * @{ */ //! Builds a compacting hash table. template <unsigned kNumHashFunctions> __global__ void hash_build_compacting(const int n, const unsigned *keys, const unsigned table_size, const Functions<kNumHashFunctions> constants, const uint2 stash_constants, const unsigned max_iteration_attempts, unsigned *table, unsigned *stash_count, unsigned *failures) { // Check if this thread has an item and if any previous threads failed. unsigned int thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= n || *failures) return; // Read the key that this thread should insert. It always uses its first // slot. unsigned key = keys[thread_index]; unsigned location = hash_function(constants, 0, key) % table_size; // Keep inserting until an empty slot is found, a copy was found, // or the eviction chain grows too large. unsigned old_key = kKeyEmpty; for (int its = 1; its < max_iteration_attempts; its++) { old_key = key; // Insert the new entry. key = atomicExch(&table[location], key); // If no unique key was evicted, we're done. if (key == kKeyEmpty || key == old_key) return; location = determine_next_location(constants, table_size, key, location); }; // Shove it into the stash. if (key != kKeyEmpty) { unsigned slot = stash_hash_function(stash_constants, key); unsigned *stash = table + table_size; unsigned replaced_key = atomicExch(stash + slot, key); if (replaced_key == kKeyEmpty || replaced_key == key) { atomicAdd(stash_count, 1); return; } } // The eviction chain grew too large. Report failure. #ifdef COUNT_UNINSERTED atomicAdd(failures, 1); #else *failures = 1; #endif } //! Removes all key duplicates from a compacting hash table. /*! The unspecialized version is significantly slower than the explicitly * specialized ones. */ template <unsigned kNumHashFunctions> __global__ void hash_remove_duplicates(const unsigned table_size, const unsigned total_table_size, const Functions<kNumHashFunctions> constants, const uint2 stash_constants, unsigned *keys, unsigned *is_unique) { // Read out the key that may be duplicated. unsigned int thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= total_table_size) return; unsigned key = keys[thread_index]; // Determine all the locations that the key could be in. unsigned first_location = table_size + stash_hash_function(stash_constants, key); #pragma unroll for (int i = kNumHashFunctions-1; i >= 0; --i) { unsigned location = hash_function(constants, i, key) % table_size; first_location = (keys[location] == key ? location : first_location); } // If this thread got a later copy of the key, remove this thread's copy // from the table. if (first_location != thread_index || key == kKeyEmpty) { keys[thread_index] = kKeyEmpty; is_unique[thread_index] = 0; } else { is_unique[thread_index] = 1; } } /// @} //! 
@name Explicit template specializations /// @{ #if 1 template <> __global__ void hash_remove_duplicates<2>(const unsigned table_size, const unsigned total_table_size, const Functions<2> constants, const uint2 stash_constants, unsigned *keys, unsigned *is_unique) { // Read out the key that may be duplicated. unsigned int thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= total_table_size) return; unsigned key = keys[thread_index]; // Determine all the locations that the key could be in. unsigned location_0 = hash_function(constants, 0, key) % table_size; unsigned location_1 = hash_function(constants, 1, key) % table_size; unsigned stash_loc = table_size + stash_hash_function(stash_constants, key); // Figure out where the key is first located. unsigned first_index; if (keys[location_0] == key) first_index = location_0; else if (keys[location_1] == key) first_index = location_1; else first_index = stash_loc; // If this thread got a later copy of the key, remove this thread's copy // from the table. if (first_index != thread_index || key == kKeyEmpty) { keys[thread_index] = kKeyEmpty; is_unique[thread_index] = 0; } else { is_unique[thread_index] = 1; } } template <> __global__ void hash_remove_duplicates<3>(const unsigned table_size, const unsigned total_table_size, const Functions<3> constants, const uint2 stash_constants, unsigned *keys, unsigned *is_unique) { // Read out the key that may be duplicated. unsigned int thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= total_table_size) return; unsigned key = keys[thread_index]; // Determine all the locations that the key could be in. unsigned location_0 = hash_function(constants, 0, key) % table_size; unsigned location_1 = hash_function(constants, 1, key) % table_size; unsigned location_2 = hash_function(constants, 2, key) % table_size; unsigned stash_loc = table_size + stash_hash_function(stash_constants, key); // Figure out where the key is first located. unsigned first_index; if (keys[location_0] == key) first_index = location_0; else if (keys[location_1] == key) first_index = location_1; else if (keys[location_2] == key) first_index = location_2; else first_index = stash_loc; // If this thread got a later copy of the key, remove this thread's copy // from the table. if (first_index != thread_index || key == kKeyEmpty) { keys[thread_index] = kKeyEmpty; is_unique[thread_index] = 0; } else { is_unique[thread_index] = 1; } } template <> __global__ void hash_remove_duplicates<4>(const unsigned table_size, const unsigned total_table_size, const Functions<4> constants, const uint2 stash_constants, unsigned *keys, unsigned *is_unique) { // Read out the key that may be duplicated. unsigned int thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= total_table_size) return; unsigned key = keys[thread_index]; // Determine all the locations that the key could be in. unsigned location_0 = hash_function(constants, 0, key) % table_size; unsigned location_1 = hash_function(constants, 1, key) % table_size; unsigned location_2 = hash_function(constants, 2, key) % table_size; unsigned location_3 = hash_function(constants, 3, key) % table_size; unsigned stash_loc = table_size + stash_hash_function(stash_constants, key); // Figure out where the key is first located. 
unsigned first_index; if (keys[location_0] == key) first_index = location_0; else if (keys[location_1] == key) first_index = location_1; else if (keys[location_2] == key) first_index = location_2; else if (keys[location_3] == key) first_index = location_3; else first_index = stash_loc; // If this thread got a later copy of the key, remove this thread's copy // from the table. if (first_index != thread_index || key == kKeyEmpty) { keys[thread_index] = kKeyEmpty; is_unique[thread_index] = 0; } else { is_unique[thread_index] = 1; } } template <> __global__ void hash_remove_duplicates<5>(const unsigned table_size, const unsigned total_table_size, const Functions<5> constants, const uint2 stash_constants, unsigned *keys, unsigned *is_unique) { // Read out the key that may be duplicated. unsigned int thread_index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_index >= total_table_size) return; unsigned key = keys[thread_index]; // Determine all the locations that the key could be in. unsigned location_0 = hash_function(constants, 0, key) % table_size; unsigned location_1 = hash_function(constants, 1, key) % table_size; unsigned location_2 = hash_function(constants, 2, key) % table_size; unsigned location_3 = hash_function(constants, 3, key) % table_size; unsigned location_4 = hash_function(constants, 4, key) % table_size; unsigned stash_loc = table_size + stash_hash_function(stash_constants, key); // Figure out where the key is first located. unsigned first_index; if (keys[location_0] == key) first_index = location_0; else if (keys[location_1] == key) first_index = location_1; else if (keys[location_2] == key) first_index = location_2; else if (keys[location_3] == key) first_index = location_3; else if (keys[location_4] == key) first_index = location_4; else first_index = stash_loc; // If this thread got a later copy of the key, remove this thread's copy // from the table. if (first_index != thread_index || key == kKeyEmpty) { keys[thread_index] = kKeyEmpty; is_unique[thread_index] = 0; } else { is_unique[thread_index] = 1; } } /// @} #endif //! @name Internal //! @{ //! Interleave the keys and their unique IDs in the cuckoo hash table, then compact down the keys. __global__ void hash_compact_down(const unsigned table_size, Entry *table_entry, unsigned *unique_keys, const unsigned *table, const unsigned *indices) { // Read out the table entry. unsigned int thread_index = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*blockDim.x*gridDim.x; if (thread_index >= table_size) return; unsigned key = table[thread_index]; unsigned index = indices[thread_index] - 1; Entry entry = make_entry(key, index); // Write the key and value interleaved. The value for an invalid key // doesn't matter. table_entry[thread_index] = entry; // Compact down the keys. if (key != kKeyEmpty) { unique_keys[index] = key; } } //! 
@} namespace CUDAWrapper { void CallHashBuildCompacting(const int n, const unsigned num_hash_functions, const unsigned *d_keys, const unsigned table_size, const Functions<2> constants_2, const Functions<3> constants_3, const Functions<4> constants_4, const Functions<5> constants_5, const uint2 stash_constants, const unsigned max_iterations, unsigned *d_scratch_cuckoo_keys, unsigned *d_stash_count, unsigned *d_failures) { if (num_hash_functions == 2) { hash_build_compacting <<<ComputeGridDim(n), kBlockSize>>> (n, d_keys, table_size, constants_2, stash_constants, max_iterations, d_scratch_cuckoo_keys, d_stash_count, d_failures); } else if (num_hash_functions == 3) { hash_build_compacting <<<ComputeGridDim(n), kBlockSize>>> (n, d_keys, table_size, constants_3, stash_constants, max_iterations, d_scratch_cuckoo_keys, d_stash_count, d_failures); } else if (num_hash_functions == 4) { hash_build_compacting <<<ComputeGridDim(n), kBlockSize>>> (n, d_keys, table_size, constants_4, stash_constants, max_iterations, d_scratch_cuckoo_keys, d_stash_count, d_failures); } else { hash_build_compacting <<<ComputeGridDim(n), kBlockSize>>> (n, d_keys, table_size, constants_5, stash_constants, max_iterations, d_scratch_cuckoo_keys, d_stash_count, d_failures); } CUDA_CHECK_ERROR("Failed to build.\n"); } void CallHashRemoveDuplicates(const unsigned num_hash_functions, const unsigned table_size, const unsigned total_table_size, const Functions<2> constants_2, const Functions<3> constants_3, const Functions<4> constants_4, const Functions<5> constants_5, const uint2 stash_constants, unsigned *d_scratch_cuckoo_keys, unsigned *d_scratch_counts) { // Remove any duplicated keys from the hash table and set values to one. if (num_hash_functions == 2) { hash_remove_duplicates <<<ComputeGridDim(total_table_size), kBlockSize>>> (table_size, total_table_size, constants_2, stash_constants, d_scratch_cuckoo_keys, d_scratch_counts); } else if (num_hash_functions == 3) { hash_remove_duplicates <<<ComputeGridDim(total_table_size), kBlockSize>>> (table_size, total_table_size, constants_3, stash_constants, d_scratch_cuckoo_keys, d_scratch_counts); } else if (num_hash_functions == 4) { hash_remove_duplicates <<<ComputeGridDim(total_table_size), kBlockSize>>> (table_size, total_table_size, constants_4, stash_constants, d_scratch_cuckoo_keys, d_scratch_counts); } else { hash_remove_duplicates <<<ComputeGridDim(total_table_size), kBlockSize>>> (table_size, total_table_size, constants_5, stash_constants, d_scratch_cuckoo_keys, d_scratch_counts); } CUDA_CHECK_ERROR("!!! Failed to remove duplicates. 
\n"); } void CallHashCompactDown(const unsigned table_size, Entry *d_contents, unsigned *d_unique_keys, const unsigned *d_scratch_cuckoo_keys, const unsigned *d_scratch_unique_ids) { hash_compact_down <<<ComputeGridDim(table_size), kBlockSize>>> (table_size, d_contents, d_unique_keys, d_scratch_cuckoo_keys, d_scratch_unique_ids); CUDA_CHECK_ERROR("Compact down failed.\n"); } void CallHashRetrieveCompacting(const unsigned n_queries, const unsigned num_hash_functions, const unsigned *d_keys, const unsigned table_size, const Entry *d_contents, const Functions<2> constants_2, const Functions<3> constants_3, const Functions<4> constants_4, const Functions<5> constants_5, const uint2 stash_constants, const unsigned stash_count, unsigned *d_values) { unsigned *d_retrieval_probes = NULL; #ifdef TRACK_ITERATIONS CUDA_SAFE_CALL(cudaMalloc((void**)&d_retrieval_probes, sizeof(unsigned) * n_queries)); #endif if (num_hash_functions == 2) { hash_retrieve_compacting<<<ComputeGridDim(n_queries), kBlockSize>>> (n_queries, d_keys, table_size, d_contents, constants_2, stash_constants, stash_count, d_values, d_retrieval_probes); } else if (num_hash_functions == 3) { hash_retrieve_compacting<<<ComputeGridDim(n_queries), kBlockSize>>> (n_queries, d_keys, table_size, d_contents, constants_3, stash_constants, stash_count, d_values, d_retrieval_probes); } else if (num_hash_functions == 4) { hash_retrieve_compacting<<<ComputeGridDim(n_queries), kBlockSize>>> (n_queries, d_keys, table_size, d_contents, constants_4, stash_constants, stash_count, d_values, d_retrieval_probes); } else { hash_retrieve_compacting<<<ComputeGridDim(n_queries), kBlockSize>>> (n_queries, d_keys, table_size, d_contents, constants_5, stash_constants, stash_count, d_values, d_retrieval_probes); } CUDA_CHECK_ERROR("Retrieval failed.\n"); #ifdef TRACK_ITERATIONS OutputRetrievalStatistics(n_queries, d_retrieval_probes, num_hash_functions); CUDA_SAFE_CALL(cudaFree(d_retrieval_probes)); #endif } void ClearTable(const unsigned slots_in_table, const unsigned fill_value, unsigned *d_contents) { clear_table<<<ComputeGridDim(slots_in_table), kBlockSize>>> (slots_in_table, fill_value, d_contents); CUDA_CHECK_ERROR("Error occurred during hash table clear.\n"); } }; // namespace CUDAWrapper }; // namespace CuckooHashing }; // namespace CudaHT // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
the_stack
// maximum # of GTYPE elements allocating on constant memory: 4096 __constant__ GTYPE matrix_const_gpu[1024]; __constant__ ITYPE matrix_mask_list_gpu[1024]; __constant__ UINT sorted_insert_index_list_gpu[15]; __constant__ UINT target_index_list_gpu[15]; /** vqcsim からの移植 * perform multi_qubit_Pauli_gate with XZ mask. * * This function assumes bit_flip_mask is not 0, i.e., at least one bit is flipped. If no bit is flipped, use multi_qubit_Pauli_gate_Z_mask. * This function update the quantum state with Pauli operation. * bit_flip_mask, phase_flip_mask, global_phase_90rot_count, and pivot_qubit_index must be computed before calling this function. * See get_masks_from_*_list for the above four arguemnts. */ //void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index, CPPCTYPE* state, ITYPE dim); //void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CPPCTYPE* state, ITYPE dim); //void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CPPCTYPE* state, ITYPE dim); //void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CPPCTYPE* state, ITYPE dim); __device__ double atomicAdd_double_duplicate(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } __global__ void penta_qubit_dense_matrix_gate_gpu(GTYPE *state_gpu, ITYPE dim){ __shared__ GTYPE state_basis[1024]; GTYPE tmp=make_cuDoubleComplex(0.0, 0.0); ITYPE loop_dim = dim >> 5; ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x; int y; if (basis < loop_dim){ for(y=0;y<5;++y) basis = insert_zero_to_basis_index_device(basis, sorted_insert_index_list_gpu[y] ); for(y=0;y<5;++y) basis += (1ULL << target_index_list_gpu[y])*((threadIdx.y>>y)&1); state_basis[(threadIdx.x<<5)+threadIdx.y]=state_gpu[basis]; __syncthreads(); for(y=0;y<32;++y) tmp = cuCadd(tmp, cuCmul(matrix_const_gpu[(threadIdx.y<<5) + y], state_basis[(threadIdx.x<<5)+y] )); state_gpu[ basis ] = tmp; } } __host__ void penta_qubit_dense_matrix_gate_host(const unsigned int target_qubit_index[5], const CPPCTYPE matrix[1024], void* state, ITYPE dim, void* stream, UINT device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE)*1024, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); ITYPE loop_dim = dim >> 5; dim3 block; block.y = 32; block.x = loop_dim <= 32 ? 
loop_dim : 32; unsigned int grid = loop_dim / block.x; checkCudaErrors(cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index, sizeof(UINT) * 5, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); unsigned int sort_list[5]; memcpy(sort_list, target_qubit_index, sizeof(unsigned int) * 5); std::sort(sort_list, sort_list + 5); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, sort_list, sizeof(UINT)*5, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); penta_qubit_dense_matrix_gate_gpu<<< grid, block, 0, *cuda_stream>>>(state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } __global__ void quad_qubit_dense_matrix_gate_shared_gpu(GTYPE *state_gpu, ITYPE dim){ __shared__ GTYPE state_basis[1024]; GTYPE tmp=make_cuDoubleComplex(0.0, 0.0); ITYPE loop_dim = dim >> 4; ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x; int y; if (basis < loop_dim){ for(y=0;y<4;++y) basis = insert_zero_to_basis_index_device(basis, sorted_insert_index_list_gpu[y] ); for(y=0;y<4;++y) basis += (1ULL << sorted_insert_index_list_gpu[y])*((threadIdx.y>>y)&1); state_basis[(threadIdx.x<<4)+y]=state_gpu[basis]; __syncthreads(); for(y=0;y<16;++y) tmp = cuCadd(tmp, cuCmul(matrix_const_gpu[(threadIdx.y<<4) + y], state_basis[(threadIdx.x<<4)+threadIdx.y] )); state_gpu[ basis ] = tmp; } } __global__ void quad_qubit_dense_matrix_gate_gpu(unsigned int target0_qubit_index, unsigned int target1_qubit_index, unsigned int target2_qubit_index, unsigned int target3_qubit_index, unsigned int sorted_index0, unsigned int sorted_index1, unsigned int sorted_index2, unsigned int sorted_index3, GTYPE *state_gpu, ITYPE dim){ //ITYPE basis0; ITYPE basis[16]; GTYPE d_buffer[16]; ITYPE loop_dim = dim >> 4; ITYPE basis0 = blockIdx.x * blockDim.x + threadIdx.x; int x, y; if (basis0 < loop_dim){ //basis0 = j; // create base index basis0 = insert_zero_to_basis_index_device(basis0, sorted_index0 ); basis0 = insert_zero_to_basis_index_device(basis0, sorted_index1 ); basis0 = insert_zero_to_basis_index_device(basis0, sorted_index2 ); basis0 = insert_zero_to_basis_index_device(basis0, sorted_index3); basis[0] = basis0; // 0000 basis[1] = basis0 + (1ULL << target0_qubit_index); // 0001 basis[2] = basis0 + (1ULL << target1_qubit_index); // 0010 basis[3] = basis0 + (1ULL << target1_qubit_index) + (1ULL << target0_qubit_index); // 0011 basis[4] = basis0 + (1ULL << target2_qubit_index); // 0100 basis[5] = basis0 + (1ULL << target2_qubit_index) + (1ULL << target0_qubit_index); // 0101 basis[6] = basis0 + (1ULL << target2_qubit_index) + (1ULL << target1_qubit_index); // 0110 basis[7] = basis0 + (1ULL << target2_qubit_index) + (1ULL << target1_qubit_index) + (1ULL << target0_qubit_index); // 0111 basis[8] = basis0 + (1ULL << target3_qubit_index); // 1000 basis[9] = basis0 + (1ULL << target3_qubit_index) + (1ULL << target0_qubit_index); // 1001 basis[10] = basis0 + (1ULL << target3_qubit_index) + (1ULL << target1_qubit_index); // 1010 basis[11] = basis0 + (1ULL << target3_qubit_index) + (1ULL << target1_qubit_index) + (1ULL << target0_qubit_index); // 1011 basis[12] = basis0 + (1ULL << target3_qubit_index) + (1ULL << target2_qubit_index); // 1100 basis[13] = basis0 + (1ULL << target3_qubit_index) + (1ULL << target2_qubit_index) + (1ULL << target0_qubit_index); // 1101 basis[14] = basis0 + (1ULL << target3_qubit_index) + (1ULL << target2_qubit_index) + 
(1ULL << target1_qubit_index); // 1110 basis[15] = basis0 + (1ULL << target3_qubit_index) + (1ULL << target2_qubit_index) + (1ULL << target1_qubit_index) + (1ULL << target0_qubit_index); // 1111 for(y = 0 ; y < 16 ; ++y ){ d_buffer[y]=make_cuDoubleComplex(0.0,0.0); for(x = 0 ; x < 16 ; ++x){ d_buffer[y] = cuCadd(d_buffer[y], cuCmul(matrix_const_gpu[y*16 + x], state_gpu[ basis[x] ])); } } for(y = 0 ; y < 16 ; ++y){ state_gpu[basis[y]] = d_buffer[y]; } } } __host__ void quad_qubit_dense_matrix_gate_host(const unsigned int target_qubit_index[4], const CPPCTYPE matrix[256], void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE)*256, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); ITYPE loop_dim = dim >> 4; unsigned int block = loop_dim <= 512 ? loop_dim : 512; unsigned int grid = loop_dim / block; unsigned int target0_qubit_index, target1_qubit_index, target2_qubit_index, target3_qubit_index; target0_qubit_index=target_qubit_index[0]; target1_qubit_index=target_qubit_index[1]; target2_qubit_index=target_qubit_index[2]; target3_qubit_index=target_qubit_index[3]; unsigned int sort_list[4]; memcpy(sort_list, target_qubit_index, sizeof(unsigned int) * 4); std::sort(sort_list, sort_list + 4); quad_qubit_dense_matrix_gate_gpu << <grid, block, 0, *cuda_stream >> >( target0_qubit_index, target1_qubit_index, target2_qubit_index, target3_qubit_index, sort_list[0], sort_list[1], sort_list[2], sort_list[3], state_gpu, dim ); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); /* dim3 block; block.y = 16; block.x = loop_dim <= 64 ? 
loop_dim : 64; unsigned int grid = loop_dim / block.x; checkCudaErrors(cudaMemcpyToSymbol(sorted_insert_index_list_gpu, target_qubit_index, sizeof(UINT)*4), __FILE__, __LINE__); quad_qubit_dense_matrix_gate_shared_gpu << <grid, block >> >(state_gpu, dim); checkCudaErrors(cudaDeviceSynchronize(), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); */ } // target qubit 0 < target qubit 1 < target qubit 2 __global__ void triple_qubit_dense_matrix_gate_gpu(unsigned int target0_qubit_index, unsigned int target1_qubit_index, unsigned int target2_qubit_index, unsigned int small, unsigned int mid, unsigned int large, GTYPE *state_gpu, ITYPE dim){ ITYPE basis[8]; GTYPE d_buffer[8]; ITYPE loop_dim = dim >> 3; ITYPE basis0 = blockIdx.x * blockDim.x + threadIdx.x; int x, y; if (basis0 < loop_dim){ // create base index basis0 = insert_zero_to_basis_index_device(basis0, small ); basis0 = insert_zero_to_basis_index_device(basis0, mid ); basis0 = insert_zero_to_basis_index_device(basis0, large ); basis[0] = basis0; // 000 basis[1] = basis0 + (1ULL << target0_qubit_index); // 001 basis[2] = basis0 + (1ULL << target1_qubit_index); // 010 basis[3] = basis0 + (1ULL << target1_qubit_index) + (1ULL << target0_qubit_index); // 011 basis[4] = basis0 + (1ULL << target2_qubit_index); // 100 basis[5] = basis0 + (1ULL << target2_qubit_index) + (1ULL << target0_qubit_index); // 101 basis[6] = basis0 + (1ULL << target2_qubit_index) + (1ULL << target1_qubit_index); // 110 basis[7] = basis0 + (1ULL << target2_qubit_index) + (1ULL << target1_qubit_index) + (1ULL << target0_qubit_index); // 111 for(y = 0 ; y < 8 ; ++y ){ d_buffer[y]=make_cuDoubleComplex(0.0,0.0); for(x = 0 ; x < 8 ; ++x){ d_buffer[y] = cuCadd(d_buffer[y], cuCmul(matrix_const_gpu[y*8 + x], state_gpu[ basis[x] ])); } } for(y = 0 ; y < 8 ; ++y) state_gpu[basis[y]] = d_buffer[y]; } } __host__ void triple_qubit_dense_matrix_gate_host(unsigned int target0_qubit_index, unsigned int target1_qubit_index, unsigned int target2_qubit_index, const CPPCTYPE matrix[64], void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE)*64, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); // (not using shared memory) ITYPE loop_dim = dim >> 3; unsigned int block = loop_dim <= 1024 ? 
loop_dim : 1024; unsigned int grid = loop_dim / block; unsigned int small, mid, large, tmp; small = target0_qubit_index; mid = target1_qubit_index; large = target2_qubit_index; if (small > mid) { tmp = small; small = mid; mid = tmp; } if (mid > large) { tmp = large; large = mid; mid = tmp; } if (small > mid) { tmp = small; small = mid; mid = tmp; } triple_qubit_dense_matrix_gate_gpu << <grid, block, 0, *cuda_stream >> >(target0_qubit_index, target1_qubit_index, target2_qubit_index, small, mid, large, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } // target1 qubit index > target0 qubit index __global__ void double_qubit_dense_matrix_gate_gpu(unsigned int target0_qubit_index, unsigned int target1_qubit_index,unsigned int small, unsigned int large, GTYPE *state_gpu, ITYPE dim){ // unsigned int left, right; ITYPE head, body, tail, basis0; ITYPE basis[4]; GTYPE d_buffer[4]; ITYPE quad_dim = dim >> 2; ITYPE j = blockIdx.x * blockDim.x + threadIdx.x; int x, y; /* if (target1_qubit_index > target2_qubit_index){ left = target1_qubit_index; right = target2_qubit_index; } else { left = target2_qubit_index; right = target1_qubit_index; } */ // target1 qubit index > target2 qubit index if (j < quad_dim){ head = j >> (large - 1); body = (j & ((1ULL << (large - 1)) - 1)) >> small; // (j % 2^(k-1)) >> i tail = j & ((1ULL << small) - 1); // j%(2^i) basis0 = (head << (large + 1)) + (body << (small + 1)) + tail; basis[0] = basis0; basis[1] = basis0 + (1ULL << target0_qubit_index); basis[2] = basis0 + (1ULL << target1_qubit_index); basis[3] = basis0 + (1ULL << target1_qubit_index) + (1ULL << target0_qubit_index); for(y = 0 ; y < 4 ; ++y ){ d_buffer[y]=make_cuDoubleComplex(0.0,0.0); for(x = 0 ; x < 4 ; ++x){ d_buffer[y] = cuCadd(d_buffer[y], cuCmul(matrix_const_gpu[y*4 + x], state_gpu[ basis[x] ])); } } for(y = 0 ; y < 4 ; ++y) state_gpu[basis[y]] = d_buffer[y]; } } __host__ void double_qubit_dense_matrix_gate_host(unsigned int target0_qubit_index, unsigned int target1_qubit_index, const CPPCTYPE matrix[16], void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE)*16, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); ITYPE quad_dim = dim >> 2; unsigned int block = quad_dim <= 1024 ? quad_dim : 1024; unsigned int grid = quad_dim / block; unsigned int small; unsigned int large; small = (target0_qubit_index < target1_qubit_index) ? target0_qubit_index : target1_qubit_index; large = (target0_qubit_index < target1_qubit_index) ? 
target1_qubit_index : target0_qubit_index; double_qubit_dense_matrix_gate_gpu << <grid, block, 0, *cuda_stream >> >(target0_qubit_index, target1_qubit_index, small, large, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } // multi_qubit_PauliZ_gate __device__ void multi_qubit_Pauli_gate_Z_mask_device(ITYPE phase_flip_mask, GTYPE* state_gpu, ITYPE dim){ ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; // loop varaibles const ITYPE loop_dim = dim; if(state_index<loop_dim){ // determine parity //UINT bit1_num = popcount64(state_index & phase_flip_mask); UINT bit1_num = __popcll(state_index & phase_flip_mask); // set values if(bit1_num&1) state_gpu[state_index] = make_cuDoubleComplex(-1*cuCreal(state_gpu[state_index]), -1*cuCimag(state_gpu[state_index])); } } __global__ void multi_qubit_Pauli_gate_Z_mask_gpu(ITYPE phase_flip_mask, GTYPE* state_gpu, ITYPE dim){ multi_qubit_Pauli_gate_Z_mask_device(phase_flip_mask, state_gpu, dim); } __host__ void multi_qubit_Pauli_gate_Z_mask_host(ITYPE phase_flip_mask, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; unsigned int block = dim <= 1024 ? dim : 1024; unsigned int grid = dim / block; multi_qubit_Pauli_gate_Z_mask_gpu << <grid, block, 0, *cuda_stream >> >(phase_flip_mask, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } __device__ void multi_qubit_Pauli_gate_XZ_mask_device(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, GTYPE* state_gpu, ITYPE dim){ ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; // loop varaibles const ITYPE loop_dim = dim>>1; GTYPE PHASE_M90ROT[4] = { make_cuDoubleComplex(1.0,0.0), make_cuDoubleComplex(0.0,-1), make_cuDoubleComplex(-1,0.0), make_cuDoubleComplex(0.0,1)}; if(state_index<loop_dim){ // create base index ITYPE basis_0 = insert_zero_to_basis_index_device(state_index, pivot_qubit_index); // gather index ITYPE basis_1 = basis_0 ^ bit_flip_mask; // determine sign unsigned int sign_0 = __popcll(basis_0 & phase_flip_mask)&1; unsigned int sign_1 = __popcll(basis_1 & phase_flip_mask)&1; // fetch values GTYPE cval_0 = state_gpu[basis_0]; GTYPE cval_1 = state_gpu[basis_1]; // set values state_gpu[basis_0] = cuCmul(cval_1, PHASE_M90ROT[(global_phase_90rot_count + sign_0*2)&3]); // a % 4 = a & (4-1) state_gpu[basis_1] = cuCmul(cval_0, PHASE_M90ROT[(global_phase_90rot_count + sign_1*2)&3]); // a % 4 = a & (4-1) } } __global__ void multi_qubit_Pauli_gate_XZ_mask_gpu(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, GTYPE* state_gpu, ITYPE dim){ multi_qubit_Pauli_gate_XZ_mask_device(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state_gpu, dim); } __host__ void multi_qubit_Pauli_gate_XZ_mask_host(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = 
get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; unsigned int block = dim <= 1024 ? dim : 1024; unsigned int grid = dim / block; multi_qubit_Pauli_gate_XZ_mask_gpu<< <grid, block, 0, *cuda_stream >> >(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } __device__ void multi_qubit_Pauli_rotation_gate_XZ_mask_device(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, GTYPE* state_gpu, ITYPE dim){ ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; // loop varaibles ITYPE loop_dim = dim>>1; // coefs double cosval = cos(angle/2); double sinval = sin(angle/2); //GTYPE PHASE_90ROT[4] = {make_cuDoubleComplex(1.0,0.0), make_cuDoubleComplex(0.0,1.0), make_cuDoubleComplex(-1.0,0.0), make_cuDoubleComplex(0.0,-1.0)}; GTYPE PHASE_M90ROT[4] = { make_cuDoubleComplex(1.0,0.0), make_cuDoubleComplex(0.0,-1), make_cuDoubleComplex(-1,0.0), make_cuDoubleComplex(0.0,1)}; if(state_index<loop_dim){ // create base index ITYPE basis_0 = insert_zero_to_basis_index_device(state_index, pivot_qubit_index); // gather index ITYPE basis_1 = basis_0 ^ bit_flip_mask; // determine parity unsigned int bit_parity_0 = __popcll(basis_0 & phase_flip_mask)&1; unsigned int bit_parity_1 = __popcll(basis_1 & phase_flip_mask)&1; // fetch values GTYPE cval_0 = state_gpu[basis_0]; GTYPE cval_1 = state_gpu[basis_1]; // set values GTYPE tmp = cuCmul(make_cuDoubleComplex(sinval*cuCreal(cval_1), sinval*cuCimag(cval_1)), PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_0*2)&3 ]); //state[basis_0] = cuCmul(cosval, cval_0) + 1.i * sinval * cval_1 * PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_0*2)&3 ]; // % 4 state_gpu[basis_0] = cuCadd(make_cuDoubleComplex(cosval*cuCreal(cval_0), cosval*cuCimag(cval_0)), cuCmul(tmp, make_cuDoubleComplex(0.0,1.0))); //state[basis_1] = cosval * cval_1 + 1.i * sinval * cval_0 * PHASE_M90ROT[ (global_phase_90rot_count + bit_parity_1*2)&3 ]; // % 4 tmp = cuCmul(make_cuDoubleComplex(sinval*cuCreal(cval_0), sinval*cuCimag(cval_0)), PHASE_M90ROT[(global_phase_90rot_count + bit_parity_1*2)&3 ]); state_gpu[basis_1] = cuCadd(make_cuDoubleComplex(cosval*cuCreal(cval_1), cosval*cuCimag(cval_1)), cuCmul(tmp, make_cuDoubleComplex(0.0, 1.0)) ); // % 4 } } __global__ void multi_qubit_Pauli_rotation_gate_XZ_mask_gpu(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, GTYPE* state_gpu, ITYPE dim){ multi_qubit_Pauli_rotation_gate_XZ_mask_device(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state_gpu, dim); } __host__ void multi_qubit_Pauli_rotation_gate_XZ_mask_host(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; unsigned int block = dim <= 1024 
? dim : 1024; unsigned int grid = dim / block; multi_qubit_Pauli_rotation_gate_XZ_mask_gpu<< <grid, block, 0, *cuda_stream >> >(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } __device__ void multi_qubit_Pauli_rotation_gate_Z_mask_device(ITYPE phase_flip_mask, double angle, GTYPE* state_gpu, ITYPE dim){ // loop variables const ITYPE loop_dim = dim; ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; // coefs const double cosval = cos(angle/2); const double sinval = sin(angle/2); if(state_index<loop_dim){ // determine sign UINT bit_parity = __popcll(state_index & phase_flip_mask)&1; int sign = 1 - 2*bit_parity; // set value state_gpu[state_index] = cuCmul(state_gpu[state_index], make_cuDoubleComplex(cosval, sign * sinval)); } } __global__ void multi_qubit_Pauli_rotation_gate_Z_mask_gpu(ITYPE phase_flip_mask, double angle, GTYPE* state_gpu, ITYPE dim){ multi_qubit_Pauli_rotation_gate_Z_mask_device(phase_flip_mask, angle, state_gpu, dim); } __host__ void multi_qubit_Pauli_rotation_gate_Z_mask_host(ITYPE phase_flip_mask, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; unsigned int block = dim <= 1024 ? dim : 1024; unsigned int grid = dim / block; multi_qubit_Pauli_rotation_gate_Z_mask_gpu<< <grid, block, 0, *cuda_stream >> >(phase_flip_mask, angle, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } __host__ void multi_qubit_Pauli_gate_partial_list_host(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, void* state, ITYPE dim, void* stream, unsigned int device_number) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_partial_list_gsim(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_gate_Z_mask_host(phase_flip_mask, state, dim, stream, device_number); } else { multi_qubit_Pauli_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim, stream, device_number); } } __host__ void multi_qubit_Pauli_gate_whole_list_host(const UINT* Pauli_operator_type_list, UINT qubit_count, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_whole_list_gsim(Pauli_operator_type_list, qubit_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { 
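// bit_flip_mask == 0 means the Pauli string contains only I and Z terms, so the
// operator is diagonal: the Z-mask kernel just negates amplitudes whose index has
// odd parity under phase_flip_mask.  The else branch below handles X/Y components
// with the XZ-mask kernel, which pairs amplitudes through basis ^ bit_flip_mask and
// applies the i^k phases tracked by global_phase_90rot_count.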
multi_qubit_Pauli_gate_Z_mask_host(phase_flip_mask, state, dim, stream, device_number); } else { multi_qubit_Pauli_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim, stream, device_number); } } __host__ void multi_qubit_Pauli_rotation_gate_partial_list_host(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_partial_list_gsim(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if(bit_flip_mask == 0){ multi_qubit_Pauli_rotation_gate_Z_mask_host(phase_flip_mask, angle, state, dim, stream, device_number); }else{ multi_qubit_Pauli_rotation_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index,angle, state, dim, stream, device_number); } } __host__ void multi_qubit_Pauli_rotation_gate_whole_list_host(const UINT* Pauli_operator_type_list, UINT qubit_count, double angle, void* state, ITYPE dim, void* stream, unsigned int device_number) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_whole_list_gsim(Pauli_operator_type_list, qubit_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_rotation_gate_Z_mask_host(phase_flip_mask, angle, state, dim, stream, device_number); } else { multi_qubit_Pauli_rotation_gate_XZ_mask_host(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim, stream, device_number); } } // target_qubit_count <= 5 __global__ void multi_qubit_dense_matrix_gate_shared_gpu(UINT target_qubit_index_count, GTYPE *state_gpu, ITYPE dim){ __shared__ GTYPE state_basis[1024]; GTYPE tmp=make_cuDoubleComplex(0.0, 0.0); ITYPE loop_dim = dim >> target_qubit_index_count; ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x; int j; ITYPE mat_len = 1ULL << target_qubit_index_count; if (basis < loop_dim){ for(j=0;j<target_qubit_index_count;++j) basis = insert_zero_to_basis_index_device(basis, sorted_insert_index_list_gpu[j] ); for(j=0;j<target_qubit_index_count;++j) basis += (1ULL << target_index_list_gpu[j])*((threadIdx.y>>j)&1); state_basis[(threadIdx.x<<target_qubit_index_count)+threadIdx.y]=state_gpu[basis]; __syncthreads(); for(j=0;j<mat_len;++j) tmp = cuCadd(tmp, cuCmul(matrix_const_gpu[(threadIdx.y<<target_qubit_index_count) + j], state_basis[(threadIdx.x<<target_qubit_index_count)+j] )); state_gpu[ basis ] = tmp; } } // target_qubit_count <= 10 __global__ void multi_qubit_dense_matrix_gate_shared_gpu(UINT target_qubit_index_count, GTYPE* matrix_gpu, GTYPE *state_gpu, ITYPE dim){ __shared__ GTYPE state_basis[1024]; GTYPE tmp=make_cuDoubleComplex(0.0, 0.0); ITYPE loop_dim = dim >> target_qubit_index_count; ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x; int j; ITYPE mat_len = 1ULL << target_qubit_index_count; if (basis < loop_dim){ for(j=0;j<target_qubit_index_count;++j) basis = insert_zero_to_basis_index_device(basis, sorted_insert_index_list_gpu[j] ); for(j=0;j<target_qubit_index_count;++j) basis += (1ULL << 
target_index_list_gpu[j])*((threadIdx.y>>j)&1); state_basis[(threadIdx.x<<target_qubit_index_count)+threadIdx.y]=state_gpu[basis]; __syncthreads(); for(j=0;j<mat_len;++j) tmp = cuCadd(tmp, cuCmul(matrix_gpu[(threadIdx.y<<target_qubit_index_count) + j], state_basis[(threadIdx.x<<target_qubit_index_count)+j] )); state_gpu[ basis ] = tmp; } } // there is no atomicAdd // target_qubit_index_count<=11 __global__ void multi_qubit_dense_matrix_gate_half_shared_gpu(UINT target_qubit_index_count, GTYPE* matrix_gpu, GTYPE *state_gpu, ITYPE dim){ __shared__ GTYPE state_basis[2048]; ITYPE loop_dim = dim >> target_qubit_index_count; ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x; ITYPE basis0, basis1; ITYPE matrix_len = 1ULL << target_qubit_index_count; //ITYPE half_matrix_len = 1ULL << (target_qubit_index_count-1); if (basis < loop_dim){ for(int j=0;j<target_qubit_index_count;++j) basis = insert_zero_to_basis_index_device(basis, sorted_insert_index_list_gpu[j] ); for(int j=0;j<target_qubit_index_count-1;++j) basis += (1ULL << target_index_list_gpu[j+1])*((threadIdx.y>>j)&1); basis0=basis; basis1=basis0^(1ULL<<sorted_insert_index_list_gpu[0]); state_basis[(threadIdx.x<<target_qubit_index_count)+(threadIdx.y<<1)]=state_gpu[basis0]; state_basis[(threadIdx.x<<target_qubit_index_count)+(threadIdx.y<<1)+1]=state_gpu[basis1]; __syncthreads(); GTYPE d_buff = make_cuDoubleComplex(0.0, 0.0); for(int j=0;j<matrix_len;++j) d_buff = cuCadd(d_buff, cuCmul(matrix_gpu[((threadIdx.y<<1)<<target_qubit_index_count) + j], state_basis[(threadIdx.x<<target_qubit_index_count)+j] )); state_gpu[ basis0 ] = d_buff; d_buff = make_cuDoubleComplex(0.0, 0.0); for(int j=0;j<matrix_len;++j) d_buff = cuCadd(d_buff, cuCmul(matrix_gpu[(((threadIdx.y<<1)+1)<<target_qubit_index_count) + j], state_basis[(threadIdx.x<<target_qubit_index_count)+j] )); state_gpu[ basis1 ] = d_buff; // printf("basis0: %d, basis1: %d\n", (int)basis0, (int)basis1); } } __global__ void multi_qubit_dense_matrix_gate_gpu(UINT target_qubit_index_count, GTYPE* matrix_gpu, GTYPE* state_gpu, GTYPE* state_gpu_copy, ITYPE dim) { __shared__ GTYPE state_basis[1024]; ITYPE loop_dim = dim >> target_qubit_index_count; ITYPE large_block_index = 0; ITYPE large_block_residual = 0; ITYPE block_loop_dim = 1; //target_qubit_index_count-3; ITYPE block_index = 0; ITYPE block_residual = 0; //block_loop_dim<=1 ? 
0 : blockIdx.x % (1ULL<<block_loop_dim); ITYPE basis = blockIdx.x * blockDim.x + threadIdx.x; ITYPE assign_basis; ITYPE basis0; if(target_qubit_index_count>=10+1){ block_loop_dim = 1ULL << (target_qubit_index_count-10); large_block_index = blockIdx.x / (block_loop_dim*block_loop_dim); large_block_residual = blockIdx.x % (block_loop_dim*block_loop_dim); block_index= large_block_residual / block_loop_dim; block_residual = blockIdx.x % block_loop_dim; basis = large_block_index * blockDim.x + threadIdx.x; } ITYPE matrix_len = 1ULL << target_qubit_index_count; if(basis < loop_dim){ ITYPE tmp = (block_residual<<10) + threadIdx.y; for(int j=0;j<target_qubit_index_count;++j) basis = insert_zero_to_basis_index_device(basis, sorted_insert_index_list_gpu[j] ); basis0=basis; for(int j=0;j<target_qubit_index_count;++j) basis += (1ULL << target_index_list_gpu[j])*( (tmp>>j) & 1); state_basis[(threadIdx.x<<target_qubit_index_count)+threadIdx.y]=state_gpu_copy[basis]; if(target_qubit_index_count>=10+1){ tmp = (block_index << 10) + threadIdx.y; assign_basis = basis0; for(int j=0;j<target_qubit_index_count;++j) assign_basis += (1ULL << target_index_list_gpu[j])*( (tmp>>j) & 1); }else{ assign_basis = basis; } __syncthreads(); GTYPE d_buff = make_cuDoubleComplex(0.0, 0.0); ITYPE tmp_len = block_residual << 10; if(matrix_len>1024) matrix_len=1024; ITYPE row_index = ( block_index << 10 ) + threadIdx.y; for(ITYPE j=0;j<matrix_len;++j) d_buff = cuCadd(d_buff, cuCmul(matrix_gpu[(row_index<<target_qubit_index_count) + j + tmp_len], state_basis[(threadIdx.x<<target_qubit_index_count)+j] )); atomicAdd_double_duplicate(&(state_gpu[assign_basis].x), d_buff.x); atomicAdd_double_duplicate(&(state_gpu[assign_basis].y), d_buff.y); } } __host__ void multi_qubit_dense_matrix_gate_small_qubit_host(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; // matrix dim, mask, buffer ITYPE matrix_dim = 1ULL << target_qubit_index_count; // insert index UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(target_qubit_index_list, target_qubit_index_count); // loop variables ITYPE loop_dim = dim >> target_qubit_index_count; GTYPE* matrix_gpu; unsigned int max_block_size = 1024 / matrix_dim; dim3 block; block.y = matrix_dim; block.x = loop_dim <= max_block_size ? 
loop_dim : max_block_size; unsigned int grid = loop_dim / block.x; if (target_qubit_index_count <= 5) { checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE)*matrix_dim*matrix_dim, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list, sizeof(UINT)*target_qubit_index_count, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, h_sorted_insert_index_list, sizeof(UINT)*target_qubit_index_count, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); multi_qubit_dense_matrix_gate_shared_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index_count, state_gpu, dim); } else if (target_qubit_index_count <= 10) { checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&matrix_gpu), matrix_dim *matrix_dim * sizeof(GTYPE)), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(matrix_gpu, matrix, matrix_dim *matrix_dim * sizeof(GTYPE), cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list, sizeof(UINT)*target_qubit_index_count, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, h_sorted_insert_index_list, sizeof(UINT)*target_qubit_index_count, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); multi_qubit_dense_matrix_gate_shared_gpu << <grid, block, 0, *cuda_stream >> > (target_qubit_index_count, matrix_gpu, state_gpu, dim); } checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); if (target_qubit_index_count > 5) cudaFree(matrix_gpu); free((UINT*)h_sorted_insert_index_list); state = reinterpret_cast<void*>(state_gpu); } __host__ void multi_qubit_dense_matrix_gate_11qubit_host(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); //cudaError cudaStatus; // matrix dim, mask, buffer ITYPE matrix_dim = 1ULL << target_qubit_index_count; UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(target_qubit_index_list, target_qubit_index_count); // loop variables //ITYPE loop_dim = dim >> target_qubit_index_count; GTYPE *matrix_gpu; dim3 block; block.y = (matrix_dim>>1) <= 1024 ? (matrix_dim>>1) : 1024; unsigned int max_block_size = 1024 / block.y; block.x = dim/block.y <= max_block_size ? 
dim/block.y : max_block_size; unsigned int grid = dim / block.x / block.y; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&matrix_gpu), matrix_dim *matrix_dim * sizeof(GTYPE) ), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(matrix_gpu, matrix, matrix_dim *matrix_dim * sizeof(GTYPE), cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list, sizeof(UINT)*target_qubit_index_count, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, h_sorted_insert_index_list, sizeof(UINT)*target_qubit_index_count, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); multi_qubit_dense_matrix_gate_half_shared_gpu << <grid, block, 0, *cuda_stream >> >(target_qubit_index_count, matrix_gpu, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__); checkCudaErrors(cudaFree(matrix_gpu), __FILE__, __LINE__); free((UINT*)h_sorted_insert_index_list); state = reinterpret_cast<void*>(state_gpu); } __host__ void multi_qubit_dense_matrix_gate_more_than_11qubit_host(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream, UINT device_number){ int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); //cudaError cudaStatus; // matrix dim, mask, buffer ITYPE matrix_dim = 1ULL << target_qubit_index_count; UINT* h_sorted_insert_index_list = create_sorted_ui_list_gsim(target_qubit_index_list, target_qubit_index_count); // loop variables ITYPE loop_dim = dim >> target_qubit_index_count; GTYPE *matrix_gpu; dim3 grid, block; block.y = matrix_dim <= (1ULL<<10) ? matrix_dim : (1ULL<<10); unsigned int max_block_size = (1ULL<<10) / block.y; block.x = dim/block.y <= max_block_size ? 
dim/block.y : max_block_size; grid.x = dim / block.x / block.y; if(target_qubit_index_count>=10+1) grid.x = (1ULL<<((target_qubit_index_count-10)<<1)) * loop_dim; GTYPE* state_gpu_copy; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&matrix_gpu), matrix_dim *matrix_dim * sizeof(GTYPE) ), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(matrix_gpu, matrix, matrix_dim *matrix_dim * sizeof(GTYPE), cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(target_index_list_gpu, target_qubit_index_list, sizeof(UINT)*target_qubit_index_count, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, h_sorted_insert_index_list, sizeof(UINT)*target_qubit_index_count, 0,cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&state_gpu_copy), dim * sizeof(GTYPE) ), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(state_gpu_copy, state_gpu, dim * sizeof(GTYPE), cudaMemcpyDeviceToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemsetAsync(state_gpu, 0, dim * sizeof(GTYPE), *cuda_stream), __FILE__, __LINE__); multi_qubit_dense_matrix_gate_gpu<<< grid, block, 0, *cuda_stream >>>(target_qubit_index_count, matrix_gpu, state_gpu, state_gpu_copy, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__); cudaFree(state_gpu_copy); cudaFree(matrix_gpu); free((UINT*)h_sorted_insert_index_list); state = reinterpret_cast<void*>(state_gpu); } __host__ void multi_qubit_dense_matrix_gate_host(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream, unsigned int device_number) { if (target_qubit_index_count == 1) { single_qubit_dense_matrix_gate_host(target_qubit_index_list[0], matrix, state, dim, stream, device_number); } else if (target_qubit_index_count == 2) { double_qubit_dense_matrix_gate_host(target_qubit_index_list[0], target_qubit_index_list[1], matrix, state, dim, stream, device_number); } else if (target_qubit_index_count == 3) { triple_qubit_dense_matrix_gate_host(target_qubit_index_list[0], target_qubit_index_list[1], target_qubit_index_list[2], matrix, state, dim, stream, device_number); } else if (target_qubit_index_count == 4) { UINT target_qubit_index_list_copy[4]; for (int i = 0; i < 4; ++i) target_qubit_index_list_copy[i] = target_qubit_index_list[i]; quad_qubit_dense_matrix_gate_host(target_qubit_index_list_copy, matrix, state, dim, stream, device_number); } else if (target_qubit_index_count == 11) { multi_qubit_dense_matrix_gate_11qubit_host(target_qubit_index_list, target_qubit_index_count, matrix, state, dim, stream, device_number); } else if (target_qubit_index_count >= 12) { multi_qubit_dense_matrix_gate_more_than_11qubit_host(target_qubit_index_list, target_qubit_index_count, matrix, state, dim, stream, device_number); } else { multi_qubit_dense_matrix_gate_small_qubit_host(target_qubit_index_list, target_qubit_index_count, matrix, state, dim, stream, device_number); } } // target_qubit_index_count <= 5 __global__ void single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(UINT control_qubit_index, UINT control_value, UINT target_qubit_index_count, GTYPE* state, ITYPE dim) { // control mask const ITYPE control_mask = (1ULL << control_qubit_index) * control_value; const UINT insert_index_count = target_qubit_index_count + 1; const ITYPE 
matrix_dim = 1ULL << target_qubit_index_count; // loop varaibles const ITYPE loop_dim = dim >> insert_index_count; GTYPE d_buffer[1024]; ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; if(state_index < loop_dim){ // create base index ITYPE basis_0 = state_index; for(UINT cursor=0; cursor < insert_index_count ; cursor++){ UINT insert_index = sorted_insert_index_list_gpu[cursor]; basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index ); } // flip control basis_0 ^= control_mask; // compute matrix mul for(ITYPE y = 0 ; y < matrix_dim ; ++y ){ d_buffer[y]=make_cuDoubleComplex(0.0, 0.0); for(ITYPE x = 0 ; x < matrix_dim ; ++x){ d_buffer[y] = cuCadd( d_buffer[y], cuCmul( matrix_const_gpu[y*matrix_dim + x], state[ basis_0 ^ matrix_mask_list_gpu[x] ])); } } // set result for(ITYPE y = 0 ; y < matrix_dim ; ++y){ state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y]; } } } // target_qubit_index_count <= 10 __global__ void single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(UINT control_qubit_index, UINT control_value, UINT target_qubit_index_count, const GTYPE* matrix, GTYPE* state, ITYPE dim) { // control mask const ITYPE control_mask = (1ULL << control_qubit_index) * control_value; const UINT insert_index_count = target_qubit_index_count + 1; const ITYPE matrix_dim = 1ULL << target_qubit_index_count; // loop varaibles const ITYPE loop_dim = dim >> insert_index_count; GTYPE d_buffer[1024]; ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; if(state_index < loop_dim){ // create base index ITYPE basis_0 = state_index; for(UINT cursor=0; cursor < insert_index_count ; cursor++){ UINT insert_index = sorted_insert_index_list_gpu[cursor]; basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index ); } // flip control basis_0 ^= control_mask; // compute matrix mul for(ITYPE y = 0 ; y < matrix_dim ; ++y ){ d_buffer[y]=make_cuDoubleComplex(0.0, 0.0); for(ITYPE x = 0 ; x < matrix_dim ; ++x){ d_buffer[y] = cuCadd(d_buffer[y] , cuCmul( matrix[y*matrix_dim + x], state[ basis_0 ^ matrix_mask_list_gpu[x] ])); } } // set result for(ITYPE y = 0 ; y < matrix_dim ; ++y){ state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y]; } } } __host__ void single_qubit_control_multi_qubit_dense_matrix_gate_host(UINT control_qubit_index, UINT control_value, const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; ITYPE* matrix_mask_list = create_matrix_mask_list_gsim(target_qubit_index_list, target_qubit_index_count); // insert list const UINT insert_index_count = target_qubit_index_count + 1; UINT* sorted_insert_index_list = create_sorted_ui_list_value_gsim(target_qubit_index_list, target_qubit_index_count ,control_qubit_index); GTYPE *d_matrix, *d_matrix_mask_list; // loop varaibles const ITYPE loop_dim = dim >> insert_index_count; unsigned int block = loop_dim <= 1024 ? loop_dim : 1024; unsigned int grid = loop_dim / block; if(target_qubit_index_count<=10){ if(target_qubit_index_count>=3){ unsigned int tmp_block = 1ULL << (13-target_qubit_index_count); block = loop_dim <= tmp_block ? loop_dim : tmp_block; }else{ block = loop_dim <= 1024 ? 
loop_dim : 1024; } grid = loop_dim / block; if(target_qubit_index_count<=5){ checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE)*matrix_dim*matrix_dim, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_mask_list_gpu, matrix_mask_list, sizeof(ITYPE)*matrix_dim, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, sorted_insert_index_list, sizeof(UINT)*(target_qubit_index_count + 1), 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<< grid, block, 0, *cuda_stream >>> (control_qubit_index, control_value, target_qubit_index_count, state_gpu, dim); }else{ checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_matrix), matrix_dim *matrix_dim * sizeof(GTYPE) ), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(d_matrix, matrix, matrix_dim *matrix_dim * sizeof(GTYPE), cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_matrix_mask_list), matrix_dim *matrix_dim * sizeof(GTYPE) ), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(d_matrix_mask_list, matrix_mask_list, sizeof(ITYPE)*matrix_dim, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, sorted_insert_index_list, sizeof(UINT)*(target_qubit_index_count + 1), 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); single_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<< grid, block, 0, *cuda_stream >>> (control_qubit_index, control_value, target_qubit_index_count, d_matrix, state_gpu, dim); } }else{ printf("The max number of targets is limited to 10."); assert(0); } checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); if(target_qubit_index_count>5){ cudaFree(d_matrix); cudaFree(d_matrix_mask_list); } free(sorted_insert_index_list); free(matrix_mask_list); state = reinterpret_cast<void*>(state_gpu); } // target_qubit_index_count <= 5 __global__ void multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(ITYPE control_mask, UINT target_qubit_index_count, ITYPE control_qubit_index_count, GTYPE* state, ITYPE dim) { // control mask const UINT insert_index_count = target_qubit_index_count + control_qubit_index_count; const ITYPE matrix_dim = 1ULL << target_qubit_index_count; // loop varaibles const ITYPE loop_dim = dim >> insert_index_count; GTYPE d_buffer[1024]; ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; if(state_index < loop_dim){ // create base index ITYPE basis_0 = state_index; for(UINT cursor=0; cursor < insert_index_count ; cursor++){ UINT insert_index = sorted_insert_index_list_gpu[cursor]; basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index ); } // flip control basis_0 ^= control_mask; // compute matrix mul for(ITYPE y = 0 ; y < matrix_dim ; ++y ){ d_buffer[y]=make_cuDoubleComplex(0.0, 0.0); for(ITYPE x = 0 ; x < matrix_dim ; ++x){ d_buffer[y] = cuCadd(d_buffer[y] , cuCmul( matrix_const_gpu[y*matrix_dim + x], state[ basis_0 ^ matrix_mask_list_gpu[x] ])); } } // set result for(ITYPE y = 0 ; y < matrix_dim ; ++y){ state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y]; } } } // target_qubit_index_count <= 10 __global__ void 
multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu(ITYPE control_mask, UINT target_qubit_index_count, ITYPE control_qubit_index_count, const GTYPE* matrix, GTYPE* state, ITYPE dim) { // control mask const UINT insert_index_count = target_qubit_index_count + control_qubit_index_count; const ITYPE matrix_dim = 1ULL << target_qubit_index_count; // loop varaibles const ITYPE loop_dim = dim >> insert_index_count; GTYPE d_buffer[1024]; ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; if(state_index < loop_dim){ // create base index ITYPE basis_0 = state_index; for(UINT cursor=0; cursor < insert_index_count ; cursor++){ UINT insert_index = sorted_insert_index_list_gpu[cursor]; basis_0 = insert_zero_to_basis_index_device(basis_0, insert_index ); } // flip control basis_0 ^= control_mask; // compute matrix mul for(ITYPE y = 0 ; y < matrix_dim ; ++y ){ d_buffer[y]=make_cuDoubleComplex(0.0, 0.0); for(ITYPE x = 0 ; x < matrix_dim ; ++x){ d_buffer[y] = cuCadd(d_buffer[y] , cuCmul( matrix[y*matrix_dim + x], state[ basis_0 ^ matrix_mask_list_gpu[x] ])); } } // set result for(ITYPE y = 0 ; y < matrix_dim ; ++y){ state[basis_0 ^ matrix_mask_list_gpu[y]] = d_buffer[y]; } } } __host__ void multi_qubit_control_multi_qubit_dense_matrix_gate_host(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CPPCTYPE* matrix, void* state, ITYPE dim, void* stream, unsigned int device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice((int)device_number); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*> (stream); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaError cudaStatus; // matrix dim, mask, buffer const ITYPE matrix_dim = 1ULL << target_qubit_index_count; ITYPE* matrix_mask_list = create_matrix_mask_list_gsim(target_qubit_index_list, target_qubit_index_count); // insert index UINT* sorted_insert_index_list = create_sorted_ui_list_list_gsim(target_qubit_index_list, target_qubit_index_count, control_qubit_index_list, control_qubit_index_count); // control mask ITYPE control_mask = create_control_mask_gsim(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop varaibles const ITYPE loop_dim = dim >> (target_qubit_index_count+control_qubit_index_count); GTYPE *d_matrix, *d_matrix_mask_list; unsigned int block = loop_dim <= 1024 ? loop_dim : 1024; unsigned int grid = loop_dim / block; if(target_qubit_index_count<=10){ if(target_qubit_index_count>=3){ unsigned int tmp_block = 1ULL << (13-target_qubit_index_count); block = loop_dim <= tmp_block ? loop_dim : tmp_block; }else{ block = loop_dim <= 1024 ? 
loop_dim : 1024; } grid = loop_dim / block; if(target_qubit_index_count<=5){ checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_const_gpu, matrix, sizeof(GTYPE)*matrix_dim*matrix_dim, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(matrix_mask_list_gpu, matrix_mask_list, sizeof(ITYPE)*matrix_dim, 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, sorted_insert_index_list, sizeof(UINT)*(target_qubit_index_count+control_qubit_index_count), 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<< grid, block, 0, *cuda_stream >>> (control_mask, target_qubit_index_count, control_qubit_index_count, state_gpu, dim); }else{ checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_matrix), matrix_dim *matrix_dim * sizeof(GTYPE) ), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(d_matrix, matrix, matrix_dim *matrix_dim * sizeof(GTYPE), cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_matrix_mask_list), matrix_dim *matrix_dim * sizeof(GTYPE) ), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(d_matrix_mask_list, matrix_mask_list, sizeof(ITYPE)*matrix_dim, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyToSymbolAsync(sorted_insert_index_list_gpu, sorted_insert_index_list, sizeof(UINT)*(target_qubit_index_count + control_qubit_index_count), 0, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); multi_qubit_control_multi_qubit_dense_matrix_gate_const_gpu<<< grid, block, 0, *cuda_stream >>> (control_mask, target_qubit_index_count, control_qubit_index_count, d_matrix, state_gpu, dim); } }else{ printf("The max number of targets is limited to 10."); assert(0); } checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus, __FILE__, __LINE__); if(target_qubit_index_count>5){ cudaFree(d_matrix); cudaFree(d_matrix_mask_list); } free(sorted_insert_index_list); free(matrix_mask_list); state = reinterpret_cast<void*>(state_gpu); } // n_qubit <= 10 __global__ void multi_qubit_diagonal_matrix_gate_gpu(GTYPE* state_gpu, ITYPE dim) { ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; if (state_index < dim) { state_gpu[state_index] = cuCmul(matrix_const_gpu[state_index], state_gpu[state_index]); } } // n_qubit > 10 __global__ void multi_qubit_diagonal_matrix_gate_gpu(GTYPE* matrix_gpu, GTYPE* state_gpu, ITYPE dim) { ITYPE state_index = blockIdx.x * blockDim.x + threadIdx.x; if (state_index < dim) { state_gpu[state_index] = cuCmul(matrix_gpu[state_index], state_gpu[state_index]); } } __host__ void multi_qubit_diagonal_matrix_gate_with_constant_memory_host(const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream, UINT device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice(device_number); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); checkCudaErrors(cudaMemcpyToSymbol(matrix_const_gpu, diagonal_matrix, sizeof(GTYPE) * dim), __FILE__, __LINE__); unsigned int block = dim <= 1024 ? 
dim : 1024; unsigned int grid = dim / block; multi_qubit_diagonal_matrix_gate_gpu << <grid, block, 0, * cuda_stream >> > (state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__); state = reinterpret_cast<void*>(state_gpu); } __host__ void multi_qubit_diagonal_matrix_gate_with_global_memory_host(const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream, UINT device_number) { int current_device = get_current_device(); if (device_number != current_device) cudaSetDevice(device_number); GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state); cudaStream_t* cuda_stream = reinterpret_cast<cudaStream_t*>(stream); GTYPE* d_matrix; checkCudaErrors(cudaMalloc((void**)& d_matrix, sizeof(GTYPE) * dim), __FILE__, __LINE__); checkCudaErrors(cudaMemcpyAsync(d_matrix, diagonal_matrix, sizeof(GTYPE) * dim, cudaMemcpyHostToDevice, *cuda_stream), __FILE__, __LINE__); unsigned int block = dim <= 1024 ? dim : 1024; unsigned int grid = dim / block; multi_qubit_diagonal_matrix_gate_gpu << <grid, block, 0, * cuda_stream >> > (d_matrix, state_gpu, dim); checkCudaErrors(cudaStreamSynchronize(*cuda_stream), __FILE__, __LINE__); checkCudaErrors(cudaGetLastError(), __FILE__, __LINE__); cudaFree(d_matrix); state = reinterpret_cast<void*>(state_gpu); } __host__ void multi_qubit_diagonal_matrix_gate_host(const CPPCTYPE* diagonal_matrix, void* state, ITYPE dim, void* stream, UINT device_number) { if (dim <= 1024) { multi_qubit_diagonal_matrix_gate_with_constant_memory_host(diagonal_matrix, state, dim, stream, device_number); } else { multi_qubit_diagonal_matrix_gate_with_global_memory_host(diagonal_matrix, state, dim, stream, device_number); } }
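// Usage sketch for the dispatcher above: a minimal host-side call applying a CZ
// gate expressed as a dense 4x4 matrix.  This is illustrative only and assumes
// CPPCTYPE behaves like std::complex<double> (constructible from two doubles),
// that `state` already points to a device-side state vector of length dim = 2^n,
// and that `stream` wraps a valid cudaStream_t; the helper name
// apply_cz_as_dense_matrix_example is hypothetical.
static void apply_cz_as_dense_matrix_example(void* state, ITYPE dim, void* stream)
{
    const UINT targets[2] = {0, 1};
    CPPCTYPE mat[16];
    for (int i = 0; i < 16; ++i) mat[i] = CPPCTYPE(0.0, 0.0);
    // Row-major diag(1, 1, 1, -1): phase-flip the |11> component.
    mat[0 * 4 + 0] = CPPCTYPE(1.0, 0.0);
    mat[1 * 4 + 1] = CPPCTYPE(1.0, 0.0);
    mat[2 * 4 + 2] = CPPCTYPE(1.0, 0.0);
    mat[3 * 4 + 3] = CPPCTYPE(-1.0, 0.0);
    // With two targets the dispatcher routes to double_qubit_dense_matrix_gate_host,
    // which copies the 16 coefficients into matrix_const_gpu and launches
    // double_qubit_dense_matrix_gate_gpu on the given stream.
    multi_qubit_dense_matrix_gate_host(targets, 2, mat, state, dim, stream,
                                       /*device_number=*/0);
}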
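// The interpolateEmpty() device function below reconstructs a per-well empty trace
// by blending the stored empty traces of the (up to) 3x3 neighborhood of live
// regions around regId.  Each live neighbor is weighted by
// max(0, regW - dx) * max(0, regH - dy), the weights are normalized by their sum,
// and the neighbor traces are accumulated frame by frame with those weights.
// A minimal sketch of that weighting rule, with hypothetical names:
static __device__ __host__ inline float emptyTraceNeighborWeight(float regW, float regH,
                                                                 float dx, float dy)
{
    // dx, dy: absolute distance from the well to the neighbor region's center.
    const float wx = regW - dx;
    const float wy = regH - dy;
    return (wx > 0.0f && wy > 0.0f) ? wx * wy : 0.0f;
}
// The nine weights are then divided by their sum ("area" in the kernel below), so
// the interpolated trace is always a convex combination of the live neighbors.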
//#define __CUDA_ARCH__ 350 //#define USE_INTERPOLATED_EMPTY namespace { enum ModelFuncEvaluationOutputMode { NoOutput, OneParam, TwoParams }; } //empty t __device__ //void interpolateEmpty( float * interPolTrace, const float * emptyTracesRegion, const unsigned short * RegionMask, const size_t regId, const size_t ix, const size_t iy, int nframes) void interpolateEmpty( float * interPolTrace, const unsigned short * RegionMask, const size_t regId, const size_t ix, const size_t iy, int nframes) { int nRegId; int col= ImgRegP.getRegCol(regId); int row= ImgRegP.getRegRow(regId); float regX = ImgRegP.getRegCenterX(regId); float regY = ImgRegP.getRegCenterY(regId); //const int startC = (ix < regX )?(-1):(0); //const int startR = (iy < regY )?(-1):(0); // bilinear interpolation // | | // 0--|--1 | // | | | | //----------|---- // | | *| | // 2--|--3--| <- regId == 3, our well is the * so we have to interpolate between // | | | the regions marked 0, 1, 2 and 3 // ---|-----|----- // determine which quadrant we are in so we know which 4 regions we have to // interpolate inbetween //allays start with the region at the top left of the quadrant // float area = 0; // to normalize weights float weights[9] = {0}; int RegIds[9]; //bool printMe = (ix%80 == 62 && iy%112 == 100); for(int r = -1; r < 2; r++){ int nRow=row + r ; for(int c = -1; c < 2; c++){ int nCol = col+c; nRegId = regId + ImgRegP.getGridDimX() * r + c; int ArrayIdx = (r+1)*3+(c+1); bool valid = ( nRow >= 0 && nRow < ImgRegP.getGridDimY()) && ( nCol >= 0 && nCol < ImgRegP.getGridDimX()); if(valid) valid = (valid && ( LDG_ACCESS(RegionMask,nRegId) == RegionMaskLive)); //regH/W are not necessarily correct for the weighting at boundary regions but should be negligible float weightX = (valid)?(ImgRegP.getRegW(regId)-ImgRegP.getRegDistanceX(nRegId,ix)):(0.0f) ; float weightY = (valid)?(ImgRegP.getRegH(regId)-ImgRegP.getRegDistanceY(nRegId,iy)):(0.0f); // if(printMe && valid) printf("centerof,%d,%f,%f,ix,%lu,iy,%lu,dist,%f,%f\n",nRegId,ImgRegP.getRegCenterX(nRegId),ImgRegP.getRegCenterY(nRegId),ix,iy,ImgRegP.getRegDistanceX(nRegId,ix),ImgRegP.getRegDistanceY(nRegId,iy)); float weight = (weightX > 0 && weightY > 0)?(weightX*weightY):(0); valid = (valid && weight>0); area+=(valid)?(weight):(0); // accumulate the calculated area to normalize by. 
so all out weights always add up to 1.0 weights[ArrayIdx] = (valid)?(weight):(0); RegIds[ArrayIdx] = (valid)?(nRegId):(-1); //if (printMe && valid) printf ("%lu,%d,%d,%d,%d,%f,%lu,%lu,%f\n", regId,c,r,ArrayIdx,RegIds[ArrayIdx],weights[ArrayIdx],ix,iy,area); } } for(int i=0;i<9;i++) weights[i] = (RegIds[i] > -1 && area>0 )?(weights[i]/area):(0); for(int f=0; f<nframes; f++) interPolTrace[f] = 0; for(int i=0;i<9;i++){ //const float * nEmptyTrace = emptyTracesRegion + RegIds[i] * ConstFrmP.getUncompFrames(); const float * nEmptyTrace = ConstHistCol.getLatestEmptyTraces() + RegIds[i] * ConstFrmP.getUncompFrames(); for(int f=0; f<nframes; f++){ if(weights[i] > 0 && RegIds[i] != -1){ float thisFrame = LDG_ACCESS(nEmptyTrace, f); interPolTrace[f] += thisFrame * weights[i]; } } } } // smoothing kernel to provide weights for smoothing exponential tail __device__ void GenerateSmoothingKernelForExponentialTailFit_dev( const float* frameNumber, const int size, const float taubInv, const int exp_start, float* kern ) { float dt; for (int i=0; i<size; ++i) { dt = (frameNumber[i+exp_start] - frameNumber[exp_start + 3])*taubInv; kern[i] = __expf(dt); } } __device__ float BlockLevel_DecideOnEmphasisVectorsForInterpolation( const int* nonZeroEmpFramesVec, const float** emLeft, const float** emRight, const float Ampl, const float* emphasis, const int num_frames, int &nonZeroEmpFrames ) { float frac; int left; if (Ampl < LAST_POISSON_TABLE_COL) { left = (int) Ampl; frac = (left + 1.0f - Ampl); if (left < 0) { left = 0; frac = 1.0f; } *emLeft = &emphasis[left]; *emRight = &emphasis[left + 1]; }else{ left = LAST_POISSON_TABLE_COL; *emLeft = &emphasis[left]; *emRight = NULL; frac = 1.0f; } nonZeroEmpFrames = (left == LAST_POISSON_TABLE_COL) ? nonZeroEmpFramesVec[left] : max(nonZeroEmpFramesVec[left], nonZeroEmpFramesVec[left + 1]); return frac; } __device__ void Fermi_ModelFuncEvaluationForSingleFlowFit( const ConstantParamsRegion * constRegP, const PerNucParamsRegion * perNucRegP, const int startFrame, const float * const nucRise, float A1, float A2, const float Krate1, const float Krate2, const float tau, const float gain, const float SP, const float d, const float sens_in, int c_dntp_top_ndx, const ModelFuncEvaluationOutputMode flag, const float * const emLeft, const float * const emRight, const float frac, const float * const fval_in, const float * const err, float *const aa, float *const rhs0, float *const krkr, float *const rhs1, float *const akr, const float * const deltaFrame, int endFrames ) { float sens1 = sens_in; if ( A1!=A1 ) A1=0.0001f; // safety check if (A1 < 0.0f) { A1 = -A1; sens1 = -sens1; } else if (A1 > LAST_POISSON_TABLE_COL) A1 = LAST_POISSON_TABLE_COL; if ( A1<0.0001f ) A1 = 0.0001f; // safety float sens2 = sens_in; if ( A2!=A2 ) A2=0.0001f; // safety check if (A2 < 0.0f) { A2 = -A2; sens2 = -sens2; } else if (A2 > LAST_POISSON_TABLE_COL) A2 = LAST_POISSON_TABLE_COL; if ( A2<0.0001f ) A2 = 0.0001f; // safety #if USE_TABLE_GAMMA int ileft1 = ( int ) A1; float occ_r1 = A1-ileft1; // upper mixture int iright1 = ileft1+1; float occ_l1 = 1-occ_r1; // lower mixture ileft1--; iright1--; if (ileft1 < 0) { occ_l1 = 0.0; ileft1 = 0; } if (iright1 == LAST_POISSON_TABLE_COL) { iright1 = ileft1; occ_r1 = occ_l1; occ_l1 = 0; } occ_l1 *= SP; occ_r1 *= SP; float pact1 = occ_l1 + occ_r1; int ileft2 = ( int ) A2; float occ_r2 = A2-ileft2; // upper mixture int iright2 = ileft2+1; float occ_l2 = 1-occ_r2; // lower mixture ileft2--; iright2--; if (ileft2 < 0) { occ_l2 = 0.0; ileft2 = 0; } if (iright2 == 
LAST_POISSON_TABLE_COL) { iright2 = ileft2; occ_r2 = occ_l2; occ_l2 = 0; } occ_l2 *= SP; occ_r2 *= SP; float pact2 = occ_l2 + occ_r2; #ifndef POISS_FLOAT4 const float* rptr1 = precompute_pois_params_streaming (iright1); const float* lptr1 = precompute_pois_params_streaming (ileft1); #else const float4 * LUTptr1 = precompute_pois_LUT_params_SingelFLowFit (ileft1, iright1); #endif #ifndef POISS_FLOAT4 const float* rptr2 = precompute_pois_params_streaming (iright2); const float* lptr2 = precompute_pois_params_streaming (ileft2); #else const float4 * LUTptr2 = precompute_pois_LUT_params_SingelFLowFit (ileft2, iright2); #endif #else // !USE_TABLE_GAMMA float pact1 = SP; float pact2 = SP; #endif // USE_TABLE_GAMMA const float totocc1 = SP*A1; float totgen1 = totocc1; const float totocc2 = SP*A2; float totgen2 = totocc2; // We reuse this constant every loop... const float cp_sid_kmax_nucid = perNucRegP->getKmax(); float c_dntp_sum1 = 0.0; float c_dntp_new_rate1 = 0; const float scaled_kr1 = Krate1*constRegP->getMoleculesToMicromolarConversion()/d; //CP_SINGLEFLOWFIT float red_hydro_prev1; float c_dntp_bot_plus_kmax1 = 1.0f/cp_sid_kmax_nucid; //CP_SINGLEFLOWFIT float c_dntp_sum2 = 0.0; float c_dntp_new_rate2 = 0; float fval_local1 = 0.f; float fval_local2 = 0.f; const float scaled_kr2 = Krate2*constRegP->getMoleculesToMicromolarConversion()/d; //CP_SINGLEFLOWFIT float red_hydro_prev2; float c_dntp_bot_plus_kmax2 = 1.0f/cp_sid_kmax_nucid; //CP_SINGLEFLOWFIT for (int i=startFrame;i < endFrames; i++) //CP_SINGLEFLOWFIT { float delta_frame = deltaFrame[i]; float red_hydro1 = totocc1; float red_hydro2 = totocc2; // Move memory fetches well ahead of where they're used. const float fval_in_i = fval_in[i]; if (totgen1 > 0.0f || (totgen2 > 0.f && flag == TwoParams ) ) { //CP_SINGLEFLOWFIT float ldt1 = (delta_frame/( ISIG_SUB_STEPS_SINGLE_FLOW * FRAMESPERSEC)) * (Krate1*0.5f); float ldt2 = (delta_frame/( ISIG_SUB_STEPS_SINGLE_FLOW * FRAMESPERSEC)) * (Krate2*0.5f); for (int st=1; st <= ISIG_SUB_STEPS_SINGLE_FLOW ;st++) { // All the threads should be grabbing from the same nucRise location. 
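// Each of the ISIG_SUB_STEPS_SINGLE_FLOW sub-steps below advances the in-well dNTP
// concentration with a kmax-saturated rate, accumulates the incorporation integral
// by the trapezoidal rule (ldt * (new_rate + old_rate)), looks up the surviving
// active-polymerase count from the precomputed Poisson CDF table, and reduces
// totgen by the average activity times that integral.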
float nuc_rise = nucRise[ c_dntp_top_ndx++ ]; if ( totgen1 > 0.f ) { // assume instantaneous equilibrium const float c_dntp_old_rate1 = c_dntp_new_rate1; // c_dntp_bot is concentration of dNTP in the well const float c_dntp_bot = nuc_rise / (1.0f + scaled_kr1*pact1*c_dntp_bot_plus_kmax1); c_dntp_bot_plus_kmax1 = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid); //CP_SINGLEFLOWFIT c_dntp_new_rate1 = c_dntp_bot*c_dntp_bot_plus_kmax1; float c_dntp_int1 = ldt1* (c_dntp_new_rate1+c_dntp_old_rate1); c_dntp_sum1 += c_dntp_int1; // calculate new number of active polymerase float pact_new1 = poiss_cdf_approx_float4_SingelFLowFit(c_dntp_sum1, LUTptr1, occ_l1, occ_r1); totgen1 -= ( (pact1+pact_new1) * 0.5f) * c_dntp_int1; pact1 = pact_new1; } if ( totgen2 > 0.f && flag == TwoParams ) { // assume instantaneous equilibrium const float c_dntp_old_rate2 = c_dntp_new_rate2; // c_dntp_bot is concentration of dNTP in the well const float c_dntp_bot = nuc_rise / (1.0f + scaled_kr2*pact2*c_dntp_bot_plus_kmax2); c_dntp_bot_plus_kmax2 = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid); //CP_SINGLEFLOWFIT c_dntp_new_rate2 = c_dntp_bot*c_dntp_bot_plus_kmax2; float c_dntp_int2 = ldt2* (c_dntp_new_rate2+c_dntp_old_rate2); c_dntp_sum2 += c_dntp_int2; // calculate new number of active polymerase float pact_new2 = poiss_cdf_approx_float4_SingelFLowFit(c_dntp_sum2, LUTptr2, occ_l2, occ_r2); totgen2 -= ( (pact2+pact_new2) * 0.5f) * c_dntp_int2; pact2 = pact_new2; } } if (totgen1 < 0.0f) totgen1 = 0.0f; red_hydro1 -= totgen1; if ( flag == TwoParams ) { if (totgen2 < 0.0f) totgen2 = 0.0f; red_hydro2 -= totgen2; } } float err_bxi = err[i]; // Grab this early so that we only get it once. // calculate the 'background' part (the accumulation/decay of the protons in the well // normally accounted for by the background calc) red_hydro1 *= sens1; // variables used for solving background signal shape const float one_over_two_tauB = 1.0f/ (2.0f*tau); const float aval = delta_frame*one_over_two_tauB; //CP_SINGLEFLOWFIT const float one_over_one_plus_aval = 1.0f/ (1.0f+aval); if( i == startFrame ) //CP_SINGLEFLOWFIT fval_local1 = red_hydro1; // *one_over_one_plus_aval; else fval_local1 = red_hydro1 - red_hydro_prev1 + (1.0f-aval)*fval_local1; // *one_over_one_plus_aval; red_hydro_prev1 = red_hydro1; fval_local1 *= one_over_one_plus_aval; float weight = emRight != NULL ? 
frac*emLeft[i*(MAX_POISSON_TABLE_COL)] + (1.0f - frac)*emRight[i*(MAX_POISSON_TABLE_COL)] : emLeft[i*(MAX_POISSON_TABLE_COL)]; float jac_1 = weight * (fval_local1*gain - fval_in_i) * 1000.0f; *aa += jac_1 * jac_1; *rhs0 += (jac_1 * err_bxi); if ( flag == TwoParams ) { // calculate the 'background' part (the accumulation/decay of the protons in the well // normally accounted for by the background calc) red_hydro2 *= sens2; if( i == startFrame ) //CP_SINGLEFLOWFIT fval_local2 = red_hydro2; // *one_over_one_plus_aval; else fval_local2 = red_hydro2 - red_hydro_prev2 + (1.0f-aval)*fval_local2; // *one_over_one_plus_aval; red_hydro_prev2 = red_hydro2; fval_local2 *= one_over_one_plus_aval; float jac_2 = weight * (fval_local2*gain - fval_in_i) * 1000.0f; *akr += jac_1 * jac_2; *rhs1 += jac_2 * err_bxi; *krkr += jac_2 * jac_2; } // end flag == TwoParams } // loop over i } __device__ void Keplar_ModelFuncEvaluationForSingleFlowFit( const ConstantParamsRegion * constRegP, const PerNucParamsRegion * perNucRegP, const bool twoParamFit, const int startFrame, const float * nucRise, float A, const float Krate, const float tau, const float gain, const float SP, const float d, float sens, int c_dntp_top_ndx, float* fval, const float* deltaFrame, int endFrames, const ModelFuncEvaluationOutputMode flag, float * jac_out = NULL, const float * emLeft = NULL, const float * emRight = NULL, const float frac = 0, const float * fval_in = NULL, const float * err = NULL, float *aa = NULL, float *rhs0 = NULL, float *krkr = NULL, float *rhs1 = NULL, float *akr = NULL ) { if ( A!=A ) A=0.0001f; // safety check if (A < 0.0f) { A = -A; sens = -sens; } else if (A > LAST_POISSON_TABLE_COL) A = LAST_POISSON_TABLE_COL; if ( A<0.0001f ) A = 0.0001f; // safety int ileft, iright; float ifrac, idelta; // step 2 float occ_l,occ_r; float totocc; float totgen; float pact; int i, st; // step 3 float ldt; // step 4 float c_dntp_int; float pact_new; ileft = ( int ) A; idelta = A-ileft; iright = ileft+1; ifrac = 1-idelta; ileft--; iright--; occ_l = ifrac; // lower mixture occ_r = idelta; // upper mixture if (ileft < 0) { occ_l = 0.0; ileft = 0; } if (iright == LAST_POISSON_TABLE_COL) { iright = ileft; occ_r = occ_l; occ_l = 0; } occ_l *= SP; occ_r *= SP; pact = occ_l + occ_r; totocc = SP*A; totgen = totocc; const float4 * LUTptr = precompute_pois_LUT_params_SingelFLowFit (ileft, iright); // We reuse this constant every loop... float cp_sid_kmax_nucid = perNucRegP->getKmax(); float c_dntp_bot = 0.0; // concentration of dNTP in the well float c_dntp_sum = 0.0; float c_dntp_old_rate = 0; float c_dntp_new_rate = 0; float scaled_kr = Krate*constRegP->getMoleculesToMicromolarConversion()/d; //CP_SINGLEFLOWFIT float half_kr = Krate*0.5f; // variables used for solving background signal shape float aval = 0.0f; //new Solve HydrogenFlowInWell float one_over_two_tauB = 1.0f; float one_over_one_plus_aval = 1.0f/ (1.0f+aval); float red_hydro_prev; float fval_local = 0.0f; float red_hydro; float c_dntp_bot_plus_kmax = 1.0f/cp_sid_kmax_nucid; //CP_SINGLEFLOWFIT for (i=startFrame;i < endFrames; i++) //CP_SINGLEFLOWFIT { if (totgen > 0.0f) { ldt = (deltaFrame[i]/( ISIG_SUB_STEPS_SINGLE_FLOW * FRAMESPERSEC)) * half_kr; //CP_SINGLEFLOWFIT for (st=1; (st <= ISIG_SUB_STEPS_SINGLE_FLOW) && (totgen > 0.0f);st++) { // assume instantaneous equilibrium c_dntp_old_rate = c_dntp_new_rate; // All the threads should be grabbing from the same nucRise location. 
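// (Explanatory note added for clarity; not from the original source.) Besides the incorporation
// sub-steps computed in the statements right below, each pass of the enclosing frame loop pushes
// the incorporated ("red") hydrogen through the well buffering model. The update used further down,
//   fval_i = ( red_hydro_i - red_hydro_(i-1) + (1 - a) * fval_(i-1) ) / (1 + a),  a = deltaFrame / (2 * tau),
// corresponds to a trapezoidal discretization of  tau * dS/dt + S = tau * dR/dt,
// i.e. the well signal S follows changes in red hydrogen R with buffering time constant tau (tauB).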
c_dntp_bot = LDG_ACCESS(nucRise, c_dntp_top_ndx++) / (1.0f + scaled_kr*pact*c_dntp_bot_plus_kmax); c_dntp_bot_plus_kmax = 1.0f/ (c_dntp_bot + cp_sid_kmax_nucid); //CP_SINGLEFLOWFIT c_dntp_new_rate = c_dntp_bot*c_dntp_bot_plus_kmax; c_dntp_int = ldt* (c_dntp_new_rate+c_dntp_old_rate); c_dntp_sum += c_dntp_int; // calculate new number of active polymerase pact_new = poiss_cdf_approx_float4_SingelFLowFit(c_dntp_sum, LUTptr, occ_l, occ_r); totgen -= ( (pact+pact_new) * 0.5f) * c_dntp_int; pact = pact_new; } if (totgen < 0.0f) totgen = 0.0f; red_hydro = (totocc-totgen); }else{ red_hydro = totocc; } // calculate the 'background' part (the accumulation/decay of the protons in the well // normally accounted for by the background calc) red_hydro *= sens; one_over_two_tauB = 1.0f/ (2.0f*tau); aval = LDG_ACCESS(deltaFrame, i)*one_over_two_tauB; one_over_one_plus_aval = 1.0f/ (1.0f+aval); if(i==startFrame) //CP_SINGLEFLOWFIT fval_local = red_hydro; // *one_over_one_plus_aval; else fval_local = red_hydro - red_hydro_prev + (1.0f-aval)*fval_local; // *one_over_one_plus_aval; red_hydro_prev = red_hydro; fval_local *= one_over_one_plus_aval; switch( flag ) { case NoOutput: fval[i] = fval_local * gain; break; case OneParam: case TwoParams: float weight = emRight != NULL ? frac*emLeft[i*(MAX_POISSON_TABLE_COL)] + (1.0f - frac)*emRight[i*(MAX_POISSON_TABLE_COL)] : emLeft[i*(MAX_POISSON_TABLE_COL)]; float err_bxi = err[i]; // Grab this early so that we only get it once. float jac_tmp = weight * (fval_local*gain - fval_in[i]) * 1000.0f; if(flag==OneParam){ jac_out[i] = jac_tmp; *aa += jac_tmp * jac_tmp; if (!twoParamFit) *rhs0 += (jac_tmp * err_bxi); } else { // Two params. float my_jac_out = jac_out[i]; // Only grab it from memory once. *akr += my_jac_out * jac_tmp; *rhs0 += my_jac_out * err_bxi; *rhs1 += jac_tmp * err_bxi; *krkr += jac_tmp * jac_tmp; } break; } } } __device__ void ZeromerCorrectionFromRawTrace( const float* bkgTrace, const short* rawTrace, const float* beadParamCube, const float* regionFrameCube, const float* deltaFrames, #if FG_TRACES_REZERO const float dcOffset, #endif const float darkness, const float etbR, const float gain, const float tauB, const int num_frames, const int frameStride, const int regionFrameStride, float* correctedTrace, //TraceLevelXTalk const float * XTalkContribution, const float * genericXTalk ) { float R = etbR - 1.0f; float dv = 0.0f; float dv_rs = 0.0f; float dvn = 0.0f; float aval; float curSbgVal, deltaFrameVal; // printf("fg after PreSingleFit\n"); //T*** REMOVE!! DEBUG ONLY for (int i=0; i<num_frames; ++i) { deltaFrameVal = LDG_ACCESS(deltaFrames, i); #ifdef EMPTY_TRACES_REZERO_SHARED curSbgVal = bkgTrace[i]; #else #ifdef USE_INTERPOLATED_EMPTY curSbgVal = bkgTrace[i]; #else curSbgVal = LDG_ACCESS(bkgTrace, i); #endif #endif if(ConfigP.PerformTraceLevelXTalk()){ curSbgVal += (*XTalkContribution) - genericXTalk[i]; XTalkContribution += frameStride; } aval = deltaFrameVal/(2.0f * tauB); dvn = (R*curSbgVal - dv_rs/tauB - dv*aval) / (1.0f + aval); dv_rs += (dv+dvn) * deltaFrameVal * 0.5f; dv = dvn; correctedTrace[i] = (float)(*rawTrace) #if FG_TRACES_REZERO - dcOffset #endif - ((dv+curSbgVal)*gain + ApplyDarkMatterToFrame( beadParamCube, regionFrameCube, darkness, i, num_frames, frameStride, regionFrameStride ) ); rawTrace += frameStride; // printf("%f ",correctedTrace[i] ); //T*** REMOVE!! DEBUG ONLY } // printf("\n"); //T*** REMOVE!! 
DEBUG ONLY } __device__ void ExponentialTailFitCorrection( const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const float* bkgTrace, const float* frameNumber, const float Ampl, const float adjustedTauB, const int num_frames, float* correctedTrace ) { float kern[7]; if (adjustedTauB > 0.0f) { float tmid = ComputeMidNucTime(perFlowRegP->getTMidNuc(), perFlowRegP, perNucRegP); // set up start and end point for exponential tail float tail_start = tmid + 6.0f + 1.75f * Ampl; int tail_start_idx = -1, tail_end_idx = -1; for (int i=0; i<num_frames; ++i) { if ((tail_start_idx == -1) && frameNumber[i] >= tail_start) tail_start_idx = i; if ((tail_end_idx == -1) && frameNumber[i] >= (tail_start + 60.0f)) tail_end_idx = i; } if (tail_start_idx == -1) return; if (tail_end_idx == -1) tail_end_idx = num_frames; // too few points int tailLen = tail_end_idx - tail_start_idx; if (tailLen >= 5) { // Generate smoothing kernel vector. Distance from the point is +/- 3 so need // 7 weights int exp_kern_start = tailLen < 7 ? (tail_end_idx - 7) : tail_start_idx; float taubInv = 1.0f / adjustedTauB; GenerateSmoothingKernelForExponentialTailFit_dev( frameNumber, 7, taubInv, exp_kern_start, kern); //CP_SINGLEFLOWFIT // perform kernel smoothing on exponential tail float avg_bkg_amp_tail = 0; float lhs_01=0,lhs_11=0, rhs_0=0, rhs_1=0; for (int i=tail_start_idx; i<tail_end_idx; ++i) { float sum=0,scale=0; for (int j=i-3, k=0; j <= (i+3); ++j, ++k) { if (j >= 0 && j < num_frames) { sum += (kern[k] * correctedTrace[j]); scale += kern[k]; } } float tmp_fval = sum / scale; #ifdef EMPTY_TRACES_REZERO_SHARED avg_bkg_amp_tail += bkgTrace[i]; #else #ifdef USE_INTERPOLATED_EMPTY avg_bkg_amp_tail += bkgTrace[i]; #else avg_bkg_amp_tail += LDG_ACCESS(bkgTrace, i); #endif #endif // linear regression to calculate A and C in Aexp(-(t-t0)/taub) + C // First calculate lhs and rhs matrix entries which are obtained by taking // derivative of the squared residual (y - (Aexp(-(t-t0)/taub) + C))^2 w.r.t // A and C to 0 which gives two linear equations in A and C float expval = __expf((-frameNumber[i] + frameNumber[tail_start_idx])*taubInv); lhs_01 += expval; lhs_11 += expval*expval; rhs_0 += tmp_fval; rhs_1 += tmp_fval*expval; } float detInv = 1.0f / (tailLen*lhs_11 - lhs_01*lhs_01); float C = (lhs_11*rhs_0 - lhs_01*rhs_1) * detInv; float A = (-lhs_01*rhs_0 + tailLen*rhs_1) * detInv; // if negative then no incorporation if (A < -20.0f) { C = rhs_0 / tailLen; } avg_bkg_amp_tail /= tailLen; if (avg_bkg_amp_tail > ConstGlobalP.getTailDClowerBound()) { C /= avg_bkg_amp_tail; clampT(C, -ConstGlobalP.getScaleLimit(), ConstGlobalP.getScaleLimit()); } else C = 0; // printf("fg after exptail: \n");//T*** REMOVE!! DEBUG ONLY for (int i=0; i<num_frames; ++i) { #ifdef EMPTY_TRACES_REZERO_SHARED correctedTrace[i] -= C*bkgTrace[i]; #else #ifdef USE_INTERPOLATED_EMPTY correctedTrace[i] -= C*bkgTrace[i]; #else correctedTrace[i] -= C*LDG_ACCESS(bkgTrace, i); #endif #endif // printf("%f ", correctedTrace[i]); //T*** REMOVE!! DEBUG ONLY } // printf("\n"); //T*** REMOVE!! 
DEBUG ONLY } } } __device__ float ResidualCalculationPerFlow( const int startFrame, const float* rawTrace, const float* fval, const float* emLeft, const float* emRight, const float frac, float* err, const int nonZeroEmpFrames) { float e; float weight; float wtScale = 0; float residual = 0; int i; for (i=0; i<nonZeroEmpFrames; ++i) { weight = (emRight != NULL) ?( frac* (*emLeft) + (1.0f - frac)*emRight[i*(MAX_POISSON_TABLE_COL)]) :( (*emLeft)); if (i < startFrame) e = weight * rawTrace[i]; else err[i] = e = weight * (rawTrace[i] - fval[i]); residual += e*e; wtScale += weight*weight; emLeft += (MAX_POISSON_TABLE_COL); } residual /= wtScale; return residual; } __device__ float CalculateMeanResidualErrorPerFlow( const int startFrame, const float* rawTrace, const float* fval, const float* weight, // highest hp weighting emphasis vector const int num_frames) { float wtScale = 0.0f; float residual = 0; float e; float wt; for (int i=0; i<num_frames; ++i) { wt = *weight; wtScale += wt * wt; if (i < startFrame) e = wt * rawTrace[i]; else e = wt * (rawTrace[i] - fval[i]); residual += e*e; weight += MAX_POISSON_TABLE_COL; } residual = sqrtf(residual/wtScale); return residual; } //global PLimits containing limits //global Image and Region params in constant as ImgRegP //global config flags are available in constant mem symbol: ConfigP __device__ void SingleFlowFitUsingRelaxKmultGaussNewton( float* ResultCube, //Ampl, kmult, avg_error, points to correct Ampl value, stride == frameStride //per bead //in parameters const unsigned short * BStateMask, //needed to update corrupt state const short * RawTraces, // imgW*imgHxF const float * BeadParamCube, //Copies, R, dmult, gain, tau_adj, phi, stride == frameStride const float* fineemphasisVec, //(MAX_POISSON_TABLE_COL)*F const int * finenonZeroEmphFrames, const float* crudeemphasisVec, //(MAX_POISSON_TABLE_COL)*F const int * crudenonZeroEmphFrames, const float* finenucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F const float* coarsenucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F //per region const ConstantParamsRegion * constRegP, const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const float * RegionFrameCube, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const float * EmptyTraceAvg, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber #if FG_TRACES_REZERO const float dcOffset, #endif // other inputs const size_t num_frames, // 4 const size_t frameStride, //stride from one CUBE plane to the next for the Per Well Cubes const size_t regionFrameStride,//, //stride in Region Frame Cube to get to next parameter const size_t emphStride, //TraceLevelXTalk const float * XTalkContribution, const float * genericXTalk //bool print //int * maxIterWarp = NULL // float * fgBufferFloat ) { float correctedTrace[MAX_COMPRESSED_FRAMES_GPU]; float fval[MAX_COMPRESSED_FRAMES_GPU]; float tmp_fval[MAX_COMPRESSED_FRAMES_GPU]; float err[MAX_COMPRESSED_FRAMES_GPU]; #if __CUDA_ARCH__ >= 350 float jac[MAX_COMPRESSED_FRAMES_GPU]; #endif // right now getting bead params in the order they were in bead_params struct const float copies = *(BeadParamCube + BpCopies*frameStride); const float R = *(BeadParamCube + BpR*frameStride); const float d = (*(BeadParamCube + BpDmult*frameStride)) * perNucRegP->getD(); // effective diffusion const float gain = *(BeadParamCube + BpGain*frameStride); *(ResultCube + ResultKmult*frameStride) = 1.0f; float kmult = 1.0f; //*(ResultCube + ResultKmult*frameStride); // calculate empty to bead ratio and buffering 
const float etbR = ComputeETBR(perNucRegP, perFlowRegP->getRatioDrift(), R, copies); const float tauB = ComputeTauB(constRegP, etbR); const float SP = ComputeSP(perFlowRegP->getCopyDrift(), copies); const float* bkgTrace = EmptyTraceAvg;//RegionFrameCube + RfBkgTraces*regionFrameStride; //const float* bkgTrace = RegionFrameCube + RfBkgTraces*regionFrameStride; const float* deltaFrames = RegionFrameCube + RfDeltaFrames*regionFrameStride; // zeromer correction ZeromerCorrectionFromRawTrace( bkgTrace, RawTraces, BeadParamCube, RegionFrameCube, deltaFrames, #if FG_TRACES_REZERO dcOffset, #endif perFlowRegP->getDarkness(), etbR, gain, tauB, num_frames, frameStride, regionFrameStride, correctedTrace, XTalkContribution, genericXTalk ); /*if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0) { printf("Corrected Trace\n"); for (int fr=0; fr<num_frames; ++fr) printf("%f ", correctedTrace[fr]); printf("\n"); printf("RawTrace Trace\n"); for (int fr=0; fr<num_frames; ++fr) printf("%f ", (float)(RawTraces[fr*frameStride])); printf("\n"); printf("Empty Trace\n"); for (int fr=0; fr<num_frames; ++fr) printf("%f ", bkgTrace[fr]); printf("\n"); }*/ // projection search for initial ampl estimates float Ampl = ProjectionSearch( constRegP, perFlowRegP, perNucRegP, correctedTrace, crudeemphasisVec, crudenonZeroEmphFrames[0], coarsenucRise, deltaFrames, kmult, d, tauB, gain, SP, tmp_fval, perFlowRegP->getCoarseStart(), frameStride, 1, ISIG_SUB_STEPS_MULTI_FLOW //print ); #if !PROJECTION_ONLY if( Match(BStateMask,BkgMaskPolyClonal)){ #endif *(ResultCube + ResultAmpl*frameStride) = Ampl; return; #if !PROJECTION_ONLY } #endif // exponential tail fit if(ConfigP.PerformExpTailFitting() && ConfigP.PerformBkgAdjInExpTailFit()){ const float adjustTauB = tauB * (*(BeadParamCube + BpTauAdj*frameStride)); const float* frameNumber = RegionFrameCube + RfFrameNumber*regionFrameStride; ExponentialTailFitCorrection( perFlowRegP, perNucRegP, bkgTrace, frameNumber, Ampl, adjustTauB, num_frames, correctedTrace); } //used and done with // copies // R // etbR // adjustTauB // //used and used again // tmp_fval (gets overwritten in modelfunction dump would not be needed) // correctedTrace, // d // gain // kmult // tauB // SP // Ampl // not used yet // fval // err // jac // perform gauss newton fit float localMinKmult = ConstGlobalP.getMinKmult(); float localMaxKmult= ConstGlobalP.getMaxKmult(); const bool twoParamFit = ConfigP.FitKmult() || ( copies * Ampl > ConstGlobalP.getAdjKmult() ); if (twoParamFit) kmult = ConstGlobalP.getMinKmult(); float residual, newresidual; // These values before start are always zero since there is no nucrise yet. Don't need to // zero it out. Have to change the residual calculation accordingly for the frames before the // start. 
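// (Explanatory note added for clarity; not from the original source.) The relax-kmult loop below is
// an (up to) two-parameter Gauss-Newton fit of (Ampl, kmult). Each iteration builds finite-difference
// Jacobian columns with step 0.001 (the later * 1000.0f rescales the difference back to a derivative),
// accumulates the emphasis-weighted normal equations
//   [ aa   akr  ] [ delta0 ]   [ rhs0 ]        aa   = sum_i jA_i*jA_i,  akr  = sum_i jA_i*jK_i,
//   [ akr  krkr ] [ delta1 ] = [ rhs1 ],       krkr = sum_i jK_i*jK_i,  rhs* = sum_i j*_i * err_i,
// and solves the 2x2 system in closed form (delta0 = Ampl step, delta1 = kmult step). When only Ampl
// is fitted this degenerates to delta0 = rhs0 / aa.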
float sens = constRegP->getSens() * SENSMULTIPLIER; int relax_kmult_pass = 0; int startFrame = perFlowRegP->getFineStart(); while (relax_kmult_pass < 2) { // first step // Evaluate model function using input Ampl and Krate and get starting residual #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit( constRegP, perNucRegP, twoParamFit, startFrame, finenucRise, Ampl, kmult*perNucRegP->getKrate(), tauB, gain, SP, d, sens, ISIG_SUB_STEPS_SINGLE_FLOW*startFrame, fval, deltaFrames, num_frames, NoOutput); #else BkgModelRedTraceCalculation( constRegP, perNucRegP, startFrame, finenucRise, Ampl, kmult*perNucRegP->getKrate(), tauB, gain, SP, d, sens, ISIG_SUB_STEPS_SINGLE_FLOW * startFrame, fval, deltaFrames, ISIG_SUB_STEPS_SINGLE_FLOW, num_frames); #endif const float *emLeft, *emRight; // calculating weighted sum of square residuals for the convergence test const float EmphSel = Ampl; int nonZeroEmpFrames; float frac = BlockLevel_DecideOnEmphasisVectorsForInterpolation( finenonZeroEmphFrames, &emLeft, &emRight, EmphSel, fineemphasisVec, num_frames, nonZeroEmpFrames); residual = ResidualCalculationPerFlow( startFrame, correctedTrace, fval, emLeft, emRight, frac, err, nonZeroEmpFrames); // printf("DEBUG: start residual %f\n",residual); //T*** REMOVE!! DEBUG ONLY // new Ampl and Krate generated from the Lev mar Fit float newAmpl, newKmult; float delta0 = 0, delta1 = 0; int iter; int done = 0; for (iter = 0; iter < ITER; ++iter) { if ((delta0 * delta0) < 0.0000025f) done++; else done = 0; if (done > 1) break; // new Ampl and krate by adding delta to existing values newAmpl = Ampl + 0.001f; newKmult = (twoParamFit)?(kmult + 0.001f):(kmult); // Evaluate model function for new Ampl keeping Krate constant float aa = 0, akr= 0, krkr = 0, rhs0 = 0, rhs1 = 0; #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit( constRegP, perNucRegP, twoParamFit, startFrame, finenucRise, newAmpl, kmult*perNucRegP->getKrate(), tauB, gain, SP, d, sens, startFrame*ISIG_SUB_STEPS_SINGLE_FLOW, tmp_fval, deltaFrames, nonZeroEmpFrames, OneParam, jac, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr); if (twoParamFit) Keplar_ModelFuncEvaluationForSingleFlowFit( constRegP, perNucRegP, twoParamFit, startFrame, finenucRise, Ampl, newKmult*perNucRegP->getKrate(), tauB, gain, SP, d, sens, startFrame*ISIG_SUB_STEPS_SINGLE_FLOW, tmp_fval, deltaFrames, nonZeroEmpFrames, TwoParams, jac, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr); #else Fermi_ModelFuncEvaluationForSingleFlowFit( constRegP, perNucRegP, startFrame, finenucRise, newAmpl, Ampl, kmult*perNucRegP->getKrate(), newKmult*perNucRegP->getKrate(), tauB, gain, SP, d, sens, startFrame*ISIG_SUB_STEPS_SINGLE_FLOW, twoParamFit ? TwoParams : OneParam, emLeft, emRight, frac, fval, err, &aa, &rhs0, &krkr, &rhs1, &akr, deltaFrames, nonZeroEmpFrames); #endif // Now start the solving. if(twoParamFit){ const float det = 1.0f / (aa*krkr - akr*akr); delta1 = (-akr*rhs0 + aa*rhs1)*det; delta0 = (krkr*rhs0 - akr*rhs1)*det; }else delta0 = rhs0 / aa; if( !::isnan(delta0) && !::isnan(delta1)){ // add delta to params to obtain new params newAmpl = Ampl + delta0; if(twoParamFit) newKmult = kmult + delta1; clampT(newAmpl, ConstGlobalP.getMinAmpl(), (float)LAST_POISSON_TABLE_COL); if(twoParamFit)clampT(newKmult, localMinKmult, localMaxKmult); // printf("DEBUG: %d newAmpl %f\n", iter, newAmpl); //T*** REMOVE!! 
DEBUG ONLY // Evaluate using new params if (ConfigP.UseDynamicEmphasis()) frac = BlockLevel_DecideOnEmphasisVectorsForInterpolation( finenonZeroEmphFrames, &emLeft, &emRight, newAmpl, fineemphasisVec, num_frames, nonZeroEmpFrames); #if __CUDA_ARCH__ >= 350 Keplar_ModelFuncEvaluationForSingleFlowFit( constRegP, perNucRegP, twoParamFit, startFrame, finenucRise, newAmpl, newKmult*perNucRegP->getKrate(), tauB, gain, SP, d, sens, startFrame*ISIG_SUB_STEPS_SINGLE_FLOW, tmp_fval, deltaFrames, num_frames, NoOutput); #else BkgModelRedTraceCalculation( constRegP, perNucRegP, startFrame, finenucRise, newAmpl, newKmult*perNucRegP->getKrate(), tauB, gain, SP, d, sens, startFrame*ISIG_SUB_STEPS_SINGLE_FLOW, tmp_fval, deltaFrames, ISIG_SUB_STEPS_SINGLE_FLOW, num_frames); #endif // residual calculation using new parameters newresidual = ResidualCalculationPerFlow( startFrame, correctedTrace, tmp_fval, emLeft, emRight, frac, err, nonZeroEmpFrames); // printf("DEBUG: %d residual %f\n", iter, newresidual); //T*** REMOVE!! DEBUG ONLY if (newresidual < residual) { Ampl = newAmpl; if(twoParamFit)kmult = newKmult; // copy new function val to fval for (int i=startFrame; i<num_frames; ++i) { fval[i] = tmp_fval[i]; } residual = newresidual; } else { if (ConfigP.UseDynamicEmphasis()) { frac = BlockLevel_DecideOnEmphasisVectorsForInterpolation( finenonZeroEmphFrames, &emLeft, &emRight, Ampl, fineemphasisVec, num_frames, nonZeroEmpFrames); } } } else { delta0 = 0; delta1 = 0; } } // end ITER loop //DEBUG (rawtrase const?) //RawTraces[(ConstFrmP.getRawFrames() - 2 + relax_kmult_pass)*frameStride] = iter; //threadIdx.x%9; // if(relax_kmult_pass == 0) atomicMax(maxIterWarp,iter); // printf("DEBUG: done in pass %d at iter %d\n",relax_kmult_pass, iter-1); //T*** REMOVE!! DEBUG ONLY // probably slower incorporation if ((kmult - localMinKmult) < 0.01f) { if (sqrtf(residual) > 20.0f) { localMaxKmult = localMinKmult; //kmult = 0.3f; localMinKmult = 0.3f; relax_kmult_pass++; continue; } } relax_kmult_pass = 2; }// end relax_kmult_pass loop if(twoParamFit) *(ResultCube + ResultKmult*frameStride) = kmult; *(ResultCube + ResultAmpl*frameStride) = Ampl; //*(ResultCube + ResultAmpl*frameStride) = (Ampl * pow(perFlowRegP->getCopyDrift(), ConstFlowP.getRealFnum()) * copies); residual = CalculateMeanResidualErrorPerFlow( startFrame, correctedTrace, fval, fineemphasisVec+LAST_POISSON_TABLE_COL, num_frames); // printf("DEBUG: final residual %f\n",residual); //T*** REMOVE!! 
DEBUG ONLY float avg_err = *(ResultCube + ResultAvgErr*frameStride) * ConstFlowP.getRealFnum(); avg_err = (avg_err + residual) / (ConstFlowP.getRealFnum() + 1); *(ResultCube + ResultAvgErr*frameStride) = avg_err; //int high_err_cnt = 0; //avg_err *= WASHOUT_THRESHOLD; //for (int flow_ndx = flow_end - 1; flow_ndx >= 0 // && (meanErr[num_beads* flow_ndx] > avg_err); flow_ndx--) // high_err_cnt++; //if (high_err_cnt > WASHOUT_FLOW_DETECTION) // pState->corrupt = true; } /* // execute with one warp per row and 2D thread blocks of width warp length // each warp will slide across one row of the region // kernel parameters: // thread block dimensions (WARPSIZE,n,1) //n = number of warps per block) // grid dimension ( numRegions.x, (imgH + n-1)/n, 1) // one block per region in x direction and one per n img rows in y direction // const execParams ep, moved to constant memory as ExecP // const ImgRegParams moved to constant memory as ImgRegP __global__ void ExecuteThreadBlockPerRegion2DBlocks( const unsigned short * RegionMask, const unsigned short * bfMask, unsigned short * bstateMask, //per bead //in parameters const short * RawTraces, // NxF const float * BeadParamCube, const float* emphasisVec, //(MAX_POISSON_TABLE_COL)*F const int * nonZeroEmphFrames, const float* nucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F //in out parameters float* ResultCube, const size_t * numFramesRegion, //constant memory? const int * numLBeadsRegion, //constant memory? //per region const ConstantParamsRegion * constRegP, const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const float * RegionFrameCube, //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber const float * EmptyTraceRegion //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber //DEBUG buffer //int * numLBeads//, //ToDo only for debuging // float * fgBufferFloat ) { extern __shared__ float emphasis[]; #if EMPTY_IN_SHARED float * smEmptyTrace = emphasis + MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames(); #endif //region on chip //determine region location const size_t regionCol = blockIdx.x; const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH(); //image coordinates size_t ix = regionCol * ImgRegP.getRegW() + threadIdx.x; const size_t iy = (blockIdx.y*blockDim.y) + threadIdx.y; size_t idx = ImgRegP.getWellIdx(ix,iy); //region coordinates int rx = threadIdx.x; // int leftshift = (idx-threadIdx.x)%32; //if(idx == imgRegP.getWellIdx(5,149) print = true; //Shift block to the left so thread 0 of the block aligns with //a 128 byte or at least 64 byte (for short) alignment boundary // rx -= leftshift; // idx -= leftshift; //region index to address region specific parameters //does not work if any blockDim > RegionDim //const size_t regId = ImgRegP.getRegId(ix,iy); //use regionCol and Row instead const size_t regId = regionRow*ImgRegP.getGridDimX()+regionCol; size_t numf = numFramesRegion[regId]; /////////////////////////////////////////////////// //If the Region does not have any useful data frames will be 0 or numLBeads will be 0, so nothing has to be done if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; //if( LDG_ACCESS(numLBeadsRegion,regId) == 0) return; // no live beads in region, no more work for this thread block if (numf == 0) return; //strides const size_t BeadFrameStride = ImgRegP.getPlaneStride(); const size_t RegionFrameStride = ConstFrmP.getMaxCompFrames() * ImgRegP.getNumRegions(); const size_t windowSize = blockDim.x; //if EmptyTraces from GenerateBeadTrace Kernel padding is uncompressed frames 
const float * emptyTraceAvg = EmptyTraceRegion + regId*ConstFrmP.getUncompFrames(); RegionFrameCube += regId*ConstFrmP.getMaxCompFrames(); //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber //////////////////////////////////////////////////////////// // setup code that needs to be done by all threads emphasisVec += regId * MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames(); // //careful when accessing pointers since threads that would work out of bounds //are not yet filtered at this point if(blockDim.x == 32){ const int numWarps = blockDim.y; const int threadWarpIdx = threadIdx.x; const int warpIdx = threadIdx.y; for(int i=warpIdx; i<numf; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = emphasisVec[numf*threadWarpIdx + i ]; } }else{ const int numthreads = blockDim.x*blockDim.y; const int numWarps = numthreads/32; const int absThreadIdx = threadIdx.y*blockDim.x+threadIdx.x; const int threadWarpIdx = absThreadIdx%32; const int warpIdx = absThreadIdx/32; for(int i=warpIdx; i<numf; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = emphasisVec[numf*threadWarpIdx + i ]; } } size_t emphStride = MAX_POISSON_TABLE_COL; //update per region pointers constRegP += regId; perFlowRegP += regId; //point to correct nuc perNucRegP += ImgRegP.getNumRegions() * ConstFlowP.getNucId() + regId; nonZeroEmphFrames += regId*MAX_POISSON_TABLE_COL; nucRise += regId * ISIG_SUB_STEPS_SINGLE_FLOW * ConstFrmP.getMaxCompFrames() ; float rezero_t_start = perFlowRegP->getTMidNuc()+perFlowRegP->getTMidNucShift(); #if EMPTY_TRACES_REZERO_SHARED_UNCOMPRESSED_INPUT if( threadIdx.y == 0){ // only first warp float * sm = smEmptyTrace; const float * frameNumber = (RegionFrameCube+RegionFrameStride*RfFrameNumber); float dcoffset = ComputeDcOffsetForUncompressedTrace(emptyTraceAvg,ConstFrmP.getUncompFrames(),constRegP->getTimeStart(), rezero_t_start-MAGIC_OFFSET_FOR_EMPTY_TRACE); for(int fn = threadIdx.x; fn < numf; fn+=blockDim.x){ TShiftAndPseudoCompressionOneFrame(sm,emptyTraceAvg,frameNumber, perFlowRegP->getTshift(), fn, ConstFrmP.getUncompFrames(),dcoffset); } __syncthreads(); // guarantee sm writes are completed and visiable within block } emptyTraceAvg = smEmptyTrace; #endif #if EMPTY_TRACES_REZERO_SHARED_COMPRESSED_INPUT && !EMPTY_TRACES_REZERO_SHARED_UNCOMPRESSED_INPUT if( threadIdx.y == 0){ // only first warp volatile float * sm = smEmptyTrace; const float * frameNumber = (RegionFrameCube+RegionFrameStride*RfFrameNumber); for(int fn = threadIdx.x; fn < numf; fn+=blockDim.x){ sm[fn] = LDG_ACCESS(emptyTraceAvg, fn); } //__syncthreads(); // guarantee sm writes are completed and visiable within block float dcoffset = ComputeDcOffsetForCompressedTrace(smEmptyTrace,1,frameNumber,constRegP->getTimeStart(),rezero_t_start-MAGIC_OFFSET_FOR_EMPTY_TRACE, numf); for(int fn = threadIdx.x; fn < numf; fn+=blockDim.x){ sm[fn] -= dcoffset; } //__syncthreads(); } emptyTraceAvg = smEmptyTrace; #endif __syncthreads(); //end all thread setup code, now excess threads can drop out //////////////////////////////////////////////////////////// //if(idx == 20){ // printf("input trace \n"); // for( size_t i = 0 ; i < *numFrames; i++){ // printf("%d, ", RawTraces[i * ImgRegP.getPlaneStride()]); // } // printf("\n"); //} //filter blocks that are outside the region in y-direction (ToDO: when 2d blocks this has to be done per warp after the all threads tasks are completed) const size_t ry = iy%ImgRegP.getRegH(); if( ! 
ImgRegP.isValidIdx(idx) || ry >= ImgRegP.getRegH(regId)) return; //get actual region Width const size_t regionWidth = ImgRegP.getRegW(regId); //update bead pointers to base for bfMask += idx; bstateMask += idx; RawTraces += idx; BeadParamCube += idx; ResultCube += idx; //fgBufferFloat += idx; // if (threadIdx.x == 0) printf(" %d %d %lu %lf %x %lf \n", rx, leftshift, idx, idx/32.0, RawTraces, ((size_t)(RawTraces))/128.0); //if(blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0){ //printf("Flow %d (%d) NucId from Kernel: %d \n ",ConstFlowP.getRealFnum(), ConstFlowP.getFlowIdx(), ConstFlowP.getNucId()); // printf("Flow %d nucrise: ",ConstFlowP.getRealFnum() ); // for(int i=0; i<ISIG_SUB_STEPS_SINGLE_FLOW*numf;i++) // printf("%f,",nucRise[i]); // printf("\n"); // } //sliding window if thread block is too small to handle a whole row in a region while(rx < regionWidth){ //while thread inside region //do not do work if thread points to a well left of the region boundary // if(rx >= 0){ // if (threadIdx.x < 4 && blockIdx.y%224 == 0) printf("x %d bx %d by %d rx %d idx %lu %lf %x %lf \n",threadIdx.x, blockIdx.x, blockIdx.y, rx, idx, idx/32.0, RawTraces, ((size_t)(RawTraces))/128.0); if(Match(bfMask,MaskLive)){ //printf( "x: %d y: %lu\n",rx, ry) ; //atomicAdd(numLBeads,1); if(!Match(bstateMask,BkgMaskCorrupt)){ //here we know all the coordinates and if the bead is live and not corrupted... //so lets do some work! //printf("%d, %d in block %d, %d I am alive!\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y); // bool print = false; // if(rx ==191 && ry == 43) print = true; #if FG_TRACES_REZERO float dcOffset = ComputeDcOffsetForCompressedTrace ( RawTraces, BeadFrameStride, RegionFrameCube + RegionFrameStride * RfFrameNumber, constRegP->getTimeStart(), rezero_t_start - perFlowRegP->getSigma(),numf ); #endif SingleFlowFitUsingRelaxKmultGaussNewton( //per Bead ResultCube, //Ampl, kmult, avg_error, points to correct Ampl value, stride == frameStride bstateMask, RawTraces, // imgW*imgHxF BeadParamCube, //Copies, R, dmult, gain, tau_adj, phi, stride == frameStride //per region emphasis, //(MAX_POISSON_TABLE_COL)*F nonZeroEmphFrames, nucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F constRegP, perFlowRegP, perNucRegP, RegionFrameCube, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber emptyTraceAvg, //smEmptyTrace //EmptyTraceRegion #if FG_TRACES_REZERO dcOffset, #endif // other scalar inputs numf, // 4 //strides BeadFrameStride, RegionFrameStride, emphStride ); } } //end work for active bead move to next beads // } rx += windowSize; // idx += windowSize; bfMask += windowSize; bstateMask += windowSize; RawTraces += windowSize; BeadParamCube += windowSize; ResultCube += windowSize; } } */ // execute with one warp per row and 2D thread blocks of width warp length // each warp will slide across one row of the region // kernel parameters: // thread block dimensions (WARPSIZE,n,1) //n = number of warps per block) // grid dimension ( numRegions.x, (imgH + n-1)/n, 1) // one block per region in x direction and one per n img rows in y direction // const execParams ep, moved to constant memory as ExecP // const ImgRegParams moved to constant memory as ImgRegP // in this implementation the warp (sliding window) will not contain any non-live beads. // before assigning the beads/wells to the threads a single pass over the masks is performed and all non-live beads are discarded. // this improves the overall parallelism during the execution. 
~ 17% speedup //launch bounds: //K20 //regs per SM: 65536 // #if __CUDA_ARCH__ >= 300 #define SINGLEFLOW_MAX_THREADS 128 #define SINGLEFLOW_MIN_BLOCKS 8 #else #define SINGLEFLOW_MAX_THREADS 128 #define SINGLEFLOW_MIN_BLOCKS 5 #endif __global__ __launch_bounds__(SINGLEFLOW_MAX_THREADS,SINGLEFLOW_MIN_BLOCKS) void ExecuteThreadBlockPerRegion2DBlocksDense( const unsigned short * RegionMask, const unsigned short * bfMask, unsigned short * bstateMask, //per bead //in parameters const short * RawTraces, // NxF const float * BeadParamCube, const float* crudeemphasisVec, //(MAX_POISSON_TABLE_COL)*F const int * crudenonZeroEmphFrames, const float* fineemphasisVec, //(MAX_POISSON_TABLE_COL)*F const int * finenonZeroEmphFrames, const float* finenucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F const float* coarsenucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F //in out parameters float* ResultCube, const size_t * numFramesRegion, //constant memory? const int * numLBeadsRegion, //constant memory? //per region const ConstantParamsRegion * constRegP, const PerFlowParamsRegion * perFlowRegP, const PerNucParamsRegion * perNucRegP, const float * RegionFrameCube, //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber //TraceLevelXTalk const float * XTalkPerBead, const float * genericXTalkRegion //DEBUG buffer //int * numLBeads//, //ToDo only for debuging // float * fgBufferFloat ) { extern __shared__ float emphasis[]; #if EMPTY_IN_SHARED float * smEmptyTrace = emphasis + MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames(); #endif //region on chip //determine region location const size_t regionCol = blockIdx.x; const size_t regionRow = (blockIdx.y*blockDim.y)/ImgRegP.getRegH(); //image coordinates size_t ix = regionCol * ImgRegP.getRegW(); // + threadIdx.x; const size_t iy = (blockIdx.y*blockDim.y) + threadIdx.y; size_t idx = ImgRegP.getWellIdx(ix,iy); //region index to address region specific parameters //does not work if any blockDim > RegionDim //const size_t regId = ImgRegP.getRegId(ix,iy); //use regionCol and Row instead const size_t regId = ImgRegP.getRegIdFromGrid(regionCol,regionRow); size_t numf = numFramesRegion[regId]; /////////////////////////////////////////////////// //If the Region does not have any useful data frames will be 0 or numLBeads will be 0, so nothing has to be done if( LDG_ACCESS(RegionMask,regId) != RegionMaskLive) return; //if( LDG_ACCESS(numLBeadsRegion,regId) == 0) return; // no live beads in region, no more work for this thread block if (numf == 0) return; //strides const size_t BeadFrameStride = ImgRegP.getPlaneStride(); const size_t RegionFrameStride = ConstFrmP.getMaxCompFrames() * ImgRegP.getNumRegions(); //const size_t windowSize = blockDim.x; //if EmptyTraces from GenerateBeadTrace Kernel padding is uncompressed frames RegionFrameCube += regId*ConstFrmP.getMaxCompFrames(); //DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber #ifndef USE_INTERPOLATED_EMPTY const float * emptyTraceAvg = ConstHistCol.getLatestEmptyTraces() + regId*ConstFrmP.getUncompFrames(); // EmptyTraceRegion + regId*ConstFrmP.getUncompFrames(); #endif //////////////////////////////////////////////////////////// // setup code that needs to be done by all threads fineemphasisVec += regId * MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames(); crudeemphasisVec += regId * MAX_POISSON_TABLE_COL * ConstFrmP.getMaxCompFrames(); // //careful when accessing pointers since threads that would work out of bounds //are not yet filtered at this point if(blockDim.x == 32){ const int numWarps = blockDim.y; const int threadWarpIdx 
= threadIdx.x; const int warpIdx = threadIdx.y; for(int i=warpIdx; i<numf; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = fineemphasisVec[numf*threadWarpIdx + i ]; } }else{ const int numthreads = blockDim.x*blockDim.y; const int numWarps = numthreads/32; const int absThreadIdx = threadIdx.y*blockDim.x+threadIdx.x; const int threadWarpIdx = absThreadIdx%32; const int warpIdx = absThreadIdx/32; for(int i=warpIdx; i<numf; i += numWarps) { if (threadWarpIdx < MAX_POISSON_TABLE_COL) emphasis[(MAX_POISSON_TABLE_COL)*i + threadWarpIdx ] = fineemphasisVec[numf*threadWarpIdx + i ]; } } size_t emphStride = MAX_POISSON_TABLE_COL; //update per region pointers constRegP += regId; perFlowRegP += regId; //point to correct nuc perNucRegP += ImgRegP.getNumRegions() * ConstFlowP.getNucId() + regId; if(ConfigP.PerformTraceLevelXTalk()){ genericXTalkRegion += ConstFrmP.getMaxCompFrames() * regId; } finenonZeroEmphFrames += regId*MAX_POISSON_TABLE_COL; crudenonZeroEmphFrames += regId*MAX_POISSON_TABLE_COL; finenucRise += regId * ISIG_SUB_STEPS_SINGLE_FLOW * ConstFrmP.getMaxCompFrames() ; coarsenucRise += regId * ISIG_SUB_STEPS_MULTI_FLOW * ConstFrmP.getMaxCompFrames() ; float rezero_t_start = perFlowRegP->getTMidNuc()+perFlowRegP->getTMidNucShift(); #if EMPTY_TRACES_REZERO_SHARED_UNCOMPRESSED_INPUT if( threadIdx.y == 0){ // only first warp float * sm = smEmptyTrace; const float * frameNumber = (RegionFrameCube+RegionFrameStride*RfFrameNumber); float dcoffset = ComputeDcOffsetForUncompressedTrace(emptyTraceAvg,ConstFrmP.getUncompFrames(),constRegP->getTimeStart(), rezero_t_start-MAGIC_OFFSET_FOR_EMPTY_TRACE); for(int fn = threadIdx.x; fn < numf; fn+=blockDim.x){ TShiftAndPseudoCompressionOneFrame(sm,emptyTraceAvg,frameNumber, perFlowRegP->getTshift(), fn, ConstFrmP.getUncompFrames(),dcoffset); } __syncthreads(); // guarantee sm writes are completed and visiable within block } emptyTraceAvg = smEmptyTrace; #endif #if EMPTY_TRACES_REZERO_SHARED_COMPRESSED_INPUT && !EMPTY_TRACES_REZERO_SHARED_UNCOMPRESSED_INPUT if( threadIdx.y == 0){ // only first warp volatile float * sm = smEmptyTrace; const float * frameNumber = (RegionFrameCube+RegionFrameStride*RfFrameNumber); for(int fn = threadIdx.x; fn < numf; fn+=blockDim.x){ sm[fn] = LDG_ACCESS(emptyTraceAvg, fn); } //__syncthreads(); // guarantee sm writes are completed and visiable within block float dcoffset = ComputeDcOffsetForCompressedTrace(smEmptyTrace,1,frameNumber,constRegP->getTimeStart(),rezero_t_start-MAGIC_OFFSET_FOR_EMPTY_TRACE, numf); for(int fn = threadIdx.x; fn < numf; fn+=blockDim.x){ sm[fn] -= dcoffset; } //__syncthreads(); } emptyTraceAvg = smEmptyTrace; #endif __syncthreads(); //end all thread setup code, now excess threads can drop out //////////////////////////////////////////////////////////// //filter blocks that are outside the region in y-direction (ToDO: when 2d blocks this has to be done per warp after the all threads tasks are completed) const size_t ry = iy%ImgRegP.getRegH(); if( ry >= ImgRegP.getRegH(regId)) return; //get actual region Width const size_t regionWidth = ImgRegP.getRegW(regId); //update bead pointers to point to first well in row ry of region regId bfMask += idx; bstateMask += idx; //int * maxIterWarp = (int*)(RawTraces + (ConstFrmP.getRawFrames() - 3 ) * BeadFrameStride); // one value per warp per region row // warps per row = (ImgPregW+31/32) //int warpsPerRegionRow = (ImgRegP.getRegW()+31)/32; //int warpsPerImgRow = warpsPerRegionRow * 
ImgRegP.getGridDimX(); //int regionRowWarpsStride = ImgRegP.getRegH()* warpsPerImgRow; //maxIterWarp += regionRow * regionRowWarpsStride + ry * warpsPerImgRow + regionCol * warpsPerRegionRow; RawTraces += idx; BeadParamCube += idx; ResultCube += idx; if(ConfigP.PerformTraceLevelXTalk()) XTalkPerBead += idx; //sliding window if thread block is too small to handle a whole row in a region int rx = 0; while(rx < regionWidth){ //while thread inside region size_t liveBeadCountWarp = 0; // how many live beads are found in this warp size_t myOffset = regionWidth; // init to regionWidth so tha tif no bead is found for a thread it will drop out of the execution while(rx < regionWidth && liveBeadCountWarp < blockDim.x) //stop search when end of row reached or warp is full { if( Match(bfMask + rx, MaskLive) && (!Match(bstateMask + rx,BkgMaskCorrupt))){ // if live bead if(liveBeadCountWarp == threadIdx.x) //assign to correct thread (n-th live bead found handled by n-th thread in warp) myOffset = rx; //offset is the actual x-coordinate within the region of the well we are looking at in the current row liveBeadCountWarp++; //keep track of howe many live beads are already found for this warp } rx++; //move to next bead in the row } //ATTENTION: "rx" is not used in the dense packed kernel. from here on "myOffset" contains the x offset within the region for this thread!! if(myOffset < regionWidth){ //filter out threads that do not have a correct live bead to work on #ifdef USE_INTERPOLATED_EMPTY float emptyTraceAvg[MAX_COMPRESSED_FRAMES_GPU] = {0}; //interpolateEmpty( emptyTraceAvg, EmptyTraceRegion, RegionMask, regId, ix+myOffset, iy, numf); interpolateEmpty( emptyTraceAvg, RegionMask, regId, ix+myOffset, iy, numf); //if( regId == 27 && myOffset%20 == 0 && ry%28 == 0 ){ //const float * rt = emptyTraceAvg; //int i=0; //printf("%lu,%lu,%lu,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",regId, myOffset, ry,rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++]); //rt = EmptyTraceRegion + regId*ConstFrmP.getUncompFrames(); //i=0; //if( myOffset == 0 && ry == 0) printf("reg %lu,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",regId,rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++],rt[i++]); //} #endif //update local offsets (region regId does not change since warp only works on beads of one row in one region) unsigned short * lbstateMask = bstateMask + myOffset; const short * lRawTraces = RawTraces + myOffset; const float * lBeadParamCube = BeadParamCube+ myOffset; float* lResultCube = ResultCube + myOffset; const float * XTalkContribution = (ConfigP.PerformTraceLevelXTalk())?(XTalkPerBead + myOffset):(NULL); #if FG_TRACES_REZERO // Compute the per flow dc offset for the bead traces (maybe can be done later when we touch the raw traces the first time. 
float dcOffset = ComputeDcOffsetForCompressedTrace ( lRawTraces, BeadFrameStride, RegionFrameCube + RegionFrameStride * RfFrameNumber, constRegP->getTimeStart(), rezero_t_start - perFlowRegP->getSigma(),numf ); #endif SingleFlowFitUsingRelaxKmultGaussNewton( //per Bead lResultCube, //Ampl, kmult, avg_error, points to correct Ampl value, stride == frameStride lbstateMask, lRawTraces, // imgW*imgHxF lBeadParamCube, //Copies, R, dmult, gain, tau_adj, phi, stride == frameStride //per region emphasis, //(MAX_POISSON_TABLE_COL)*F finenonZeroEmphFrames, crudeemphasisVec, //(MAX_POISSON_TABLE_COL)*F crudenonZeroEmphFrames, finenucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F coarsenucRise, // ISIG_SUB_STEPS_SINGLE_FLOW * F constRegP, perFlowRegP, perNucRegP, RegionFrameCube, //bkgTrace, DarkMatter, DeltaFrames, DeltaFramesStd, FrameNumber emptyTraceAvg, //smEmptyTrace //EmptyTraceRegion #if FG_TRACES_REZERO dcOffset, #endif // other scalar inputs numf, // 4 //strides BeadFrameStride, RegionFrameStride, emphStride, //TraceLevelXTalk XTalkContribution, genericXTalkRegion //maxIterWarp //print ); //maxIterWarp++; } //end work for active bead move to next beads } }
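/* Illustrative host-side launch sketch for the kernel above -- added for clarity, not part of the
 * original file. It follows the geometry documented in the kernel header comment (2D blocks of width
 * WARPSIZE with n warps each; one grid column per region column, one grid row per n image rows) and
 * the SINGLEFLOW_MAX_THREADS launch bound. Names such as regionsGridX, imgH and maxCompFrames are
 * hypothetical stand-ins for values the real host code obtains from ImgRegParams / ConstFrmP, and the
 * shared-memory size shown covers only the emphasis table (more is needed if EMPTY_IN_SHARED is set).
 *
 *   const int warps = SINGLEFLOW_MAX_THREADS / 32;                        // e.g. 4 warps = 128 threads
 *   dim3 block(32, warps, 1);
 *   dim3 grid(regionsGridX, (imgH + warps - 1) / warps, 1);
 *   size_t smem = MAX_POISSON_TABLE_COL * maxCompFrames * sizeof(float);  // shared emphasis vectors
 *   ExecuteThreadBlockPerRegion2DBlocksDense<<<grid, block, smem>>>( ...arguments as declared above... );
 */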
//#include "timer-util.h" #include "gpu.cuh" #include "ldgm-session-gpu.h" struct coding_params { int num_lost; int k; int m; int packet_size; int max_row_weight; }; __global__ void frame_encode(char * data,int * pcm,struct coding_params * params); __global__ void frame_encode_int_big(int *data, int *pcm,int param_k,int param_m,int w_f,int packet_size); __global__ void frame_encode_staircase(int *data, int *pcm,int param_k,int param_m,int w_f,int packet_size); __global__ void frame_decode(char * received, int * pcm, int * error_vec,int * sync_vec,int packet_size,int max_row_weight,int K); __global__ void frame_encode_int(int *data, int *pcm,int param_k,int param_m,int w_f,int packet_size); __global__ void frame_decode_int(int * received, int * pcm, int * error_vec,int * sync_vec,int packet_size,int max_row_weight,int K); void gpu_encode ( char* source_data,int* pc_matrix, struct coding_params * ); void gpu_decode (char * received,int * pcm,struct coding_params * params,int * error_vec,int * sync_vec,int undecoded,int * frame_size); __device__ unsigned int count = 0; __device__ unsigned int count_M = 0; char *xor_using_sse2 (char *source, char *dest, int packet_size) { //First, do as many 128-bit XORs as possible int iter_bytes_16 = 0; int iter_bytes_4 = 0; int iter_bytes_1 = 0; iter_bytes_16 = (packet_size / 16) * 16; if ( iter_bytes_16 > 0) { // printf ( "iter_bytes: %d\n", iter_bytes ); __m128i *wrd_ptr = (__m128i *) source; __m128i *wrd_end = (__m128i *) (source + iter_bytes_16); __m128i *dst_ptr = (__m128i *) dest; // printf ( "wrd_ptr address: %p\n", wrd_ptr ); do { __m128i xmm1 = _mm_loadu_si128(wrd_ptr); __m128i xmm2 = _mm_loadu_si128(dst_ptr); xmm1 = _mm_xor_si128(xmm1, xmm2); // XOR 4 32-bit words _mm_storeu_si128(dst_ptr, xmm1); ++wrd_ptr; ++dst_ptr; } while (wrd_ptr < wrd_end); } //Check, whether further XORing is necessary if ( iter_bytes_16 < packet_size ) { char *mark_source = source + iter_bytes_16; char *mark_dest = dest + iter_bytes_16; iter_bytes_4 = ((packet_size - iter_bytes_16) / 4) * 4; for ( int i = 0; i < (packet_size - iter_bytes_16) / 4; i++) { int *s = ((int *) mark_source) + i; int *d = ((int *) mark_dest) + i; *d ^= *s; } mark_source += iter_bytes_4; mark_dest += iter_bytes_4; iter_bytes_1 = packet_size - iter_bytes_16 - iter_bytes_4; for ( int i = 0; i < iter_bytes_1; i++) { *(mark_dest + i) ^= *(mark_source + i); } } return dest; } CUDA_DLL_API void gpu_encode_upgrade (char * source_data,int *OUTBUF, int * PCM,int param_k,int param_m,int w_f,int packet_size ,int buf_size) { // cudaError_t error; int blocksize = packet_size/sizeof(int); // printf("blocksize: %d\npacket size: %d\n",blocksize,packet_size ); if(blocksize>256){ if(blocksize>1024) blocksize=1024; // puts("big one"); frame_encode_int_big <<< param_m, blocksize, packet_size >>> (OUTBUF,PCM, param_k, param_m, w_f, packet_size); cuda_check_error("frame_encode_int_big"); frame_encode_staircase<<< 1, blocksize, packet_size >>> (OUTBUF, PCM, param_k, param_m, w_f, packet_size); cuda_check_error("frame_encode_staircase"); cudaMemcpy(source_data + param_k*packet_size,OUTBUF + (param_k*packet_size)/4, param_m*packet_size,cudaMemcpyDeviceToHost ); // // cudaMemcpy(source_data,OUTBUF, buf_size,cudaMemcpyDeviceToHost ); cuda_check_error("memcpy out_buf"); // gettimeofday(&t0, 0); // for ( int m = 1; m < param_m; ++m) // { // char *prev_parity = (char *) source_data + (param_k + m - 1) * packet_size; // char *parity_packet = (char *) source_data + (param_k + m) * packet_size; // xor_using_sse2(prev_parity, 
parity_packet, packet_size); // } // gettimeofday(&t1, 0); // long elapsed = (t1.tv_sec - t0.tv_sec) * 1000000 + t1.tv_usec - t0.tv_usec; // printf("time staircase: %f\n",elapsed/1000.0 ); } else{ // puts("chudy soused"); frame_encode_int <<< param_m, blocksize, packet_size >>> (OUTBUF,PCM, param_k, param_m, w_f, packet_size); cuda_check_error("frame_encode_int"); cudaMemcpy(source_data + param_k*packet_size,OUTBUF + (param_k*packet_size)/4, param_m*packet_size,cudaMemcpyDeviceToHost ); // cudaMemcpy(source_data,OUTBUF, buf_size,cudaMemcpyDeviceToHost ); cuda_check_error("memcpyu out_buf"); for ( int m = 1; m < param_m; ++m) { char *prev_parity = (char *) source_data + (param_k + m - 1) * packet_size; char *parity_packet = (char *) source_data + (param_k + m) * packet_size; xor_using_sse2(prev_parity, parity_packet, packet_size); } } // cudaEvent_t start, stop; // float time; // cudaEventCreate(&start); // cudaEventCreate(&stop); // cudaEventRecord(start, 0); // cudaStream_t pStream; // cudaStreamCreate(&pStream); // frame_encode_staircase<<< 1, blocksize, packet_size >>> (OUTBUF, PCM, param_k, param_m, w_f, packet_size); // cuda_check_error("frame_encode_staircase"); // cudaStreamSynchronize(); // cudaDeviceSynchronize(); return; } #define CHECK_CUDA(cmd) do { \ cudaError_t err = cmd; \ if (err != cudaSuccess) {\ fprintf(stderr, "[LDGM GPU] %s: %s\n", #cmd, cudaGetErrorString(err)); \ } \ } while(0) CUDA_DLL_API void gpu_decode_upgrade(char *data, int * PCM,int* SYNC_VEC,int* ERROR_VEC, int not_done, int *frame_size,int * error_vec,int * sync_vec,int M,int K,int w_f,int buf_size,int packet_size) { cudaError_t error; int *received_d; // int M = params->m; // int K = params->k; // int w_f = params->max_row_weight + 2; // int buf_size = params->buf_size; int* received = (int*) data; // printf("K: %d, M: %d, max_row_weight: %d, buf_size: %d,\n packet_size: %d\n",K,M,w_f,buf_size,packet_size ); // printf("NOT DONE: %d\n",not_done ); error = cudaHostRegister(received, buf_size, cudaHostRegisterMapped); if (error != cudaSuccess) printf("1 %s\n", cudaGetErrorString(error)); error = cudaHostGetDevicePointer((void **) & (received_d), (void *)received, 0); if (error != cudaSuccess) printf("2 %s\n", cudaGetErrorString(error)); // error = cudaMalloc(&received_d, buf_size); // if (error != cudaSuccess) printf("1 %s\n", cudaGetErrorString(error)); // error = cudaMemcpy(received_d, received, buf_size, cudaMemcpyHostToDevice); // if (error != cudaSuccess) printf("2 %s\n", cudaGetErrorString(error)); // error = cudaMalloc(&pcm_d, w_f * M * sizeof(int)); // if (error != cudaSuccess) printf("3 %s\n", cudaGetErrorString(error)); // error = cudaMemcpy(pcm_d, PCM, w_f * M * sizeof(int), cudaMemcpyHostToDevice); // if (error != cudaSuccess) printf("4 %s\n", cudaGetErrorString(error)); // error = cudaMalloc(&error_vec_d, (K + M) * sizeof(int)); // if (error != cudaSuccess)printf("5 %s\n", cudaGetErrorString(error)); // error = cudaMemcpy(error_vec_d, error_vec, (K + M) * sizeof(int), cudaMemcpyHostToDevice); // if (error != cudaSuccess) printf("6 %s\n", cudaGetErrorString(error)); // error = cudaMalloc(&sync_vec_d, (K + M) * sizeof(int)); // if (error != cudaSuccess) printf("7 %s\n", cudaGetErrorString(error)); // error = cudaMemcpy(sync_vec_d, sync_vec, (K + M) * sizeof(int), cudaMemcpyHostToDevice); // if (error != cudaSuccess) printf("8 %s\n", cudaGetErrorString(error)); int ps = packet_size/sizeof(int); int blocksize = packet_size/sizeof(int) +1; // printf("blocksize: %d\npacket size: 
%d\n",blocksize,packet_size ); if(blocksize>512) blocksize=512; int not_done_source=0; for (int i = 0; i < K; i++) { if (error_vec[i] == 1) not_done_source++; } // printf("not_done %d\n",not_done ); // printf("not_done_source %d\n",not_done_source); unsigned int count_host = 0; unsigned int count_host_M = 0; CHECK_CUDA(cudaMemcpyToSymbol ( count, (void *)(&count_host), sizeof(unsigned int), 0, cudaMemcpyHostToDevice)); CHECK_CUDA(cudaMemcpyToSymbol ( count_M, (void *)(&count_host_M), sizeof(unsigned int), 0, cudaMemcpyHostToDevice)); int i = 0; for (i = 1; i < 30; ++i) { //__global__ void frame_decode_int(int * received, int * pcm, int * error_vec,int * sync_vec,int packet_size,int max_row_weight,int K); frame_decode_int <<< M, blocksize , packet_size >>> (received_d, PCM, ERROR_VEC, SYNC_VEC, ps, w_f-2, K); error = cudaGetLastError(); if (error != cudaSuccess) printf("3 %s\n", cudaGetErrorString(error)); // cudaDeviceSynchronize(); //error = cudaMemcpyFromSymbol((void *)(&count_host), count, sizeof(int), 0, cudaMemcpyDeviceToHost); count_host=0; error = cudaMemcpyFromSymbol((void*)(&count_host), count, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost); if (error != cudaSuccess) printf("10 %s\n", cudaGetErrorString(error)); // printf("count host %d\n",count_host ); if (count_host == not_done_source) { break; } CHECK_CUDA(cudaMemcpyFromSymbol((void*)(&count_host_M), count_M, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost)); // printf("count host_M %d\n",count_host_M ); if (count_host_M == M) { break; } count_host_M = 0; CHECK_CUDA(cudaMemcpyToSymbol ( count_M, (void *)(&count_host_M), sizeof(unsigned int), 0, cudaMemcpyHostToDevice)); } // printf("iterace: %d\n",i); // cudaDeviceSynchronize(); //cudaThreadSynchronize(); CHECK_CUDA(cudaMemcpy(error_vec, ERROR_VEC, (K + M) * sizeof(int), cudaMemcpyDeviceToHost)); int a = 0; int fs = 0; for (int i = 0; i < K; i++) { if (error_vec[i] == 1) a++; } // printf("UNDECODED: %d NOT DONE: %d DEKODOVANO: %d\n",a,not_done,not_done-a); if (a != 0) { *frame_size = 0; } else { memcpy(&fs, received, 4); // printf("received size %d\n",fs ); *frame_size = fs; } // printf("undecoded: %d, frame_size: %d, undecoded subtract: %d\n",a,fs,not_done-a ); CHECK_CUDA(cudaHostUnregister(received)); // cudaFree(received_d); // cudaFree(pcm_d); // cudaFree(error_vec_d); // cudaFree(sync_vec_d); // cudaFree(params_d); // puts("END"); return; } __global__ void frame_encode_int_big(int *data, int *pcm,int param_k,int param_m,int w_f,int packet_size) { int ps = packet_size/sizeof(int); int bx = blockIdx.x; int x = threadIdx.x; int offset; // printf("K: %d M: %d max_row_weight: %d packet_size: %d\n",param_k,param_m,max_row_weight,ps); extern __shared__ int parity_packet[]; // int *parity_packet = data + (param_k + bx) * ps; // if(x==0)printf("bx %d has parity packet at: %d,%d\n",bx,param_k*ps + bx*ps,param_k+bx ); offset = x; while (offset < ps) { parity_packet[offset]=0; offset += blockDim.x; } // __syncthreads(); for ( int i = 0; i < w_f; i++) { int idx = pcm[bx * w_f + i]; //printf ( "adept: %d\n", idx ); // if(x==0) printf ("block %d xor packet: %d\n",bx,idx); if (idx > -1 && idx < param_k) { //xoring parity_packet ^ idx offset = x; while (offset < ps) { parity_packet[offset]^=data[idx*ps + offset]; offset += blockDim.x; } } } // __syncthreads(); offset = x; while (offset < ps) { data[(param_k + bx) * ps + offset]= parity_packet[offset]; offset += blockDim.x; } } __global__ void frame_encode_int(int *data, int *pcm,int param_k,int param_m,int w_f,int packet_size) { int ps = 
packet_size/sizeof(int); int bx = blockIdx.x; int offset = threadIdx.x; // printf("K: %d M: %d max_row_weight: %d packet_size: %d\n",param_k,param_m,max_row_weight,ps); extern __shared__ int parity_packet[]; // int *parity_packet = data + (param_k + bx) * ps; // if(x==0)printf("bx %d has parity packet at: %d,%d\n",bx,param_k*ps + bx*ps,param_k+bx ); // while (offset < ps) // { // parity_packet[offset]=0; // offset += blockDim.x; // } parity_packet[offset]=0; // __syncthreads(); for ( int i = 0; i < w_f; i++) { int idx = pcm[bx * w_f + i]; //printf ( "adept: %d\n", idx ); // if(x==0) printf ("block %d xor packet: %d\n",bx,idx); if (idx > -1 && idx < param_k) { //xoring parity_packet ^ idx // offset = x; // while (offset < ps) // { // parity_packet[offset]^=data[idx*ps + offset]; // offset += blockDim.x; // } parity_packet[offset]^=data[idx*ps + offset]; } } // __syncthreads(); // offset = x; // while (offset < ps) // { // data[(param_k + bx) * ps + offset]= parity_packet[offset]; // offset += blockDim.x; // } data[(param_k + bx) * ps + offset]= parity_packet[offset]; // __syncthreads(); } __global__ void frame_decode_int(int *received, int *pcm, int *error_vec, int *sync_vec, int packet_size, int max_row_weight, int K) { //TITAN __shared__ int undecoded; __shared__ int undecoded_index; __shared__ int ret; extern __shared__ int shared_parity_packet[]; int w_f = max_row_weight + 2; int ps = packet_size; int bx = blockIdx.x; int x = threadIdx.x; int offset = 0; if (x == 0) { ret = 0; undecoded = 0; undecoded_index = -1; for (int j = 0; j < w_f; j++) { int p = pcm[bx * w_f + j]; //printf("%d %d %d\n",p, error_vec[p],x); if (p != -1 && error_vec[p] == 1) { undecoded++; undecoded_index = p; } } if (undecoded == 1) { ret = atomicCAS(sync_vec + undecoded_index, 1, 0); } } __syncthreads(); if (ret == 1) { // if(x==0) printf("decoding %7d, bx %7d\n",undecoded_index,bx ); offset = x; while (offset < ps) { shared_parity_packet[offset]=0x0; offset += blockDim.x; } /*int zbyva = ps - offset; if (x < zbyva) { shared_parity_packet[x + offset] = 0; }*/ __syncthreads(); // if(x==0) printf("decoding [%d]\n",undecoded_index); for (int j = 0; j < w_f; j++) { int index = pcm[bx * w_f + j]; if (index != undecoded_index && index != -1) { offset = x; while ( offset < ps) { shared_parity_packet[offset] ^= received[index*ps + offset]; offset += blockDim.x; }/* int zbyva = ps - offset; if (x < zbyva) { shared_parity_packet[x + offset] ^= received[(index * ps) + x + offset]; }*/ } } __syncthreads(); offset = x; while ( offset < ps) { // *((int *)(received + (undecoded_index * ps) + 4*x + a)) = *((int *)(shared_parity_packet + a + 4 * x)); received[(undecoded_index * ps) + offset] = shared_parity_packet[offset]; offset += blockDim.x; } /* zbyva = ps - offset; if (x < zbyva) { received[(undecoded_index * ps) + x + offset] = shared_parity_packet[x + offset]; }*/ } if (x == 0 && ret == 1) { //error_vec[undecoded_index]=0; atomicCAS(error_vec + undecoded_index, 1, 0); // printf("node %d %d done\n",undecoded_index); } if (x == 0 && ret==1 && undecoded_index<K) { atomicAdd(&count, 1); } if (x == 0 && undecoded!=1 ) { atomicAdd(&count_M, 1); } } __global__ void frame_encode_staircase(int *data, int *pcm,int param_k,int param_m,int w_f,int packet_size) { int ps = packet_size/sizeof(int); int x = threadIdx.x; for (int index = param_k; index < param_k + param_m-1; index++) { int offset = x; while (offset < ps) { // *((int *)(data + (index+1)*ps + offset + intSize * x)) ^= *((int *)(data + index * ps + intSize * x + offset)); 
data[(index+1)*ps + offset] ^= data[index*ps + offset]; offset += blockDim.x; } } }
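// ---------------------------------------------------------------------------
// Illustrative host-side launch of the block-parallel encoder above. This is
// a minimal sketch and not part of the original file: the wrapper name
// (launch_frame_encode) and the device pointers (data_d, pcm_d) are assumed,
// one thread block is launched per parity packet, and the block-size cap of
// 512 mirrors the one used in the decode loop earlier in this file.
// ---------------------------------------------------------------------------
static void launch_frame_encode(int *data_d, int *pcm_d,
                                int param_k, int param_m,
                                int w_f, int packet_size)
{
    // One thread per int of a packet; the kernel strides by blockDim.x, so a
    // smaller block size is also valid.
    int threads = packet_size / (int)sizeof(int);
    if (threads > 512) threads = 512;

    // Dynamic shared memory holds one parity packet (packet_size bytes).
    frame_encode_int_big<<<param_m, threads, packet_size>>>(
        data_d, pcm_d, param_k, param_m, w_f, packet_size);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("frame_encode_int_big %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
}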
the_stack
//#define SOLVE_ZERO_INI_GUESS //#define DEBUG //#define DEBUGX namespace amgx { // parameter is used as test name DECLARE_UNITTEST_BEGIN(EnergyminAlgorithmTest); std::string get_A_input_Dir() { return "Internal/energymin/input_matrix/"; } std::string get_ext_str() { return ".mtx"; } void generateInputFilenames(const std::vector<int> &N_range_vec, const std::vector<std::string> &epsilons_str_vec, const std::vector<std::string> &thetas_str_vec, std::vector<std::string> &fnames_str_vec) { for (int nInd = 0; nInd < N_range_vec.size(); nInd++) { int N = N_range_vec[nInd]; for ( int eInd = 0; eInd < epsilons_str_vec.size(); eInd++ ) { std::string epsilon_str = epsilons_str_vec[eInd]; for ( int tInd = 0; tInd < thetas_str_vec.size(); tInd++ ) { std::string theta_str = thetas_str_vec[tInd]; std::stringstream ss; ss << "aniso_matrix_" << N << "x" << N << "_eps_" << epsilon_str << "_theta_" << theta_str; fnames_str_vec.push_back(ss.str()); } } } } void generateAllInputFilenames(std::vector<std::string> &fnames_str_vec) { //fnames_str_vec.push_back("matrix"); std::vector<int> N_range_vec; //N_range_vec.push_back(5); //N_range_vec.push_back(10); //N_range_vec.push_back(15); N_range_vec.push_back(20); std::vector<std::string> epsilons_str_vec; epsilons_str_vec.push_back("1"); //epsilons_str_vec.push_back("0_10"); //epsilons_str_vec.push_back("0_01"); std::vector<std::string> thetas_str_vec; thetas_str_vec.push_back("0"); //thetas_str_vec.push_back("0_79"); //thetas_str_vec.push_back("-0_79"); this->generateInputFilenames(N_range_vec, epsilons_str_vec, thetas_str_vec, fnames_str_vec); } void createConfigString(std::string &base_string) { base_string += "main_solver:coloring_level=1,"; //base_string += "main_solver:convergence=RELATIVE_MAX,"; base_string += "main_solver:cycle=V,"; base_string += "main_solver:matrix_coloring_scheme=MIN_MAX,"; base_string += "main_solver:max_levels=100,"; base_string += "main_solver:norm=L1,"; base_string += "main_solver:postsweeps=2,"; base_string += "main_solver:presweeps=2,"; base_string += "main_solver:smoother=MULTICOLOR_GS,"; base_string += "main_solver:tolerance=0.1,"; base_string += "main_solver:max_iters=100,"; base_string += "main_solver:monitor_residual=1,"; //test_cases[i].config_string += "determinism_flag=1,"; base_string += "max_uncolored_percentage=0.,"; base_string += "main_solver:store_res_history=1,"; base_string += "main_solver:obtain_timings=1"; } struct TestCase { std::string file_name; std::string config_string; bool extract_diagonal; bool insert_diagonal; bool use_pre_setup; TestCase(): use_pre_setup(true), insert_diagonal(false), extract_diagonal(false) {} }; std::vector<double> test_main(TestCase &test_case, bool generate_rhs = 0) { bool insert_diagonal = test_case.insert_diagonal; bool extract_diagonal = test_case.extract_diagonal; // Create matrix arrays from file Matrix_h Atemp; Vector_h btemp, xtemp, x_final; // Read the matrix std::string fail_msg = "Cannot open " + test_case.file_name; this->PrintOnFail(fail_msg.c_str()); if (generate_rhs) { std::string mtxInFileName = UnitTest::get_configuration().data_folder + test_case.file_name; //typedef typename T_Config::template setMemSpace<AMGX_host>::Type Config_h; UNITTEST_ASSERT_TRUE(MatrixIO<TConfig_h>::readSystem(mtxInFileName.c_str(), Atemp) == AMGX_OK); btemp.resize(Atemp.get_num_rows(), 0.); xtemp.resize(Atemp.get_num_rows(), 0.); //srand(1); int dd = 7; for (int i = 0; i < btemp.size(); i++) { //btemp[i] = (float(rand()%100))/100 - 50; btemp[i] = (i % dd - dd / 2); //*(10/float(dd)); 
//std::cout << " btemp["<<i<<"]=" << btemp[i]; } } else { UNITTEST_ASSERT_TRUE(this->read_system(test_case.file_name.c_str(), Atemp, btemp, xtemp)); } bool hasDiag = Atemp.hasProps(DIAG); // Create row_offsets, col_indices, off_dia_values and dia_values arrays from the matrix just read int num_rows = Atemp.get_num_rows(); int num_nz = Atemp.get_num_nz(); int bsize_x = Atemp.get_block_dimx(); int bsize_y = Atemp.get_block_dimy(); int bsize = bsize_x * bsize_y; xtemp.resize(num_rows * bsize_y, 1.); std::vector<int> row_offsets(num_rows + 1); std::vector<int> col_indices(num_nz); std::vector<double> off_dia_values(num_nz * bsize); std::vector<double> dia_values; if (hasDiag) { dia_values.resize(num_rows * bsize); } std::vector<double> x_vec(num_rows * bsize_y); std::vector<double> b_vec(num_rows * bsize_x); // Fill vectors int *raw_row_ptr = Atemp.row_offsets.raw(); int *raw_col_ptr = Atemp.col_indices.raw(); double *raw_val_ptr = Atemp.values.raw(); // Row offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] = raw_row_ptr[i]; } // Column indices for (int i = 0; i < num_nz; i++) { col_indices[i] = raw_col_ptr[i]; } // Off-diagonal values for (int i = 0; i < num_nz; i++) { for (int j = 0; j < bsize; j++) { off_dia_values[i * bsize + j] = raw_val_ptr[i * bsize + j]; } } // Diagonal values if (hasDiag) { for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize; j++) { dia_values[i * bsize + j] = raw_val_ptr[num_nz * bsize + i * bsize + j]; } } } srand(1); // Random RHS double *b_raw_ptr = btemp.raw(); for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize_x; j++) { b_vec[i * bsize_x + j] = b_raw_ptr[i * bsize_x + j] + (1.0 * rand() / RAND_MAX); //b_vec[i*bsize_x+j] = b_raw_ptr[i*bsize_x+j]; } } // Random xvector srand(2); double *x_raw_ptr = xtemp.raw(); for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize_y; j++) { x_vec[i * bsize_y + j] = x_raw_ptr[i * bsize_y + j] + (1.0 * rand() / RAND_MAX); //x_vec[i*bsize_y+j] = x_raw_ptr[i*bsize_y+j]; } } std::vector<double> x_vec_col = x_vec; std::string option_string = test_case.config_string; //std::cout << "hasDiag=" << hasDiag << "\n"; // Insert diagonal if (hasDiag && insert_diagonal) { std::vector<int> new_col_indices( (num_nz + num_rows) ); std::vector<double> new_off_dia_values( (num_nz + num_rows)*bsize ); int icount = 0; for (int i = 0; i < num_rows; i++) { for (int j = row_offsets[i]; j < row_offsets[i + 1]; j++) { int col = col_indices[j]; new_col_indices[icount] = col; for (int k = 0; k < bsize; k++) { new_off_dia_values[icount * bsize + k] = off_dia_values[j * bsize + k]; } icount++; } // Insert diagonal new_col_indices[icount] = i; for (int k = 0; k < bsize; k++) { new_off_dia_values[icount * bsize + k] = dia_values[i * bsize + k]; } icount++; } // increment row_offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] += i; } off_dia_values = new_off_dia_values; col_indices = new_col_indices; dia_values.resize(0); num_nz += num_rows; } // End Insert diagonal // Extract diagonal if (extract_diagonal) { std::vector<int> old_col_indices = col_indices; std::vector<double> old_off_dia_values = off_dia_values; off_dia_values.resize((num_nz - num_rows)*bsize); col_indices.resize(num_nz - num_rows); dia_values.resize(num_rows * bsize); int icount = 0; for (int i = 0; i < num_rows; i++) { for (int j = row_offsets[i]; j < row_offsets[i + 1]; j++) { int col = old_col_indices[j]; if (col != i) { col_indices[icount] = col; for (int k = 0; k < bsize; k++) { off_dia_values[icount * bsize + k] = 
old_off_dia_values[j * bsize + k]; } icount++; } else { for (int k = 0; k < bsize; k++) { dia_values[i * bsize + k] = old_off_dia_values[j * bsize + k]; } } } } // decrement row_offsets for (int i = 0; i < num_rows + 1; i++) { row_offsets[i] -= i; } num_nz -= num_rows; } // End Extract diagonal AMGX_config_handle rsrc_cfg = NULL; UNITTEST_ASSERT_EQUAL(AMGX_config_create(&rsrc_cfg, ""), AMGX_OK); // Choosing device 0 int device = 0; AMGX_resources_handle rsrc = NULL; UNITTEST_ASSERT_EQUAL(AMGX_resources_create(&rsrc, rsrc_cfg, NULL, 1, &device), AMGX_OK); std::cout << "\n" << option_string << "\n"; AMGX_config_handle cfg = NULL; UNITTEST_ASSERT_EQUAL(AMGX_config_create( &cfg, option_string.c_str() ), AMGX_OK); AMGX_matrix_handle matrix; UNITTEST_ASSERT_EQUAL(AMGX_matrix_create( &matrix, rsrc, AMGX_mode_dDDI ), AMGX_OK); AMGX_solver_handle solver; UNITTEST_ASSERT_EQUAL(AMGX_solver_create( &solver, rsrc, AMGX_mode_dDDI, cfg), AMGX_OK); AMGX_vector_handle b, x; UNITTEST_ASSERT_EQUAL(AMGX_vector_create( &b, rsrc, AMGX_mode_dDDI ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_create( &x, rsrc, AMGX_mode_dDDI ), AMGX_OK); int num_setup_iters = 2; for (int i_setup = 0; i_setup < num_setup_iters; i_setup++) { #ifdef DEBUG std::cout << "outer iteration #" << i_setup << std::endl; #endif // Upload the new matrix and call setup if (i_setup == 0) { if (dia_values.size() != 0) { UNITTEST_ASSERT_EQUAL(AMGX_matrix_upload_all( matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], &dia_values[0] ), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_matrix_upload_all( matrix, num_rows, num_nz, bsize_x, bsize_y, &row_offsets[0], &col_indices[0], &off_dia_values[0], NULL ), AMGX_OK); } UNITTEST_ASSERT_EQUAL(AMGX_solver_setup( solver, matrix ), AMGX_OK); } else { // Perturb the matrix //for (int i=0;i<num_nz;i++) // for (int j=0;j<bsize;j++) // off_dia_values[i*bsize+j] += .001*i_setup; // perturb the diagonal if (hasDiag) { for (int i = 0; i < num_rows; i++) { for (int j = 0; j < bsize; j++) { dia_values[i * bsize + j] += .001 * i_setup; } } } if (dia_values.size() != 0) { UNITTEST_ASSERT_EQUAL(AMGX_matrix_replace_coefficients( matrix, num_rows, num_nz, &off_dia_values[0], &dia_values[0] ), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_matrix_replace_coefficients( matrix, num_rows, num_nz, &off_dia_values[0], NULL ), AMGX_OK); } if (test_case.use_pre_setup) { UNITTEST_ASSERT_EQUAL(AMGX_solver_resetup( solver, matrix ), AMGX_OK); } else { UNITTEST_ASSERT_EQUAL(AMGX_solver_setup( solver, matrix ), AMGX_OK); } } UNITTEST_ASSERT_EQUAL(AMGX_vector_upload( b, num_rows, bsize_y, &b_vec[0] ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_upload( x, num_rows, bsize_x, &x_vec[0] ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_solver_solve( solver, b, x ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_download( x, &x_vec[0] ), AMGX_OK); } #ifdef DEBUGX std::cout << "final x_vec" << std::endl; for (int i = 0; i < x_vec.size(); i++) { std::cout << i << " " << x_vec[i] << std::endl; } #endif UNITTEST_ASSERT_EQUAL(AMGX_solver_destroy( solver ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_matrix_destroy( matrix ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_destroy( b ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_vector_destroy( x ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_config_destroy( cfg ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_config_destroy( rsrc_cfg ), AMGX_OK); UNITTEST_ASSERT_EQUAL(AMGX_resources_destroy( rsrc ), AMGX_OK); return x_vec; } void run() { SignalHandler::hook(); AMGX_finalize_plugins(); 
AMGX_finalize(); UnitTest::amgx_intialized = false; std::vector<TestCase> test_cases; TestCase temp_case; std::vector<std::string> test_files; //test_files.push_back("Internal/energymin/input_matrix/aniso_matrix_20x20_eps_1_theta_0_79.mtx"); this->generateAllInputFilenames(test_files); //test_files.push_back("Internal/poisson/poisson27x16x16x16.mtx"); //test_files.push_back("Internal/poisson/poisson27x50x50x50.mtx"); std::string base_string, base_string_em, base_string_cl; base_string = "config_version=2,"; base_string += "solver(main_solver)=AMG,"; base_string_em = base_string + "main_solver:algorithm=ENERGYMIN,"; base_string_cl = base_string + "main_solver:algorithm=CLASSICAL,"; //base_string_em += "main_solver:algorithm=CLASSICAL,"; //base_string_em += "main_solver:algorithm=AGGREGATION,"; this->createConfigString(base_string_em); this->createConfigString(base_string_cl); temp_case.insert_diagonal = true; for (int i = 0; i < test_files.size(); i++) { temp_case.config_string = base_string_cl; temp_case.use_pre_setup = false; test_cases.push_back(temp_case); test_cases.back().file_name = this->get_A_input_Dir() + test_files[i] + this->get_ext_str(); temp_case.config_string = base_string_em; temp_case.use_pre_setup = false; test_cases.push_back(temp_case); test_cases.back().file_name = this->get_A_input_Dir() + test_files[i] + this->get_ext_str(); } bool generate_rhs = 1; std::vector<double> x_ref; std::vector<double> x; for (int i = 0; i < test_cases.size(); i++) { AMGX_initialize(); AMGX_initialize_plugins(); if (i % 2 == 0) { x_ref.clear(); x_ref = test_main(test_cases[i], generate_rhs); } else { x.clear(); x = test_main(test_cases[i], generate_rhs); //x_ref.resize(x.size(), 0); std::stringstream fail_msg; fail_msg << "Different result for test_case, " << std::endl; fail_msg << " config string = " << test_cases[i].config_string << std::endl;; fail_msg << " use pre_setup = " << test_cases[i].use_pre_setup << std::endl; this->PrintOnFail(fail_msg.str().c_str()); for (int i = 0; i < x.size(); i++) { UNITTEST_ASSERT_EQUAL_TOL(x[i], x_ref[i], 1.0e-2); //1e-8); //std::cout << "\n x["<<i<<"]=" << x[i] << " x_ref["<<i<<"]=" << x_ref[i]; } } AMGX_finalize_plugins(); AMGX_finalize(); } AMGX_initialize(); AMGX_initialize_plugins(); UnitTest::amgx_intialized = true; } DECLARE_UNITTEST_END(EnergyminAlgorithmTest); // if you want to be able run this test for all available configs you can write this: //#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE // or run for all device configs //#define AMGX_CASE_LINE(CASE) TemplateTest <TemplateMode<CASE>::Type> TemplateTest_##CASE; // AMGX_FORALL_BUILDS_DEVICE(AMGX_CASE_LINE) //#undef AMGX_CASE_LINE EnergyminAlgorithmTest <TemplateMode<AMGX_mode_dDDI>::Type> EnergyminAlgorithmTest_dDDI; // or you can specify several desired configs //TemplateTest <TemplateMode<AMGX_mode_hDFI>::Type> TemplateTest_hDFI; //TemplateTest <TemplateMode<AMGX_mode_dDFI>::Type> TemplateTest_dDFI; }
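// ---------------------------------------------------------------------------
// For reference, the ENERGYMIN configuration assembled by run() together with
// createConfigString() above expands to the following single option string
// (shown here only as a comment; no options beyond those set in the code are
// introduced). The CLASSICAL variant is identical except for
// main_solver:algorithm.
//
//   config_version=2,solver(main_solver)=AMG,main_solver:algorithm=ENERGYMIN,
//   main_solver:coloring_level=1,main_solver:cycle=V,
//   main_solver:matrix_coloring_scheme=MIN_MAX,main_solver:max_levels=100,
//   main_solver:norm=L1,main_solver:postsweeps=2,main_solver:presweeps=2,
//   main_solver:smoother=MULTICOLOR_GS,main_solver:tolerance=0.1,
//   main_solver:max_iters=100,main_solver:monitor_residual=1,
//   max_uncolored_percentage=0.,main_solver:store_res_history=1,
//   main_solver:obtain_timings=1
// ---------------------------------------------------------------------------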
the_stack
#include <map> #include <set> #include <glog/logging.h> class ConstructGradient { public: template <typename FloatT> static typename TransformStorage<FloatT>::GradientType* construct( const typename Storage<FloatT>::Gradients& gradients, const TransformStorage<FloatT>& param, const ParamIdentifier param_id) { return gradients.get_transform_gradient(param_id); } template <typename FloatT, typename IdxType> static typename RepresentationsStorage<FloatT, IdxType>::GradientType* construct( const typename Storage<FloatT>::Gradients& gradients, const RepresentationsStorage<FloatT, IdxType>& param, const ParamIdentifier param_id) { return gradients.get_representations_gradient(param_id); } }; char const* ParamName[] { "word_representations", "word_entity_mapping", "entity_representations", }; template <typename FloatT, typename IdxType> Representations<FloatT, IdxType>::Representations( const ParamIdentifier id, const size_t num_objects, const size_t size, const UpdateMethodConf& update_method, Streams* const streams) : Parameters<FloatT>(id), RepresentationsStorage<FloatT, IdxType>(num_objects, size, streams), updater_(nullptr) { if (update_method.type() == SGD) { updater_.reset( new SGDRepresentationsGradientUpdater<FloatT, IdxType>()); } else if (update_method.type() == ADAGRAD) { updater_.reset( new AdagradRepresentationsGradientUpdater<FloatT, IdxType>( num_objects, streams)); } else if (update_method.type() == ADAM) { updater_.reset( new AdamRepresentationsGradientUpdater<FloatT, IdxType>( num_objects, size, update_method.adam_conf(), /* dense */ streams)); } CHECK(updater_ != nullptr); } template <typename FloatT, typename IdxType> Representations<FloatT, IdxType>::~Representations() {} template <typename FloatT, typename IdxType> void Representations<FloatT, IdxType>::initialize(RNG* const rng) { PROFILE_FUNCTION(); init_matrix_glorot(reprs_.getStream(), &reprs_, rng); Parameters<FloatT>::initialize(rng); } template <typename FloatT, typename IdxType> __global__ void average_repr_kernel(const size_t window_size, const FloatT* const repr, const IdxType* const indices, const FloatT* const indices_weights, FloatT* const avg_repr) { const IdxType indices_idx = blockIdx.x * window_size; FloatT agg = 0.0; for (IdxType w = 0; w < window_size; ++w) { const IdxType repr_idx = indices[indices_idx + w]; const FloatT repr_weight = indices_weights == nullptr ? 
1.0 : indices_weights[indices_idx + w]; agg += repr_weight * repr[blockDim.x * repr_idx + threadIdx.x]; } avg_repr[blockIdx.x * blockDim.x + threadIdx.x] = agg / window_size; } template <typename FloatT, typename IdxType> device_matrix<FloatT>* Representations<FloatT, IdxType>::get_representations( const cudaStream_t stream, const device_matrix<IdxType>& indices) const { PROFILE_FUNCTION_WITH_STREAM(stream); const size_t num_requested = indices.size(); DCHECK_GT(num_requested, 0); device_matrix<FloatT>* const requested_reprs = new device_matrix<FloatT>( size(), num_requested, stream); LAUNCH_KERNEL( average_repr_kernel<<<num_requested, /* num_blocks */ size(), /* threads_per_block */ 0, stream>>>( static_cast<size_t>(1) /* window_size */, reprs_.getData(), /* repr_input */ indices.getData(), /* idx_input */ (FloatT*) nullptr, /* idx_input_weights */ requested_reprs->getData() /* output */)); return requested_reprs; } template <typename FloatT, typename IdxType> device_matrix<FloatT>* Representations<FloatT, IdxType>::get_representation(const IdxType idx) const { PROFILE_FUNCTION(); const cudaStream_t stream = DefaultStream::get()->next(); std::unique_ptr<device_matrix<FloatT>> repr( new device_matrix<FloatT>(size(), 1, stream)); thrust::copy(reprs_.begin(idx), reprs_.begin(idx + 1), repr->begin()); return repr.release(); } template <typename FloatT, typename IdxType> device_matrix<FloatT>* Representations<FloatT, IdxType>::get_average_representations( const cudaStream_t stream, const device_matrix<IdxType>& indices, const size_t window_size, const device_matrix<FloatT>* const indices_weights) const { PROFILE_FUNCTION_WITH_STREAM(stream); const int32 num_requested = indices.size(); const int32 num_average_requested = num_requested / window_size; if (indices_weights != nullptr) { CHECK_EQ(num_requested, indices_weights->size()); } DCHECK_EQ(num_requested % window_size, 0); DCHECK_GT(num_average_requested, 0); device_matrix<FloatT>* avg_requested_reps = new device_matrix<FloatT>(size(), num_average_requested, stream); LAUNCH_KERNEL( average_repr_kernel<<<num_average_requested, /* num_blocks */ size(), /* threads_per_block */ 0, stream>>>( window_size, reprs_.getData(), /* repr_input */ indices.getData(), /* idx_input */ indices_weights != nullptr ? 
indices_weights->getData() : (FloatT*) nullptr, /* idx_input_weights */ avg_requested_reps->getData() /* output */)); return avg_requested_reps; } template <typename FloatT, typename IdxType> device_matrix<FloatT>* Representations<FloatT, IdxType>::compute_similarity( const device_matrix<FloatT>& first, const device_matrix<FloatT>& second) const { PROFILE_FUNCTION(); CHECK_DIMENSIONS_EQUAL(first, second); const cudaStream_t stream = merge_streams( first.getStream(), second.getStream()); std::unique_ptr<device_matrix<FloatT>> multiplied_reprs( hadamard_product(stream, first, second)); device_matrix<FloatT>* const similarities = new device_matrix<FloatT>( 1, /* num_rows */ multiplied_reprs->getCols(), multiplied_reprs->getStream()); reduce_axis( similarities->getStream(), FIRST_AXIS, *multiplied_reprs, similarities); return similarities; } template <typename FloatT, typename IdxType> std::vector<std::vector<FloatT>> Representations<FloatT, IdxType>::compute_similarity( const device_matrix<FloatT>& query_input_vectors, const std::vector<IdxType>& indices) const { PROFILE_FUNCTION(); CHECK_EQ(query_input_vectors.getRows(), size()); const size_t num_query_vectors = query_input_vectors.getCols(); const size_t num_candidate_representations = indices.size(); std::unique_ptr<device_matrix<IdxType>> d_indices( device_matrix<IdxType>::create_column(query_input_vectors.getStream(), indices)); // get_representations ignores dimensionality of d_indices. std::unique_ptr<device_matrix<FloatT>> candidate_representations( get_representations(reprs_.getStream(), *d_indices)); // Broadcast the candidate representation vectors, such that [e1 e2 e3] // becomes [e1 e2 e3 e1 e2 e3] if num_query_vectors == 2. std::unique_ptr<device_matrix<FloatT>> repeated_candidate_representations( repmat(candidate_representations->getStream(), *candidate_representations, num_query_vectors /* num_repeats */)); // Holds the repeated query vectors and afterwards the multiplied representations. // // Broadcasts the query vectors such that [q1 q2 q3] becomes [q1 q1 q2 q2 q3 q3] if // if num_candidate_representations == 2. std::unique_ptr<device_matrix<FloatT>> broadcasted_query_vectors( broadcast_columns(query_input_vectors.getStream(), query_input_vectors, num_candidate_representations)); CHECK_DIMENSIONS_EQUAL(*repeated_candidate_representations, *broadcasted_query_vectors); std::unique_ptr<device_matrix<FloatT>> d_similarities( compute_similarity(*repeated_candidate_representations, *broadcasted_query_vectors)); const FloatT* const flattened_similarities = get_array( d_similarities->getStream(), *d_similarities); std::vector<std::vector<FloatT>> similarities; for (size_t i = 0; i < num_candidate_representations * num_query_vectors; i += num_candidate_representations) { similarities.push_back(std::vector<FloatT>( flattened_similarities + i, flattened_similarities + i + num_candidate_representations)); } delete [] flattened_similarities; return similarities; } template <typename FloatT, typename IdxType> FloatT Representations<FloatT, IdxType>::compute_similarity(const IdxType first, const IdxType second) const { CHECK_LT(first, num_objects()); CHECK_LT(second, num_objects()); std::unique_ptr<device_matrix<IdxType>> first_indices( device_matrix<IdxType>::create_column(reprs_.getStream(), {first})); std::unique_ptr<device_matrix<IdxType>> second_indices( device_matrix<IdxType>::create_column(reprs_.getStream(), {second})); // get_representations ignores dimensionality of d_indices. 
std::unique_ptr<device_matrix<FloatT>> first_representations( get_representations(reprs_.getStream(), *first_indices)); inplace_l2_normalize_columns(first_representations.get()); std::unique_ptr<device_matrix<FloatT>> second_representations( get_representations(reprs_.getStream(), *second_indices)); inplace_l2_normalize_columns(second_representations.get()); std::unique_ptr<device_matrix<FloatT>> similarities( compute_similarity(*first_representations, *second_representations)); const FloatT* const flattened_similarities = get_array( similarities->getStream(), *similarities); const FloatT similarity = flattened_similarities[0]; delete [] flattened_similarities; return similarity; } template <typename FloatT, typename IdxType> void Representations<FloatT, IdxType>::update( const typename Storage<FloatT>::Gradients& gradients, const FloatT learning_rate, const FloatT scaled_regularization_lambda, Streams* const streams) { typename std::unique_ptr<typename RepresentationsStorage<FloatT, IdxType>::GradientType> gradient_desc( ConstructGradient::construct(gradients, *this, this->id_)); // No gradient. if (gradient_desc.get() == nullptr) { return; } updater_->update(this, gradient_desc.get(), learning_rate, scaled_regularization_lambda, streams); } template <typename FloatT, typename IdxType> FloatT Representations<FloatT, IdxType>::get_parameter_gradient( const typename Storage<FloatT>::Gradients& gradients, const size_t idx) const { std::unique_ptr<const typename RepresentationsStorage<FloatT, IdxType>::GradientType> gradient_desc( ConstructGradient::construct(gradients, *this, this->id_)); if (gradient_desc == nullptr) { return 0.0; } else { return RepresentationsStorage<FloatT, IdxType>::get_parameter_gradient( *gradient_desc, idx); } } template <typename FloatT> Transform<FloatT>::Transform( const ParamIdentifier id, const lse::ModelDesc::TransformDesc& desc, const size_t word_repr_size, const size_t entity_repr_size, const UpdateMethodConf& update_method, Streams* const streams) : Parameters<FloatT>(id), TransformStorage<FloatT>(word_repr_size, entity_repr_size, streams), desc_(desc), updater_(nullptr) { if (update_method.type() == SGD) { updater_.reset( new SGDTransformGradientUpdater<FloatT>()); } else if (update_method.type() == ADAGRAD) { updater_.reset( new AdagradTransformGradientUpdater<FloatT>( source_repr_size(), target_repr_size(), streams)); } else if (update_method.type() == ADAM) { updater_.reset( new AdamTransformGradientUpdater<FloatT>( source_repr_size(), target_repr_size(), streams)); } CHECK(updater_ != nullptr); } template <typename FloatT> void Transform<FloatT>::initialize(RNG* const rng) { PROFILE_FUNCTION(); // Randomly initialize word-to-entity mapping. init_matrix_glorot(transform_.getStream(), &transform_, rng); // Set bias to null. bias_.fillwith(bias_.getStream(), 0.0); Parameters<FloatT>::initialize(rng); } template <typename FloatT> Transform<FloatT>::~Transform() {} template <typename FloatT> device_matrix<FloatT>* Transform<FloatT>::transform( const cudaStream_t stream, const device_matrix<FloatT>& word_repr, BatchNormalization<FloatT>* const batch_normalization) const { PROFILE_FUNCTION_WITH_STREAM(stream); DCHECK_EQ(word_repr.getRows(), source_repr_size()); DCHECK_GE(word_repr.getCols(), 1); CHECK_MATRIX(word_repr); const size_t num_instances = word_repr.getCols(); CHECK_MATRIX(bias_); // Variable 'res' will hold the result, as well as the bias vector. 
device_matrix<FloatT>* transformed = nullptr; if (batch_normalization != nullptr) { transformed = new device_matrix<FloatT>( target_repr_size(), num_instances, merge_streams(stream, bias_.getStream())); } else { transformed = broadcast_columns( merge_streams(stream, bias_.getStream()), bias_, /* src */ num_instances /* num_repeats */); } CHECK_DIMENSIONS((*transformed), target_repr_size(), num_instances); CHECK_MATRIX(*transformed); const cudaStream_t params_steam = merge_streams( stream, transform_.getStream()); // transform_ is entity_repr_size by word_repr_size // word_repr is word_repr_size by num_words matrix_mult(stream, transform_, CUBLAS_OP_N, word_repr, CUBLAS_OP_N, transformed, /* dst */ (batch_normalization == nullptr) /* dst_contains_bias */); CHECK_MATRIX(*transformed); if (batch_normalization != nullptr) { batch_normalization->forward( *transformed, bias_, transformed); } switch (desc_.nonlinearity()) { case lse::ModelDesc::TransformDesc::TANH: apply_elemwise<func::tanh<FloatT>>( thrust::cuda::par.on(stream), transformed); break; case lse::ModelDesc::TransformDesc::HARD_TANH: apply_elemwise<func::clip<FloatT>>( thrust::cuda::par.on(stream), transformed, func::clip<FloatT>(-1.0, 1.0)); break; default: LOG(FATAL) << "nonlinearity " << desc_.nonlinearity() << " not implemented."; }; CHECK_MATRIX(*transformed); return transformed; } template <typename FloatT> void Transform<FloatT>::backward( const cudaStream_t stream, const typename Storage<FloatT>::ForwardResult& result, const device_matrix<FloatT>& broadcasted_input, const device_matrix<FloatT>& output, device_matrix<FloatT>* const grad_output, Gradients<FloatT>* const gradients) const { std::unique_ptr<typename TransformStorage<FloatT>::GradientType> gradient_desc_ptr( ConstructGradient::construct(*gradients, *this, this->id_)); CHECK_NOTNULL(gradient_desc_ptr.get()); typename TransformStorage<FloatT>::GradientType gradient_desc = *gradient_desc_ptr; CHECK_DIMENSIONS_EQUAL(*grad_output, output); device_matrix<FloatT>* const grad_transform_matrix = &std::get<0>(gradient_desc); device_matrix<FloatT>* const grad_bias = &std::get<1>(gradient_desc); // d cost / d (Wx + b) switch (desc_.nonlinearity()) { case lse::ModelDesc::TransformDesc::TANH: hadamard_product( thrust::cuda::par.on(stream), output, /* first operand */ grad_output, /* second operand and destination */ func::tanh_to_sech2<FloatT>() /* operation for first operand */); break; case lse::ModelDesc::TransformDesc::HARD_TANH: hadamard_product( thrust::cuda::par.on(stream), output, /* first operand */ grad_output, /* second operand and destination */ func::clip_to_clip_deriv<FloatT>(-1.0, 1.0) /* operation for first operand */); break; }; // d cost / d bias; reduce_axis does not expect nulled output. MAKE_MATRIX_NULL(*grad_bias); // TODO(cvangysel): get rid of this horrible construct. 
const TextEntity::ForwardResult< FloatT, typename Storage<FloatT>::ForwardResult::WordIdxType, typename Storage<FloatT>::ForwardResult::EntityIdxType>* textentity_result = dynamic_cast< const TextEntity::ForwardResult< FloatT, typename Storage<FloatT>::ForwardResult::WordIdxType, typename Storage<FloatT>::ForwardResult::EntityIdxType>*>(&result); CHECK_NOTNULL(textentity_result); if (textentity_result->batch_normalization_ == nullptr) { reduce_axis( stream, SECOND_AXIS, *grad_output, grad_bias); } else { textentity_result->batch_normalization_->backward( *grad_output, bias_, grad_output, grad_bias); } // d cost / d transform_matrix // // TODO(cvangysel): figure out whether we actually need the copy here?! MAKE_MATRIX_NULL(*grad_transform_matrix); // TODO(cvangysel): figure out whether we need this. matrix_mult(stream, *grad_output, CUBLAS_OP_N, broadcasted_input, CUBLAS_OP_T, grad_transform_matrix); CHECK_MATRIX(*grad_transform_matrix); CHECK_MATRIX(*grad_bias); } template <typename FloatT> void Transform<FloatT>::update( const typename Storage<FloatT>::Gradients& gradients, const FloatT learning_rate, const FloatT scaled_regularization_lambda, Streams* const streams) { std::unique_ptr<typename TransformStorage<FloatT>::GradientType> gradient_desc( ConstructGradient::construct(gradients, *this, this->id_)); // No gradient. if (gradient_desc == nullptr) { return; } updater_->update(this, gradient_desc.get(), learning_rate, scaled_regularization_lambda, streams); } template <typename FloatT> FloatT Transform<FloatT>::get_parameter_gradient( const typename Storage<FloatT>::Gradients& gradients, const size_t idx) const { std::unique_ptr<const typename TransformStorage<FloatT>::GradientType> gradient_desc( ConstructGradient::construct(gradients, *this, this->id_)); if (gradient_desc.get() == nullptr) { return 0.0; } else { return TransformStorage<FloatT>::get_parameter_gradient(*gradient_desc, idx); } } // Explicit instantiations. template class Parameters<FLOATING_POINT_TYPE>; template class Representations<FLOATING_POINT_TYPE, int32>; template class Transform<FLOATING_POINT_TYPE>;
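// ---------------------------------------------------------------------------
// CPU reference for average_repr_kernel above (a sketch for clarity only; it
// is not used by the library). One output column is produced per group of
// window_size indices, one value per dimension, matching the kernel's layout
// where representations are stored column-major with `dim` contiguous values
// per object and the kernel is launched with blockDim.x == dim.
// ---------------------------------------------------------------------------
template <typename FloatT, typename IdxType>
void average_repr_reference(const size_t window_size,
                            const size_t dim,
                            const size_t num_outputs,
                            const FloatT* const repr,            // dim x num_objects
                            const IdxType* const indices,        // num_outputs * window_size
                            const FloatT* const indices_weights, // may be nullptr
                            FloatT* const avg_repr) {            // dim x num_outputs
  for (size_t out = 0; out < num_outputs; ++out) {
    for (size_t d = 0; d < dim; ++d) {
      FloatT agg = FloatT(0);
      for (size_t w = 0; w < window_size; ++w) {
        const IdxType repr_idx = indices[out * window_size + w];
        const FloatT weight = (indices_weights == nullptr)
            ? FloatT(1)
            : indices_weights[out * window_size + w];
        agg += weight * repr[repr_idx * dim + d];
      }
      avg_repr[out * dim + d] = agg / static_cast<FloatT>(window_size);
    }
  }
}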
the_stack
#include <thrust/binary_search.h> #include <thrust/device_vector.h> #include <thrust/gather.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/unique.h> #include "nvblox/core/accessors.h" #include "nvblox/core/common_names.h" #include "nvblox/integrators/integrators_common.h" #include "nvblox/mesh/impl/marching_cubes_table.h" #include "nvblox/mesh/marching_cubes.h" #include "nvblox/mesh/mesh_integrator.h" #include "nvblox/utils/timing.h" namespace nvblox { MeshIntegrator::~MeshIntegrator() { if (cuda_stream_ != nullptr) { cudaStreamDestroy(cuda_stream_); } } bool MeshIntegrator::integrateBlocksGPU( const TsdfLayer& distance_layer, const std::vector<Index3D>& block_indices, BlockLayer<MeshBlock>* mesh_layer) { timing::Timer mesh_timer("mesh/gpu/integrate"); CHECK_NOTNULL(mesh_layer); CHECK_NEAR(distance_layer.block_size(), mesh_layer->block_size(), 1e-4); if (block_indices.empty()) { return true; } // Initialize the stream if not done yet. if (cuda_stream_ == nullptr) { checkCudaErrors(cudaStreamCreate(&cuda_stream_)); } // Figure out which of these actually contain something worth meshing. float block_size = distance_layer.block_size(); float voxel_size = distance_layer.voxel_size(); // Clear all blocks if they exist. for (const Index3D& block_index : block_indices) { MeshBlock::Ptr mesh_block = mesh_layer->getBlockAtIndex(block_index); if (mesh_block) { mesh_block->clear(); } } // First create a list of meshable blocks. std::vector<Index3D> meshable_blocks; timing::Timer meshable_timer("mesh/gpu/get_meshable"); getMeshableBlocksGPU(distance_layer, block_indices, 5 * voxel_size, &meshable_blocks); meshable_timer.Stop(); // Then get all the candidates and mesh each block. timing::Timer mesh_blocks_timer("mesh/gpu/mesh_blocks"); meshBlocksGPU(distance_layer, meshable_blocks, mesh_layer); // TODO: optionally weld here as well. mesh_blocks_timer.Stop(); return true; } // Kernels // Takes in a vector of blocks, and outputs an integer true if that block is // meshable. // Block size MUST be voxels_per_side x voxels_per_side x voxel_per_size. // Grid size can be anything. __global__ void isBlockMeshableKernel(int num_blocks, const VoxelBlock<TsdfVoxel>** blocks, float cutoff_distance, float min_weight, bool* meshable) { dim3 voxel_index = threadIdx; // This for loop allows us to have fewer threadblocks than there are // blocks in this computation. We assume the threadblock size is constant // though to make our lives easier. for (int block_index = blockIdx.x; block_index < num_blocks; block_index += gridDim.x) { // Get the correct voxel for this index. const TsdfVoxel& voxel = blocks[block_index] ->voxels[voxel_index.z][voxel_index.y][voxel_index.x]; if (fabs(voxel.distance) <= cutoff_distance && voxel.weight >= min_weight) { meshable[block_index] = true; } } } // Takes in a set of blocks arranged in neighbor sets and their relative // positions, then finds vertex candidates, and finally creates the output // meshes for them. // Block size MUST be voxels_per_side x voxels_per_side x voxel_per_size. // Grid size can be anything. 
__global__ void meshBlocksCalculateTableIndicesKernel( int num_blocks, const VoxelBlock<TsdfVoxel>** blocks, const Vector3f* block_positions, float voxel_size, float min_weight, marching_cubes::PerVoxelMarchingCubesResults* marching_cubes_results, int* mesh_block_sizes) { constexpr int kVoxelsPerSide = VoxelBlock<TsdfVoxel>::kVoxelsPerSide; constexpr int kVoxelsPerBlock = kVoxelsPerSide * kVoxelsPerSide * kVoxelsPerSide; constexpr int kCubeNeighbors = 8; const dim3 voxel_index = dim3(threadIdx.z, threadIdx.y, threadIdx.x); const int linear_thread_idx = threadIdx.x + kVoxelsPerSide * (threadIdx.y + kVoxelsPerSide * threadIdx.z); // Preallocate a half voxel size. const Vector3f half_voxel(0.5f, 0.5f, 0.5f); marching_cubes::PerVoxelMarchingCubesResults marching_cubes_results_local; // This for loop allows us to have fewer threadblocks than there are // blocks in this computation. We assume the threadblock size is constant // though to make our lives easier. for (int block_index = blockIdx.x; block_index < num_blocks; block_index += gridDim.x) { // Initialize the calculated output size for this block. __shared__ int mesh_block_size; if (linear_thread_idx == 0) { mesh_block_size = 0; } __syncthreads(); // Getting the block pointer is complicated now so let's just get it. const VoxelBlock<TsdfVoxel>* block = blocks[block_index * kCubeNeighbors]; // Get the linear index of the this voxel in this block const int vertex_neighbor_idx = block_index * kVoxelsPerBlock + linear_thread_idx; // Check all 8 neighbors. bool skip_voxel = false; for (unsigned int i = 0; i < 8; ++i) { Index3D corner_index( voxel_index.x + marching_cubes::kCornerIndexOffsets[i][0], voxel_index.y + marching_cubes::kCornerIndexOffsets[i][1], voxel_index.z + marching_cubes::kCornerIndexOffsets[i][2]); Index3D block_offset(0, 0, 0); bool search_neighbor = false; // Are we in bounds? If not, have to get a neighbor. // The neighbor should correspond to the index in neighbor blocks. for (int j = 0; j < 3; j++) { if (corner_index[j] >= kVoxelsPerSide) { // Here the index is too much. corner_index(j) -= kVoxelsPerSide; block_offset(j) = 1; search_neighbor = true; } } const TsdfVoxel* voxel = nullptr; // Don't look for neighbors for now. if (search_neighbor) { int neighbor_index = marching_cubes::neighborIndexFromDirection(block_offset); const VoxelBlock<TsdfVoxel>* neighbor_block = blocks[block_index * kCubeNeighbors + neighbor_index]; if (neighbor_block == nullptr) { skip_voxel = true; break; } voxel = &neighbor_block ->voxels[corner_index.x()][corner_index.y()][corner_index.z()]; } else { voxel = &block ->voxels[corner_index.x()][corner_index.y()][corner_index.z()]; } // If any of the neighbors are not observed, this can't be a mesh // triangle. if (voxel->weight < min_weight) { skip_voxel = true; break; } // Calculate the position of this voxel. marching_cubes_results_local.vertex_sdf[i] = voxel->distance; marching_cubes_results_local.vertex_coords[i] = block_positions[block_index] + voxel_size * (corner_index.cast<float>() + half_voxel + (kVoxelsPerSide * block_offset).cast<float>()); } if (!skip_voxel) { // If we've made it this far, this needs to be meshed. marching_cubes_results_local.contains_mesh = true; // Calculate the index into the magic marching cubes table marching_cubes_results_local.marching_cubes_table_index = marching_cubes::calculateVertexConfiguration( marching_cubes_results_local.vertex_sdf); // Mesh this cube. This will keep track of what index we're at within // the cube. 
marching_cubes::calculateOutputIndex(&marching_cubes_results_local, &mesh_block_size); // Write out to global memory marching_cubes_results[vertex_neighbor_idx] = marching_cubes_results_local; } // Writing the shared variable block size to global memory (per block) __syncthreads(); if (linear_thread_idx == 0) { mesh_block_sizes[block_index] = mesh_block_size; } } } __global__ void meshBlocksCalculateVerticesKernel( int num_blocks, const marching_cubes::PerVoxelMarchingCubesResults* marching_cubes_results, const int* mesh_block_sizes, CudaMeshBlock* mesh_blocks) { constexpr int kVoxelsPerSide = VoxelBlock<TsdfVoxel>::kVoxelsPerSide; const int linear_thread_idx = threadIdx.x + kVoxelsPerSide * (threadIdx.y + kVoxelsPerSide * threadIdx.z); // This for loop allows us to have fewer threadblocks than there are // blocks in this computation. We assume the threadblock size is constant // though to make our lives easier. for (int block_index = blockIdx.x; block_index < num_blocks; block_index += gridDim.x) { // If this block contains a mesh if (mesh_block_sizes[block_index] > 0) { // Get the linear index of the this voxel in this block constexpr int kVoxelsPerBlock = kVoxelsPerSide * kVoxelsPerSide * kVoxelsPerSide; const int vertex_neighbor_idx = block_index * kVoxelsPerBlock + linear_thread_idx; // If this voxel contains a mesh if (marching_cubes_results[vertex_neighbor_idx].contains_mesh) { // Convert the marching cube table index into vertex coordinates marching_cubes::calculateVertices( marching_cubes_results[vertex_neighbor_idx], &mesh_blocks[block_index]); } } } } // Wrappers void MeshIntegrator::getMeshableBlocksGPU( const TsdfLayer& distance_layer, const std::vector<Index3D>& block_indices, float cutoff_distance, std::vector<Index3D>* meshable_blocks) { CHECK_NOTNULL(meshable_blocks); if (block_indices.size() == 0) { return; } constexpr int kVoxelsPerSide = VoxelBlock<TsdfVoxel>::kVoxelsPerSide; // One block per block, 1 thread per pixel. :) // Dim block can be smaller, but dim_threads must be the same. int dim_block = block_indices.size(); dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); // Collect all the meshable blocks as raw pointers. // Get all the block pointers and positions. block_ptrs_host_.resize(block_indices.size()); for (size_t i = 0; i < block_indices.size(); i++) { block_ptrs_host_[i] = distance_layer.getBlockAtIndex(block_indices[i]).get(); } block_ptrs_device_ = block_ptrs_host_; // Allocate a device vector that holds the meshable result. meshable_device_.resize(block_indices.size()); meshable_device_.setZero(); checkCudaErrors(cudaPeekAtLastError()); isBlockMeshableKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( block_indices.size(), block_ptrs_device_.data(), cutoff_distance, min_weight_, meshable_device_.data()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); meshable_host_ = meshable_device_; for (size_t i = 0; i < block_indices.size(); i++) { if (meshable_host_[i]) { meshable_blocks->push_back(block_indices[i]); } } } void MeshIntegrator::meshBlocksGPU(const TsdfLayer& distance_layer, const std::vector<Index3D>& block_indices, BlockLayer<MeshBlock>* mesh_layer) { if (block_indices.empty()) { return; } timing::Timer mesh_prep_timer("mesh/gpu/mesh_blocks/prep"); constexpr int kVoxelsPerSide = VoxelBlock<TsdfVoxel>::kVoxelsPerSide; constexpr int kCubeNeighbors = 8; // One block per block, 1 thread per voxel. :) // Dim block can be smaller, but dim_threads must be the same. 
int dim_block = block_indices.size(); dim3 dim_threads(kVoxelsPerSide, kVoxelsPerSide, kVoxelsPerSide); // Get the block and voxel size. const float block_size = distance_layer.block_size(); const float voxel_size = distance_layer.voxel_size(); // Get all the block pointers and positions. // Block pointers are actually a 2D array of also the neighbor block pointers // The neighbors CAN be null so they need to be checked. block_ptrs_host_.resize(block_indices.size() * kCubeNeighbors); block_positions_host_.resize(block_indices.size()); for (size_t i = 0; i < block_indices.size(); i++) { block_ptrs_host_[i * kCubeNeighbors] = distance_layer.getBlockAtIndex(block_indices[i]).get(); for (size_t j = 1; j < kCubeNeighbors; j++) { // Get the pointers to all the neighbors as well. block_ptrs_host_[i * kCubeNeighbors + j] = distance_layer .getBlockAtIndex(block_indices[i] + marching_cubes::directionFromNeighborIndex(j)) .get(); } block_positions_host_[i] = getPositionFromBlockIndex(block_size, block_indices[i]); } // Create an output mesh blocks vector.. mesh_blocks_host_.resize(block_indices.size()); block_ptrs_device_ = block_ptrs_host_; block_positions_device_ = block_positions_host_; // Allocate working space constexpr int kNumVoxelsPerBlock = kVoxelsPerSide * kVoxelsPerSide * kVoxelsPerSide; marching_cubes_results_device_.resize(block_indices.size() * kNumVoxelsPerBlock); marching_cubes_results_device_.setZero(); mesh_block_sizes_device_.resize(block_indices.size()); mesh_block_sizes_device_.setZero(); mesh_prep_timer.Stop(); // Run the first half of marching cubes and calculate: // - the per-vertex indexes into the magic triangle table // - the number of vertices in each mesh block. timing::Timer mesh_kernel_1_timer("mesh/gpu/mesh_blocks/kernel_table"); meshBlocksCalculateTableIndicesKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( block_indices.size(), block_ptrs_device_.data(), block_positions_device_.data(), voxel_size, min_weight_, marching_cubes_results_device_.data(), mesh_block_sizes_device_.data()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); mesh_kernel_1_timer.Stop(); // Copy back the new mesh block sizes (so we can allocate space) timing::Timer mesh_copy_timer("mesh/gpu/mesh_blocks/copy_out"); mesh_block_sizes_host_ = mesh_block_sizes_device_; mesh_copy_timer.Stop(); // Allocate mesh blocks timing::Timer mesh_allocation_timer("mesh/gpu/mesh_blocks/block_allocation"); for (size_t i = 0; i < block_indices.size(); i++) { const int num_vertices = mesh_block_sizes_host_[i]; if (num_vertices > 0) { MeshBlock::Ptr output_block = mesh_layer->allocateBlockAtIndex(block_indices[i]); // Grow the vector with a growth factor and a minimum allocation to avoid // repeated reallocation if (num_vertices > output_block->capacity()) { constexpr int kMinimumMeshBlockTrianglesPerVoxel = 1; constexpr int kMinimumMeshBlockVertices = kNumVoxelsPerBlock * kMinimumMeshBlockTrianglesPerVoxel * 3; constexpr int kMeshBlockOverallocationFactor = 2; const int num_vertices_to_allocate = std::max(kMinimumMeshBlockVertices, num_vertices * kMeshBlockOverallocationFactor); output_block->reserveNumberOfVertices(num_vertices_to_allocate); } output_block->resizeToNumberOfVertices(num_vertices); mesh_blocks_host_[i] = CudaMeshBlock(output_block.get()); } } mesh_blocks_device_ = mesh_blocks_host_; mesh_allocation_timer.Stop(); // Run the second half of marching cubes // - Translating the magic table indices into triangle vertices and writing // them into the mesh 
layer. timing::Timer mesh_kernel_2_timer("mesh/gpu/mesh_blocks/kernel_vertices"); meshBlocksCalculateVerticesKernel<<<dim_block, dim_threads, 0, cuda_stream_>>>( block_indices.size(), marching_cubes_results_device_.data(), mesh_block_sizes_device_.data(), mesh_blocks_device_.data()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaStreamSynchronize(cuda_stream_)); mesh_kernel_2_timer.Stop(); // Optional third stage: welding. if (weld_vertices_) { timing::Timer welding_timer("mesh/gpu/mesh_blocks/welding"); weldVertices(block_indices, mesh_layer); } } void MeshIntegrator::weldVertices(const std::vector<Index3D>& block_indices, BlockLayer<MeshBlock>* mesh_layer) { for (const Index3D& index : block_indices) { MeshBlock::Ptr mesh_block = mesh_layer->getBlockAtIndex(index); if (!mesh_block || mesh_block->size() <= 3) { continue; } // Store a copy of the input vertices. input_vertices_ = mesh_block->vertices; input_normals_ = mesh_block->normals; // sort vertices to bring duplicates together thrust::sort(thrust::device, mesh_block->vertices.begin(), mesh_block->vertices.end(), VectorCompare<Vector3f>()); // Find unique vertices and erase redundancies. The iterator will point to // the new last index. auto iterator = thrust::unique(thrust::device, mesh_block->vertices.begin(), mesh_block->vertices.end()); // Figure out the new size. size_t new_size = iterator - mesh_block->vertices.begin(); mesh_block->vertices.resize(new_size); mesh_block->normals.resize(new_size); // Find the indices of the original triangles. thrust::lower_bound(thrust::device, mesh_block->vertices.begin(), mesh_block->vertices.end(), input_vertices_.begin(), input_vertices_.end(), mesh_block->triangles.begin(), VectorCompare<Vector3f>()); // Reshuffle the normals to match. thrust::scatter(thrust::device, input_normals_.begin(), input_normals_.end(), mesh_block->triangles.begin(), mesh_block->normals.begin()); } } } // namespace nvblox
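// ---------------------------------------------------------------------------
// Illustrative call sequence for the GPU mesh integrator above. This is a
// sketch rather than nvblox code: default-constructing MeshIntegrator and the
// helper used to enumerate allocated TSDF blocks (getAllBlockIndices) are
// assumptions about the surrounding API; only integrateBlocksGPU() is taken
// from this file.
// ---------------------------------------------------------------------------
namespace nvblox {

inline void meshWholeLayerExample(const TsdfLayer& tsdf_layer,
                                  BlockLayer<MeshBlock>* mesh_layer) {
  // Mesh every allocated TSDF block; a real caller would typically restrict
  // this to the blocks updated since the previous call.
  const std::vector<Index3D> block_indices = tsdf_layer.getAllBlockIndices();

  MeshIntegrator mesh_integrator;
  mesh_integrator.integrateBlocksGPU(tsdf_layer, block_indices, mesh_layer);
}

}  // namespace nvblox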
the_stack
#define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void CropAndResizeKernel( const int nthreads, const float *image_ptr, const float *boxes_ptr, const int *box_ind_ptr, int num_boxes, int batch, int image_height, int image_width, int image_zdepth, int crop_height, int crop_width, int crop_zdepth, int depth, float extrapolation_value, float *crops_ptr) { CUDA_1D_KERNEL_LOOP(out_idx, nthreads) // nthreads = total_count! { // NHWC: out_idx = d + depth * (w + crop_width * (h + crop_height * b)) position in out grid!!! // NCHW: out_idx = w + crop_width * (h + crop_height * (d + depth * b)) NCYX yes seems like xy is exchanged! // NCHWZ: out_idx = z + crop_zdepth * (w + crop_width * (h + crop_height * (d + depth * b))) z == last. int idx = out_idx; const int z = idx % crop_zdepth; idx /= crop_zdepth; const int x = idx % crop_width; idx /= crop_width; const int y = idx % crop_height; idx /= crop_height; const int d = idx % depth; const int b = idx / depth; // batch const float y1 = boxes_ptr[b * 6]; // b = batch -> 0 // normalized coords!! const float x1 = boxes_ptr[b * 6 + 1]; const float y2 = boxes_ptr[b * 6 + 2]; const float x2 = boxes_ptr[b * 6 + 3]; const float z1 = boxes_ptr[b * 6 + 4]; const float z2 = boxes_ptr[b * 6 + 5]; const int b_in = box_ind_ptr[b]; // == 0 in my case. if (b_in < 0 || b_in >= batch) { continue; } // e.g. (0.4-0.3)*100 = 10 / 7 = 1.3 ratio proposal_size / crops_size. one cell in crops has size 1.3 in_pixel. const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height ) / (crop_height ) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width ) / (crop_width ) : 0; const float zdepth_scale = (crop_zdepth > 1) ? (z2 - z1) * (image_zdepth ) / (crop_zdepth ) : 0; // e.g. 0.3*100 + 5 * 1.3 . Which floating coordinate is going into cell? // e.g. y: 30 (lower bound prop) + 7.5 (current crop position * scale) float tmp_in_y = (crop_height > 1) ? y1 * (image_height ) + y * height_scale + height_scale/2 - 0.5 : 0.5 * (y1 + y2) * (image_height); if (tmp_in_y > image_height - 1) { tmp_in_y = image_height - 1; } if (tmp_in_y < 0) { tmp_in_y = 0; } const float in_y = tmp_in_y; float tmp_in_x = (crop_width > 1) ? x1 * (image_width ) + x * width_scale + width_scale/2 - 0.5 : 0.5 * (x1 + x2) * (image_width ); if (tmp_in_x > image_width - 1) { tmp_in_x = image_width - 1; } if (tmp_in_x < 0) { tmp_in_x= 0; } const float in_x = tmp_in_x; float tmp_in_z = (crop_zdepth > 1) ? z1 * (image_zdepth ) + z * zdepth_scale + zdepth_scale/2 - 0.5 : 0.5 * (z1 + z2) * (image_zdepth); if (tmp_in_z > image_zdepth - 1) { tmp_in_z = image_zdepth - 1; } if (tmp_in_z < 0) { tmp_in_z= 0; } const float in_z = tmp_in_z; // this is just rounding of the floating coord of grid cell. The distances to nearest grid points are // memorized (lerp) to be used for bilinear interpolation later. const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; // const int front_z_index = floorf(in_z); const int back_z_index = ceilf(in_z); const float z_lerp = in_z - front_z_index; // address of image + going to the right feature map. const float *pimage = image_ptr + (b_in * depth + d) * image_height * image_width * image_zdepth; // 1D address of corner points of in_coords to grid cell. 
// NCHWZ: out_idx = z + crop_zdepth * (w + crop_width * (h + crop_height * (d + depth * b))) z == last. const float top_left_front = pimage[front_z_index + image_zdepth * (left_x_index + image_width * top_y_index)]; const float top_right_front = pimage[front_z_index + image_zdepth * (right_x_index + image_width * top_y_index)]; const float bottom_left_front = pimage[front_z_index + image_zdepth * (left_x_index + image_width * bottom_y_index)]; const float bottom_right_front = pimage[front_z_index + image_zdepth * (right_x_index + image_width * bottom_y_index)]; const float top_left_back = pimage[back_z_index + image_zdepth * (left_x_index + image_width * top_y_index)]; const float top_right_back = pimage[back_z_index + image_zdepth * (right_x_index + image_width * top_y_index)]; const float bottom_left_back = pimage[back_z_index + image_zdepth * (left_x_index + image_width * bottom_y_index)]; const float bottom_right_back = pimage[back_z_index + image_zdepth * (right_x_index + image_width * bottom_y_index)]; // Bilinear Interpolation!! These are pixel values now! lerp is the interpolation distance! // No Maxpool, only one point is sampled! const float top_front = top_left_front + (top_right_front - top_left_front) * x_lerp; const float bottom_front = bottom_left_front + (bottom_right_front - bottom_left_front) * x_lerp; const float top_back = top_left_back + (top_right_back - top_left_back) * x_lerp; const float bottom_back = bottom_left_back + (bottom_right_back - bottom_left_back) * x_lerp; const float front = top_front + (bottom_front - top_front) * y_lerp; const float back = top_back + (bottom_back - top_back) * y_lerp; crops_ptr[out_idx] = front + (back - front) * z_lerp; // assign interpolated value to Grid cell! } } __global__ void CropAndResizeBackpropImageKernel( const int nthreads, const float *grads_ptr, const float *boxes_ptr, const int *box_ind_ptr, int num_boxes, int batch, int image_height, int image_width, int image_zdepth, int crop_height, int crop_width, int crop_zdepth, int depth, float *grads_image_ptr) { CUDA_1D_KERNEL_LOOP(out_idx, nthreads) { // NHWC: out_idx = d + depth * (w + crop_width * (h + crop_height * b)) // NCHW: out_idx = w + crop_width * (h + crop_height * (d + depth * b)) // NCHWZ: out_idx = z + crop_zdepth * (w + crop_width * (h + crop_height * (d + depth * b))) z == last. int idx = out_idx; const int z = idx % crop_zdepth; idx /= crop_zdepth; const int x = idx % crop_width; idx /= crop_width; const int y = idx % crop_height; idx /= crop_height; const int d = idx % depth; const int b = idx / depth; const float y1 = boxes_ptr[b * 6]; // b = batch -> 0 // normalized coords!! const float x1 = boxes_ptr[b * 6 + 1]; const float y2 = boxes_ptr[b * 6 + 2]; const float x2 = boxes_ptr[b * 6 + 3]; const float z1 = boxes_ptr[b * 6 + 4]; const float z2 = boxes_ptr[b * 6 + 5]; const int b_in = box_ind_ptr[b]; if (b_in < 0 || b_in >= batch) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height ) / (crop_height ) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width ) / (crop_width ) : 0; const float zdepth_scale = (crop_zdepth > 1) ? (z2 - z1) * (image_zdepth ) / (crop_zdepth ) : 0; float tmp_in_y = (crop_height > 1) ? y1 * (image_height ) + y * height_scale + height_scale/2 - 0.5 : 0.5 * (y1 + y2) * (image_height); if (tmp_in_y > image_height - 1) { tmp_in_y = image_height - 1; } if (tmp_in_y < 0) { tmp_in_y = 0; } const float in_y = tmp_in_y; float tmp_in_x = (crop_width > 1) ? 
x1 * (image_width ) + x * width_scale + width_scale/2 - 0.5 : 0.5 * (x1 + x2) * (image_width ); if (tmp_in_x > image_width - 1) { tmp_in_x = image_width - 1; } if (tmp_in_x < 0) { tmp_in_x= 0; } const float in_x = tmp_in_x; float tmp_in_z = (crop_zdepth > 1) ? z1 * (image_zdepth ) + z * zdepth_scale + zdepth_scale/2 - 0.5 : 0.5 * (z1 + z2) * (image_zdepth); if (tmp_in_z > image_zdepth - 1) { tmp_in_z = image_zdepth - 1; } if (tmp_in_z < 0) { tmp_in_z= 0; } const float in_z = tmp_in_z; const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; const int front_z_index = floorf(in_z); const int back_z_index = ceilf(in_z); const float z_lerp = in_z - front_z_index; float *pimage = grads_image_ptr + (b_in * depth + d) * image_height * image_width * image_zdepth; // top left front atomicAdd( pimage + front_z_index + image_zdepth * (left_x_index + image_width * top_y_index), (1 - x_lerp) * (1 - z_lerp) * (1 - y_lerp) * grads_ptr[out_idx] // THIS IS BACKWARD INTERPOL. ); // top left back atomicAdd( pimage + back_z_index + image_zdepth * (left_x_index + image_width * top_y_index), (1 - x_lerp) * (z_lerp) * (1 - y_lerp) * grads_ptr[out_idx] // THIS IS BACKWARD INTERPOL. ); // top right front atomicAdd( pimage + front_z_index + image_zdepth * (right_x_index + image_width * top_y_index), (x_lerp) * (1 - z_lerp) * (1 - y_lerp) * grads_ptr[out_idx] // THIS IS backward INTERPOL. ); // top right back atomicAdd( pimage + back_z_index + image_zdepth * (right_x_index + image_width * top_y_index), (x_lerp) * (z_lerp) * (1 - y_lerp) * grads_ptr[out_idx] // THIS IS backward INTERPOL. ); // bottom left front atomicAdd( pimage + front_z_index + image_zdepth * (left_x_index + image_width * bottom_y_index), (1 - x_lerp) * (1 - z_lerp) * (y_lerp) * grads_ptr[out_idx] // THIS IS backward INTERPOL. ); // bottom left back atomicAdd( pimage + back_z_index + image_zdepth * (left_x_index + image_width * bottom_y_index), (1 - x_lerp) * (z_lerp) * (y_lerp) * grads_ptr[out_idx] // THIS IS backward INTERPOL. ); // bottom right front atomicAdd( pimage + front_z_index + image_zdepth * (right_x_index + image_width * bottom_y_index), (x_lerp) * (1 - z_lerp) * (y_lerp) * grads_ptr[out_idx] // THIS IS backward INTERPOL. ); // bottom right back atomicAdd( pimage + back_z_index + image_zdepth * (right_x_index + image_width * bottom_y_index), (x_lerp) * (z_lerp) * (y_lerp) * grads_ptr[out_idx] // THIS IS backward INTERPOL. 
); } } void CropAndResizeLaucher( const float *image_ptr, const float *boxes_ptr, const int *box_ind_ptr, int num_boxes, int batch, int image_height, int image_width, int image_zdepth, int crop_height, int crop_width, int crop_zdepth, int depth, float extrapolation_value, float *crops_ptr, cudaStream_t stream) { const int total_count = num_boxes * crop_height * crop_width * crop_zdepth * depth; const int thread_per_block = 1024; const int block_count = (total_count + thread_per_block - 1) / thread_per_block; cudaError_t err; if (total_count > 0) { CropAndResizeKernel<<<block_count, thread_per_block, 0, stream>>>( total_count, image_ptr, boxes_ptr, box_ind_ptr, num_boxes, batch, image_height, image_width, image_zdepth, crop_height, crop_width, crop_zdepth, depth, extrapolation_value, crops_ptr); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } } } void CropAndResizeBackpropImageLaucher( const float *grads_ptr, const float *boxes_ptr, const int *box_ind_ptr, int num_boxes, int batch, int image_height, int image_width, int image_zdepth, int crop_height, int crop_width, int crop_zdepth, int depth, float *grads_image_ptr, cudaStream_t stream) { const int total_count = num_boxes * crop_height * crop_width * crop_zdepth * depth; const int thread_per_block = 1024; const int block_count = (total_count + thread_per_block - 1) / thread_per_block; cudaError_t err; if (total_count > 0) { CropAndResizeBackpropImageKernel<<<block_count, thread_per_block, 0, stream>>>( total_count, grads_ptr, boxes_ptr, box_ind_ptr, num_boxes, batch, image_height, image_width, image_zdepth, crop_height, crop_width, crop_zdepth, depth, grads_image_ptr); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed in Roi Align : %s\n", cudaGetErrorString(err)); exit(-1); } } }
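// ---------------------------------------------------------------------------
// Illustration only (not part of the original file): a minimal host-side
// sketch of the trilinear-interpolation weight decomposition that the forward
// kernel applies and the backward kernel mirrors with its eight atomicAdd
// scatters. The helper name trilerp_weights and the corner-ordering comment
// are assumptions made for this sketch, not names from the original code.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdio>

// Eight corner weights for fractional offsets (x_lerp, y_lerp, z_lerp).
// Order: {TLF, TRF, BLF, BRF, TLB, TRB, BLB, BRB}, matching the corner loads
// in CropAndResizeKernel above.
static void trilerp_weights(float x_lerp, float y_lerp, float z_lerp, float w[8])
{
    w[0] = (1 - x_lerp) * (1 - y_lerp) * (1 - z_lerp);  // top-left-front
    w[1] = (    x_lerp) * (1 - y_lerp) * (1 - z_lerp);  // top-right-front
    w[2] = (1 - x_lerp) * (    y_lerp) * (1 - z_lerp);  // bottom-left-front
    w[3] = (    x_lerp) * (    y_lerp) * (1 - z_lerp);  // bottom-right-front
    w[4] = (1 - x_lerp) * (1 - y_lerp) * (    z_lerp);  // top-left-back
    w[5] = (    x_lerp) * (1 - y_lerp) * (    z_lerp);  // top-right-back
    w[6] = (1 - x_lerp) * (    y_lerp) * (    z_lerp);  // bottom-left-back
    w[7] = (    x_lerp) * (    y_lerp) * (    z_lerp);  // bottom-right-back
}

int main()
{
    float w[8];
    trilerp_weights(0.25f, 0.5f, 0.75f, w);
    float sum = 0.f;
    for (int i = 0; i < 8; ++i) sum += w[i];
    // The weights form a partition of unity, which is why the backward scatter
    // distributes exactly the incoming gradient across the eight corners.
    assert(sum > 0.999f && sum < 1.001f);
    printf("trilinear weight sum = %f\n", sum);
    return 0;
}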
#if ( MODEL == HYDRO ) /******************************************************** 1. Isothermal EoS (EOS_ISOTHERMAL) 2. This file is shared by both CPU and GPU GPU_EoS_Isothermal.cu -> CPU_EoS_Isothermal.cpp 3. Three steps are required to implement an EoS I. Set EoS auxiliary arrays II. Implement EoS conversion functions III. Set EoS initialization functions ********************************************************/ // ============================================= // I. Set EoS auxiliary arrays // ============================================= //------------------------------------------------------------------------------------------------------- // Function : EoS_SetAuxArray_Isothermal // Description : Set the auxiliary arrays AuxArray_Flt/Int[] // // AuxArray_Flt[0] = sound_speed^2 // AuxArray_Flt[1] = temperature in K // // Note : 1. Invoked by EoS_Init_Isothermal() // 2. AuxArray_Flt/Int[] have the size of EOS_NAUX_MAX defined in Macro.h (default = 20) // 3. Add "#ifndef __CUDACC__" since this routine is only useful on CPU // 4. Physical constants such as Const_amu/Const_kB should be set to unity when disabling OPT__UNIT // // Parameter : AuxArray_Flt/Int : Floating-point/Integer arrays to be filled up // // Return : AuxArray_Flt/Int[] //------------------------------------------------------------------------------------------------------- #ifndef __CUDACC__ void EoS_SetAuxArray_Isothermal( double AuxArray_Flt[], int AuxArray_Int[] ) { // Cs^2 = kB*T/m = P/rho AuxArray_Flt[0] = ( OPT__UNIT ) ? ( Const_kB*ISO_TEMP/UNIT_E ) / ( MOLECULAR_WEIGHT*Const_amu/UNIT_M ) : ISO_TEMP / MOLECULAR_WEIGHT; AuxArray_Flt[1] = ISO_TEMP; if ( MPI_Rank == 0 ) { Aux_Message( stdout, " Temperature = %13.7e K\n", ISO_TEMP ); Aux_Message( stdout, " Mean molecular weight = %13.7e\n", MOLECULAR_WEIGHT ); if ( OPT__UNIT ) Aux_Message( stdout, " Sound speed = %13.7e km/s\n", SQRT(AuxArray_Flt[0])*UNIT_V/Const_km ); else Aux_Message( stdout, " Sound speed = %13.7e\n", SQRT(AuxArray_Flt[0]) ); } # ifdef GAMER_DEBUG if ( Hydro_CheckNegative(AuxArray_Flt[0]) ) printf( "ERROR : invalid sound speed squared (%13.7e in code unit) in %s() !!\n", AuxArray_Flt[0], __FUNCTION__ ); # endif } // FUNCTION : EoS_SetAuxArray_Isothermal #endif // #ifndef __CUDACC__ // ============================================= // II. Implement EoS conversion functions // (1) EoS_DensEint2Pres_* // (2) EoS_DensPres2Eint_* // (3) EoS_DensPres2CSqr_* // (4) EoS_DensEint2Temp_* [OPTIONAL] // (5) EoS_DensTemp2Pres_* [OPTIONAL] // (6) EoS_General_* [OPTIONAL] // ============================================= //------------------------------------------------------------------------------------------------------- // Function : EoS_DensEint2Pres_Isothermal // Description : Convert gas mass density and internal energy density to gas pressure // // Note : 1. Internal energy density here is per unit volume instead of per unit mass // 2. 
See EoS_SetAuxArray_Isothermal() for the values stored in AuxArray_Flt/Int[] // // Parameter : Dens : Gas mass density // Eint : Gas internal energy density // Passive : Passive scalars (must not used here) // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas pressure //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensEint2Pres_Isothermal( const real Dens, const real Eint, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); # endif const real Cs2 = AuxArray_Flt[0]; const real Pres = Cs2*Dens; return Pres; } // FUNCTION : EoS_DensEint2Pres_Isothermal //------------------------------------------------------------------------------------------------------- // Function : EoS_DensPres2Eint_Isothermal // Description : Convert gas mass density and pressure to gas internal energy density // // Note : 1. See EoS_DensEint2Pres_Isothermal() // // Parameter : Dens : Gas mass density // Pres : Gas pressure // Passive : Passive scalars (must not used here) // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas internal energy density //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensPres2Eint_Isothermal( const real Dens, const real Pres, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( Hydro_CheckNegative(Pres) ) printf( "ERROR : invalid input pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", Pres, __FILE__, __LINE__, __FUNCTION__ ); # endif const real Eint = (real)1.0e4*Pres; // in principle, it can be set rather arbitrarily since Eint should be useless anyway // --> but still better to have reasonably large Eint to avoid error messages about // Eint<0 during evolution return Eint; } // FUNCTION : EoS_DensPres2Eint_Isothermal //------------------------------------------------------------------------------------------------------- // Function : EoS_DensPres2CSqr_Isothermal // Description : Convert gas mass density and pressure to sound speed squared // // Note : 1. 
See EoS_DensEint2Pres_Isothermal() // // Parameter : Dens : Gas mass density // Pres : Gas pressure // Passive : Passive scalars (must not used here) // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Sound speed squared //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensPres2CSqr_Isothermal( const real Dens, const real Pres, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); # endif const real Cs2 = AuxArray_Flt[0]; return Cs2; } // FUNCTION : EoS_DensPres2CSqr_Isothermal //------------------------------------------------------------------------------------------------------- // Function : EoS_DensEint2Temp_Isothermal // Description : Convert gas mass density and internal energy density to gas temperature // // Note : 1. Internal energy density here is per unit volume instead of per unit mass // 2. See EoS_SetAuxArray_Isothermal() for the values stored in AuxArray_Flt/Int[] // 3. Temperature is in kelvin // // Parameter : Dens : Gas mass density // Eint : Gas internal energy density // Passive : Passive scalars (must not used here) // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas temperature in kelvin //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensEint2Temp_Isothermal( const real Dens, const real Eint, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); # endif const real Temp = AuxArray_Flt[1]; return Temp; } // FUNCTION : EoS_DensEint2Temp_Isothermal //------------------------------------------------------------------------------------------------------- // Function : EoS_DensTemp2Pres_Isothermal // Description : Convert gas mass density and temperature to gas pressure // // Note : 1. See EoS_SetAuxArray_Isothermal() for the values stored in AuxArray_Flt/Int[] // 2. 
Temperature is in kelvin // // Parameter : Dens : Gas mass density // Temp : Gas temperature in kelvin // Passive : Passive scalars (must not used here) // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Gas pressure //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static real EoS_DensTemp2Pres_Isothermal( const real Dens, const real Temp, const real Passive[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG if ( AuxArray_Flt == NULL ) printf( "ERROR : AuxArray_Flt == NULL in %s !!\n", __FUNCTION__ ); if ( Hydro_CheckNegative(Dens) ) printf( "ERROR : invalid input density (%14.7e) at file <%s>, line <%d>, function <%s>\n", Dens, __FILE__, __LINE__, __FUNCTION__ ); # endif const real Cs2 = AuxArray_Flt[0]; const real Pres = Cs2*Dens; return Pres; } // FUNCTION : EoS_DensTemp2Pres_Isothermal //------------------------------------------------------------------------------------------------------- // Function : EoS_General_Isothermal // Description : General EoS converter: In_*[] -> Out[] // // Note : 1. See EoS_DensEint2Pres_Isothermal() // 2. In_*[] and Out[] must NOT overlap // 3. Useless for this EoS // // Parameter : Mode : To support multiple modes in this general converter // Out : Output array // In_* : Input array // AuxArray_* : Auxiliary arrays (see the Note above) // Table : EoS tables // // Return : Out[] //------------------------------------------------------------------------------------------------------- GPU_DEVICE_NOINLINE static void EoS_General_Isothermal( const int Mode, real Out[], const real In_Flt[], const int In_Int[], const double AuxArray_Flt[], const int AuxArray_Int[], const real *const Table[EOS_NTABLE_MAX] ) { // not used by this EoS } // FUNCTION : EoS_General_Isothermal // ============================================= // III. Set EoS initialization functions // ============================================= #ifdef __CUDACC__ # define FUNC_SPACE __device__ static #else # define FUNC_SPACE static #endif FUNC_SPACE EoS_DE2P_t EoS_DensEint2Pres_Ptr = EoS_DensEint2Pres_Isothermal; FUNC_SPACE EoS_DP2E_t EoS_DensPres2Eint_Ptr = EoS_DensPres2Eint_Isothermal; FUNC_SPACE EoS_DP2C_t EoS_DensPres2CSqr_Ptr = EoS_DensPres2CSqr_Isothermal; FUNC_SPACE EoS_DE2T_t EoS_DensEint2Temp_Ptr = EoS_DensEint2Temp_Isothermal; FUNC_SPACE EoS_DT2P_t EoS_DensTemp2Pres_Ptr = EoS_DensTemp2Pres_Isothermal; FUNC_SPACE EoS_GENE_t EoS_General_Ptr = EoS_General_Isothermal; //----------------------------------------------------------------------------------------- // Function : EoS_SetCPU/GPUFunc_Isothermal // Description : Return the function pointers of the CPU/GPU EoS routines // // Note : 1. Invoked by EoS_Init_Isothermal() // 2. Must obtain the CPU and GPU function pointers by **separate** routines // since CPU and GPU functions are compiled completely separately in GAMER // --> In other words, a unified routine like the following won't work // // EoS_SetFunc_Isothermal( CPU_FuncPtr, GPU_FuncPtr ); // // 3. Call-by-reference // // Parameter : EoS_DensEint2Pres_CPU/GPUPtr : CPU/GPU function pointers to be set // EoS_DensPres2Eint_CPU/GPUPtr : ... // EoS_DensPres2CSqr_CPU/GPUPtr : ... // EoS_DensEint2Temp_CPU/GPUPtr : ... // EoS_DensTemp2Pres_CPU/GPUPtr : ... // EoS_General_CPU/GPUPtr : ... 
// // Return : EoS_DensEint2Pres_CPU/GPUPtr, EoS_DensPres2Eint_CPU/GPUPtr, // EoS_DensPres2CSqr_CPU/GPUPtr, EoS_DensEint2Temp_CPU/GPUPtr, // EoS_DensTemp2Pres_CPU/GPUPtr, EoS_General_CPU/GPUPtr //----------------------------------------------------------------------------------------- #ifdef __CUDACC__ __host__ void EoS_SetGPUFunc_Isothermal( EoS_DE2P_t &EoS_DensEint2Pres_GPUPtr, EoS_DP2E_t &EoS_DensPres2Eint_GPUPtr, EoS_DP2C_t &EoS_DensPres2CSqr_GPUPtr, EoS_DE2T_t &EoS_DensEint2Temp_GPUPtr, EoS_DT2P_t &EoS_DensTemp2Pres_GPUPtr, EoS_GENE_t &EoS_General_GPUPtr ) { CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensEint2Pres_GPUPtr, EoS_DensEint2Pres_Ptr, sizeof(EoS_DE2P_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensPres2Eint_GPUPtr, EoS_DensPres2Eint_Ptr, sizeof(EoS_DP2E_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensPres2CSqr_GPUPtr, EoS_DensPres2CSqr_Ptr, sizeof(EoS_DP2C_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensEint2Temp_GPUPtr, EoS_DensEint2Temp_Ptr, sizeof(EoS_DE2T_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_DensTemp2Pres_GPUPtr, EoS_DensTemp2Pres_Ptr, sizeof(EoS_DT2P_t) ) ); CUDA_CHECK_ERROR( cudaMemcpyFromSymbol( &EoS_General_GPUPtr, EoS_General_Ptr, sizeof(EoS_GENE_t) ) ); } #else // #ifdef __CUDACC__ void EoS_SetCPUFunc_Isothermal( EoS_DE2P_t &EoS_DensEint2Pres_CPUPtr, EoS_DP2E_t &EoS_DensPres2Eint_CPUPtr, EoS_DP2C_t &EoS_DensPres2CSqr_CPUPtr, EoS_DE2T_t &EoS_DensEint2Temp_CPUPtr, EoS_DT2P_t &EoS_DensTemp2Pres_CPUPtr, EoS_GENE_t &EoS_General_CPUPtr ) { EoS_DensEint2Pres_CPUPtr = EoS_DensEint2Pres_Ptr; EoS_DensPres2Eint_CPUPtr = EoS_DensPres2Eint_Ptr; EoS_DensPres2CSqr_CPUPtr = EoS_DensPres2CSqr_Ptr; EoS_DensEint2Temp_CPUPtr = EoS_DensEint2Temp_Ptr; EoS_DensTemp2Pres_CPUPtr = EoS_DensTemp2Pres_Ptr; EoS_General_CPUPtr = EoS_General_Ptr; } #endif // #ifdef __CUDACC__ ... else ... #ifndef __CUDACC__ // local function prototypes void EoS_SetAuxArray_Isothermal( double [], int [] ); void EoS_SetCPUFunc_Isothermal( EoS_DE2P_t &, EoS_DP2E_t &, EoS_DP2C_t &, EoS_DE2T_t &, EoS_DT2P_t &, EoS_GENE_t & ); #ifdef GPU void EoS_SetGPUFunc_Isothermal( EoS_DE2P_t &, EoS_DP2E_t &, EoS_DP2C_t &, EoS_DE2T_t &, EoS_DT2P_t &, EoS_GENE_t & ); #endif //----------------------------------------------------------------------------------------- // Function : EoS_Init_Isothermal // Description : Initialize EoS // // Note : 1. Set auxiliary arrays by invoking EoS_SetAuxArray_*() // --> It will be copied to GPU automatically in CUAPI_SetConstMemory() // 2. Set the CPU/GPU EoS routines by invoking EoS_SetCPU/GPUFunc_*() // 3. Invoked by EoS_Init() // --> Enable it by linking to the function pointer "EoS_Init_Ptr" // 4. 
Add "#ifndef __CUDACC__" since this routine is only useful on CPU // // Parameter : None // // Return : None //----------------------------------------------------------------------------------------- void EoS_Init_Isothermal() { // check # ifndef BAROTROPIC_EOS Aux_Error( ERROR_INFO, "must enable BAROTROPIC_EOS in the Makefile for the isothermal EoS !!\n" ); # endif EoS_SetAuxArray_Isothermal( EoS_AuxArray_Flt, EoS_AuxArray_Int ); EoS_SetCPUFunc_Isothermal( EoS_DensEint2Pres_CPUPtr, EoS_DensPres2Eint_CPUPtr, EoS_DensPres2CSqr_CPUPtr, EoS_DensEint2Temp_CPUPtr, EoS_DensTemp2Pres_CPUPtr, EoS_General_CPUPtr ); # ifdef GPU EoS_SetGPUFunc_Isothermal( EoS_DensEint2Pres_GPUPtr, EoS_DensPres2Eint_GPUPtr, EoS_DensPres2CSqr_GPUPtr, EoS_DensEint2Temp_GPUPtr, EoS_DensTemp2Pres_GPUPtr, EoS_General_GPUPtr ); # endif } // FUNCTION : EoS_Init_Isothermal #endif // #ifndef __CUDACC__ #endif // #if ( MODEL == HYDRO )
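// ---------------------------------------------------------------------------
// Illustration only: a standalone sketch of the isothermal relations used
// above, P = Cs^2 * rho with Cs^2 = kB*T / (mu * m_amu). The constants below
// are standard CGS values, not GAMER's Const_* macros or code units, so treat
// this as a rough sanity check rather than a reproduction of
// EoS_SetAuxArray_Isothermal(); the temperature, molecular weight, and
// density are arbitrary example values.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

int main()
{
    const double kB  = 1.380649e-16;   // Boltzmann constant [erg/K]
    const double amu = 1.66053907e-24; // atomic mass unit   [g]
    const double T   = 10.0;           // temperature        [K]   (example value)
    const double mu  = 2.3;            // mean molecular weight    (example value)

    const double Cs2 = kB * T / (mu * amu); // isothermal sound speed squared [cm^2/s^2]
    const double rho = 1.0e-20;             // gas mass density [g/cm^3] (example value)
    const double P   = Cs2 * rho;           // isothermal pressure [erg/cm^3]

    printf("Cs = %13.7e km/s, P = %13.7e erg/cm^3\n", std::sqrt(Cs2) / 1.0e5, P);
    return 0;
}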
#include "dali/kernels/signal/dct/dct_gpu.h" #include <cmath> #include "dali/core/common.h" #include "dali/core/convert.h" #include "dali/core/error_handling.h" #include "dali/core/format.h" #include "dali/core/util.h" #include "dali/kernels/common/utils.h" #include "dali/kernels/kernel.h" #include "dali/kernels/signal/dct/table.h" #include "dali/core/tensor_shape_print.h" namespace dali { namespace kernels { namespace signal { namespace dct { // The kernel processes data with the shape reduced to 3D. // Transform is applied over the middle axis. template <typename OutputType, typename InputType, bool HasLifter> __global__ void ApplyDct(const typename Dct1DGpu<OutputType, InputType>::SampleDesc *samples, const BlockDesc<3> *blocks, const float *lifter_coeffs) { extern __shared__ char cos_table_shm[]; OutputType *cos_table = reinterpret_cast<OutputType*>(cos_table_shm); int bid = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z); int tid = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_vol = blockDim.x * blockDim.y * blockDim.z; auto block = blocks[bid]; const auto &sample = samples[block.sample_idx]; ivec3 in_stride = sample.in_stride; ivec3 out_stride = sample.out_stride; const OutputType *cos_table_inp = sample.cos_table + block.start.y * sample.input_length; size_t size = (block.end.y - block.start.y) * sample.input_length; for (size_t i = tid; i < size; i += block_vol) { cos_table[i] = cos_table_inp[i]; } __syncthreads(); for (int z = block.start.z + threadIdx.z; z < block.end.z; z += blockDim.z) { for (int y = block.start.y + threadIdx.y; y < block.end.y; y += blockDim.y) { const OutputType *cos_row = cos_table + sample.input_length * (y - block.start.y); float coeff = HasLifter ? lifter_coeffs[y] : 1.f; for (int x = block.start.x + threadIdx.x; x < block.end.x; x += blockDim.x) { int output_idx = out_stride[0]*z + out_stride[1]*y + x; const InputType *input = sample.input + in_stride[0]*z + x; OutputType out_val = 0; for (int i = 0; i < sample.input_length; ++i) { out_val = fma(*input, cos_row[i], out_val); input += in_stride[1]; } sample.output[output_idx] = HasLifter ? out_val * coeff : out_val; } } } } template <typename OutputType, typename InputType, bool HasLifter> __global__ void ApplyDctInner(const typename Dct1DGpu<OutputType, InputType>::SampleDesc *samples, const BlockSetupInner::BlockDesc *blocks, const float *lifter_coeffs) { extern __shared__ char shm[]; auto block = blocks[blockIdx.x]; auto sample = samples[block.sample_idx]; int ndct = sample.out_stride[0]; int64_t nframes = block.frame_count; int input_len = sample.input_length * nframes; auto *in_frames = reinterpret_cast<InputType*>(shm); auto *cos_table = reinterpret_cast<OutputType*>(in_frames + input_len); auto *input = sample.input + block.frame_start * sample.input_length; auto *output = sample.output + block.frame_start * ndct; int tid = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_vol = blockDim.x * blockDim.y * blockDim.z; int table_len = sample.input_length * ndct; for (int idx = tid; idx < table_len; idx += block_vol) { cos_table[idx] = sample.cos_table[idx]; } for (int idx = tid; idx < input_len; idx += block_vol) { in_frames[idx] = input[idx]; } __syncthreads(); for (int f = 0; f < nframes; ++f) { const auto *in_frame = in_frames + sample.input_length * f; auto *out_frame = output + ndct * f; for (int y = threadIdx.y; y < ndct; y += blockDim.y) { float lifter_coeff = HasLifter ? 
lifter_coeffs[y] : 1.f; const auto *cos_row = &cos_table[y * sample.input_length]; OutputType acc = 0; for (int x = threadIdx.x; x < sample.input_length; x += blockDim.x) { acc = fma(in_frame[x], cos_row[x], acc); } acc += __shfl_down_sync(0xffffffff, acc, 16); acc += __shfl_down_sync(0xffffffff, acc, 8); acc += __shfl_down_sync(0xffffffff, acc, 4); acc += __shfl_down_sync(0xffffffff, acc, 2); acc += __shfl_down_sync(0xffffffff, acc, 1); if (threadIdx.x == 0) { out_frame[y] = HasLifter ? acc * lifter_coeff : acc; } } } } template <typename OutputType, typename InputType> KernelRequirements Dct1DGpu<OutputType, InputType>::Setup(KernelContext &ctx, const InListGPU<InputType> &in, span<const DctArgs> args, int axis) { DALI_ENFORCE(args.size() == in.num_samples()); KernelRequirements req{}; ScratchpadEstimator se{}; args_.clear(); cos_tables_.clear(); sample_descs_.clear(); int64_t dims = in.sample_dim(); TensorListShape<> out_shape(in.num_samples(), dims); TensorListShape<3> reduced_shape(in.num_samples()); max_cos_table_size_ = 0; axis_ = axis >= 0 ? axis : dims - 1; DALI_ENFORCE(axis_ >= 0 && axis_ < dims, make_string("Axis is out of bounds: ", axis_)); inner_axis_ = true; for (int s = 0; s < args.size(); ++s) { args_.push_back(args[s]); auto &arg = args_.back(); auto in_shape = in.tensor_shape_span(s); int64_t n = in_shape[axis_]; if (arg.dct_type == 1) { DALI_ENFORCE(n > 1, "DCT type I requires an input length > 1"); if (arg.normalize) { DALI_WARN("DCT type-I does not support orthogonal normalization. Ignoring"); arg.normalize = false; } } if (arg.ndct <= 0) { arg.ndct = n; } if (cos_tables_.find({n, arg}) == cos_tables_.end()) { cos_tables_[{n, arg}] = nullptr; se.add<mm::memory_kind::device, OutputType>(n * arg.ndct); if (n * arg.ndct > max_cos_table_size_) { max_cos_table_size_ = n * arg.ndct; } } auto reduced_samle_shape = reduce_shape(in_shape, axis_, arg.ndct); reduced_shape.set_tensor_shape(s, reduced_samle_shape); if (reduced_samle_shape[2] != 1) inner_axis_ = false; auto sample_shape = in.shape[s]; sample_shape[axis_] = arg.ndct; out_shape.set_tensor_shape(s, sample_shape); } se.add<mm::memory_kind::pinned, OutputType>(max_cos_table_size_); if (cos_tables_.size() > 1) { se.add<mm::memory_kind::pinned, OutputType>(max_cos_table_size_); } se.add<mm::memory_kind::device, SampleDesc>(in.num_samples()); if (inner_axis_) { block_setup_inner_.Setup(reduced_shape); se.add<mm::memory_kind::device, BlockSetupInner::BlockDesc>(block_setup_inner_.Blocks().size()); } else { block_setup_.SetupBlocks(reduced_shape, true); se.add<mm::memory_kind::device, BlockDesc<3>>(block_setup_.Blocks().size()); } req.output_shapes = {out_shape}; req.scratch_sizes = se.sizes; return req; } template <typename OutputType, typename InputType> DLL_PUBLIC void Dct1DGpu<OutputType, InputType>::Run(KernelContext &ctx, const OutListGPU<OutputType> &out, const InListGPU<InputType> &in, InTensorGPU<float, 1> lifter_coeffs) { OutputType *cpu_cos_table[2]; cpu_cos_table[0] = ctx.scratchpad->AllocatePinned<OutputType>(max_cos_table_size_); if (cos_tables_.size() > 1) { cpu_cos_table[1] = ctx.scratchpad->AllocatePinned<OutputType>(max_cos_table_size_); } int i = 0; for (auto &table_entry : cos_tables_) { auto cpu_table = cpu_cos_table[i % 2]; auto &buffer_event = buffer_events_[i % 2]; int n; DctArgs arg; std::tie(n, arg) = table_entry.first; CUDA_CALL(cudaEventSynchronize(buffer_event)); FillCosineTable(cpu_table, n, arg); table_entry.second = ctx.scratchpad->ToGPU(ctx.gpu.stream, span<OutputType>(cpu_table, n * 
arg.ndct)); CUDA_CALL(cudaEventRecord(buffer_event, ctx.gpu.stream)); ++i; } sample_descs_.clear(); sample_descs_.reserve(args_.size()); int s = 0; int max_ndct = 0; int max_input_length = 0; for (auto arg : args_) { auto in_shape = reduce_shape(in.tensor_shape_span(s), axis_); auto out_shape = reduce_shape(out.tensor_shape_span(s), axis_); DALI_ENFORCE(lifter_coeffs.num_elements() == 0 || out_shape[1] <= lifter_coeffs.num_elements(), make_string("Not enough lifter coefficients. NDCT for sample ", s, " is ", out_shape[1], " and only ", lifter_coeffs.num_elements(), " coefficients were passed.")); ivec3 out_stride = GetStrides(ivec3{out_shape[0], out_shape[1], out_shape[2]}); ivec3 in_stride = GetStrides(ivec3{in_shape[0], in_shape[1], in_shape[2]});; int n = in_shape[1]; auto *cos_tables = cos_tables_[{n, arg}]; sample_descs_.push_back(SampleDesc{out.tensor_data(s), in.tensor_data(s), cos_tables, in_stride, out_stride, n}); max_ndct = std::max(max_ndct, arg.ndct); max_input_length = std::max(max_input_length, n); ++s; } if (inner_axis_) { RunInnerDCT(ctx, max_input_length, lifter_coeffs); } else { RunPlanarDCT(ctx, max_ndct, lifter_coeffs); } } void BlockSetupInner::Setup(const TensorListShape<3> &reduced_shape) { blocks_.clear(); int64_t bid = 0; for (int s = 0; s < reduced_shape.num_samples(); ++s) { assert(reduced_shape[s][2] == 1); int64_t nframes = reduced_shape[s][0]; int64_t nblocks = div_ceil(nframes, frames_per_block_); blocks_.resize(blocks_.size() + nblocks); for (int64_t f = 0; f < nframes; f += frames_per_block_, ++bid) { blocks_[bid].sample_idx = s; blocks_[bid].frame_start = f; blocks_[bid].frame_count = std::min(frames_per_block_, nframes - f); } } } template <typename OutputType, typename InputType> void Dct1DGpu<OutputType, InputType>::RunInnerDCT(KernelContext &ctx, int64_t max_input_length, InTensorGPU<float, 1> lifter_coeffs) { SampleDesc *sample_descs_gpu; BlockSetupInner::BlockDesc *block_descs_gpu; std::tie(sample_descs_gpu, block_descs_gpu) = ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, sample_descs_, block_setup_inner_.Blocks()); dim3 block_dim = block_setup_inner_.BlockDim(); dim3 grid_dim = block_setup_inner_.GridDim(); size_t shm_size = block_setup_inner_.SharedMemSize<OutputType, InputType>(max_input_length, max_cos_table_size_); if (lifter_coeffs.num_elements() > 0) { ApplyDctInner<OutputType, InputType, true> <<<grid_dim, block_dim, shm_size, ctx.gpu.stream>>>(sample_descs_gpu, block_descs_gpu, lifter_coeffs.data); } else { ApplyDctInner<OutputType, InputType, false> <<<grid_dim, block_dim, shm_size, ctx.gpu.stream>>>(sample_descs_gpu, block_descs_gpu, nullptr); } } template <typename OutputType, typename InputType> void Dct1DGpu<OutputType, InputType>::RunPlanarDCT(KernelContext &ctx, int max_ndct, InTensorGPU<float, 1> lifter_coeffs) { SampleDesc *sample_descs_gpu; BlockDesc<3> *block_descs_gpu; std::tie(sample_descs_gpu, block_descs_gpu) = ctx.scratchpad->ToContiguousGPU(ctx.gpu.stream, sample_descs_, block_setup_.Blocks()); dim3 grid_dim = block_setup_.GridDim(); dim3 block_dim = block_setup_.BlockDim(); size_t shm_size = sizeof(OutputType) * (max_cos_table_size_ + 32 * max_ndct); auto block = block_setup_.Blocks()[0]; if (lifter_coeffs.num_elements() > 0) { ApplyDct<OutputType, InputType, true> <<<grid_dim, block_dim, shm_size, ctx.gpu.stream>>>(sample_descs_gpu, block_descs_gpu, lifter_coeffs.data); } else { ApplyDct<OutputType, InputType, false> <<<grid_dim, block_dim, shm_size, ctx.gpu.stream>>>(sample_descs_gpu, block_descs_gpu, nullptr); } } 
template class Dct1DGpu<float, float>; template class Dct1DGpu<double, double>; } // namespace dct } // namespace signal } // namespace kernels } // namespace dali
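// ---------------------------------------------------------------------------
// Illustration only: FillCosineTable() is not shown in this file, so the
// sketch below builds the most common case, a DCT-II table with optional
// orthonormal scaling, to make the row-major [ndct x input_length] layout
// consumed by ApplyDct/ApplyDctInner concrete. The exact normalization in the
// kernel library depends on DctArgs (dct_type, normalize); fill_dct2_table is
// a hypothetical helper, not the library function.
// ---------------------------------------------------------------------------
#include <cmath>
#include <vector>

// cos_table is laid out row-major as [ndct x input_length], matching
// "cos_row = cos_table + y * input_length" in the kernels above.
static void fill_dct2_table(std::vector<float> &table, int input_length, int ndct,
                            bool normalize)
{
    table.resize(static_cast<size_t>(ndct) * input_length);
    const double pi    = std::acos(-1.0);
    const double phase = pi / input_length;
    for (int k = 0; k < ndct; ++k) {
        double scale = 1.0;
        if (normalize)  // orthonormal DCT-II scaling
            scale = std::sqrt((k == 0 ? 1.0 : 2.0) / input_length);
        for (int n = 0; n < input_length; ++n)
            table[static_cast<size_t>(k) * input_length + n] =
                static_cast<float>(scale * std::cos(phase * (n + 0.5) * k));
    }
}

int main()
{
    std::vector<float> table;
    fill_dct2_table(table, 64, 40, true);  // e.g. 64-sample frames -> 40 DCT coefficients
    return 0;
}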
#define ROUND_OFF 50000 #define CUDA_NUM_THREADS 1024 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) #define GET_BLOCKS(n, t) (n+t-1) / t // == Dimension rearrangement Kernel __global__ void blob_rearrange_kernel2_1d(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + 0); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } void blob_rearrange_ongpu_1d(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight, cudaStream_t stream) { int threads_per_block=16; dim3 totalBlocksRearr((widthheight-1)/threads_per_block+1, channels, num); cudaError_t err; blob_rearrange_kernel2_1d<<<totalBlocksRearr, threads_per_block, 0, stream>>> (in, out, num, channels, width, height, widthheight, padding, pwidthheight); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString(err)); exit(-1); } } // == Correlation Kernel __global__ void CorrelateData_1d(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const float *bottom0, const float *bottom1, float *top) { extern __shared__ char patch_data_char[]; float *patch_data = (float *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ float sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute correlation for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width + x_shift) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x2+i) * bottomchannels + ch; //int idx2 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; //printf("x1 %d x2 %d bh %d bw %d bc %d i %d ch %d y1 %d idx2 %d\n", x1, x2, bottomheight, bottomwidth, bottomchannels, item, ch, y1, idx2); sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { float total_sum 
= 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } //printf("ch_off %d sum %f\n", ch_off, total_sum); const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } __global__ void CorrelateDataSubtract_1d(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const float *bottom0, const float *bottom1, float *top) { CUDA_KERNEL_LOOP(index, nthreads) { int x = index % topwidth; //w-pos int y = (index / topwidth) % topheight; //h-pos int c = (index / topwidth / topheight) % topchannels; //channels // Offset of patch in image 2 int s2o = (c % neighborhood_grid_width + x_shift) * stride2; // First (upper left) position of kernel center in current neighborhood in image 1 int x1 = x*stride1 + kernel_radius + max_displacement; int y1 = y*stride1 + kernel_radius + 0; // Iterate through 3D patch float sum = 0; for(int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT for(int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH for(int l = 0; l < bottomchannels; l++) { // CHANNELS // Calculate position in image 2 int x2 = x1 + s2o; int y2 = y1; // Indices in bottom data: (CH=l,W=x2,H=y2,N) int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + l; int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + l; // Do the correlation: sum += fabsf(bottom0[idx1] - bottom1[idx2]); } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; top[index + item*topcount] = sum / (float)sumelems; } } void CorrelateData_ongpu_1d(const float *rbot1, const float *rbot2, float *output, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int x_shift, int neighborhood_grid_width_, int kernel_radius_, int kernel_size, int stride1, int stride2, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int corr_type_multiply, cudaStream_t stream) { dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int shared_memory_per_block = (kernel_size*kernel_size)*nInputPlane; int outputCount = nOutputCols * nOutputRows * nOutputPlane; int outputThreadCount = outputCount; if (corr_type_multiply == 1) { dim3 totalBlocksCorr(nOutputCols, nOutputRows, batchSize); CorrelateData_1d<<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(float), stream>>>( outputThreadCount, batchSize, nOutputCols, nOutputRows, nOutputPlane, outputCount, max_displacement, x_shift, neighborhood_grid_width_, kernel_radius_, kernel_size, stride1, stride2, paddedbottomwidth, paddedbottomheight, nInputPlane, rbot1, rbot2, output ); } else { for (int n = 0; n < batchSize; n++) { CorrelateDataSubtract_1d<<<GET_BLOCKS(outputThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( outputThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, outputCount, max_displacement, x_shift, neighborhood_grid_width_, kernel_radius_, stride1, stride2, paddedbottomwidth, paddedbottomheight, nInputPlane, rbot1, rbot2, output ); } } } // == Correlation Backward Pass Kernel (For Blob 0) __global__ void CorrelateDataBackward0_1d(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int 
max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, float *bottom0diff, const float *bottom1, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 float sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + (l+s2o)) * bottomchannels + n; float bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) __global__ void CorrelateDataBackward1_1d(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const float *bottom0, float *bottom1diff, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; float sum = 0; { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + (l-s2o)) * bottomchannels + n; float bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m,n] // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + m) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Kernel Subtraction // == Correlation Backward Pass Kernel (For Blob 0) __global__ void CorrelateDataBackward0Subtract_1d(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, float *bottom0diff, const float *bottom0, const float *bottom1, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 float sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { // Get bottom1 data: int s2o = stride2 * o; int idxbot = ((item * pbottomheight + (m)) * pbottomwidth + (l+s2o)) * bottomchannels + n; float bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m,n] float bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m,n] float sign = (bot0tmp >= bot1tmp) ? float(1.0) : float(-1.0); // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom0diff[index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) __global__ void CorrelateDataBackward1Subtract_1d(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int x_shift, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const float *bottom0, const float *bottom1, float *bottom1diff, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; float sum = 0; { for(int o = x_shift; o < x_shift + neighborhood_grid_width; o++) { int s2o = stride2 * o; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - 0 - 0 + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - 0 - 0 + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot = ((item * pbottomheight + (m)) * pbottomwidth + (l-s2o)) * bottomchannels + n; float bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m,n] float bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m,n] float sign = (bot0tmp >= bot1tmp) ? float(-1.0) : float(1.0); // Index offset for topdiff in following loops: int op = (o-x_shift); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom1diff[index + item*bottomcount] = sum / (float)sumelems; } } void CorrelateDataBackward_ongpu_1d(const float *rbot1, const float *rbot2, const float *gradOutput, float *gradInput1, float *gradInput2, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int x_shift, int neighborhood_grid_width_, int kernel_radius_, int stride1, int stride2, int nInputCols, int nInputRows, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int pad_size, int corr_type_multiply, cudaStream_t stream) { int inputCount = nInputPlane * nInputRows * nInputCols; int botThreadCount = inputCount; if (corr_type_multiply == 1) { // == Run kernel Backward 0 for (int n = 0; n < batchSize; n++) { //Bottom0 CorrelateDataBackward0_1d<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, x_shift, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size, gradInput1, rbot2, gradOutput ); } // == Run kernel Backward 1 for (int n = 0; n < batchSize; n++) { CorrelateDataBackward1_1d<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, x_shift, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size, rbot1, gradInput2, gradOutput ); } } else { for ( int n = 0; n < batchSize; n++ ) { //Bottom0 CorrelateDataBackward0Subtract_1d<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>> ( botThreadCount, 
batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, x_shift, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size, gradInput1, rbot1, rbot2, gradOutput ); } for (int n = 0; n < batchSize; n++ ) { //Bottom0 CorrelateDataBackward1Subtract_1d<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, x_shift, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size, rbot1, rbot2, gradInput2, gradOutput ); } } }
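// ---------------------------------------------------------------------------
// Illustration only: the backward kernels above rely on the ROUND_OFF trick to
// obtain a ceiling integer division that also behaves correctly for negative
// numerators (plain C++ integer division truncates toward zero). Below is a
// minimal standalone check of that identity; ceil_div_offset and ceil_div_ref
// are hypothetical helper names used only for this sketch.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cmath>

static int ceil_div_offset(int num, int den)   // den > 0, |num| << den * 50000
{
    const int round_off   = 50000;             // same constant as ROUND_OFF above
    const int round_off_d = den * round_off;
    // Shift the numerator far into positive territory so that '/' acts like a
    // floor division, then undo the shift: the result equals ceil(num / den).
    return (num + round_off_d - 1) / den + 1 - round_off;
}

static int ceil_div_ref(int num, int den)      // floating-point reference
{
    return static_cast<int>(std::ceil(static_cast<double>(num) / den));
}

int main()
{
    for (int num = -37; num <= 37; ++num)
        for (int den = 1; den <= 7; ++den)
            assert(ceil_div_offset(num, den) == ceil_div_ref(num, den));
    return 0;
}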
#include "datacu.hpp" #include <assert.h> #include <cfloat> #include <algorithm> #include <sm_20_atomic_functions.h> // ------------------------------------------------------------------- // Helpers // ------------------------------------------------------------------- template<typename T> struct Geom { int subdivisions[2] ; T transform[6] ; Geom(std::array<int,2> const &subdivisions, std::array<double,6> const &transform) { this->subdivisions[0] = subdivisions[0] ; this->subdivisions[1] = subdivisions[1] ; this->transform[0] = transform[0] ; this->transform[1] = transform[1] ; this->transform[2] = transform[2] ; this->transform[3] = transform[3] ; this->transform[4] = transform[4] ; this->transform[5] = transform[5] ; } } ; struct Bounds { int image, offset, hstart, hend, wstart, wend ; bool isEmpty ; } ; template<typename T> __device__ __forceinline__ static Bounds getBounds(int outputIndex, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { Bounds b ; int ph = outputIndex ; int pw = ph / geom.subdivisions[0] ; int pc = pw / geom.subdivisions[1] ; int pr = pc / numChannels ; ph %= geom.subdivisions[0] ; pw %= geom.subdivisions[1] ; pc %= numChannels ; rois += 5 * pr ; // Apply sacle and offset to each ROI coordinate. T u1_ = rois[1] ; T v1_ = rois[2] ; T u2_ = rois[3] ; T v2_ = rois[4] ; T u1 = geom.transform[0] * u1_ + geom.transform[2] * v1_ + geom.transform[4] ; T v1 = geom.transform[1] * u1_ + geom.transform[3] * v1_ + geom.transform[5] ; T u2 = geom.transform[0] * u2_ + geom.transform[2] * v2_ + geom.transform[4] ; T v2 = geom.transform[1] * u2_ + geom.transform[3] * v2_ + geom.transform[5] ; // First and last pixel of each ROI (rounded // for compatibility with the Caffe definition). int roi_image = (int)rois[0]; int roi_start_h = (int)::round(v1) - 1 ; int roi_start_w = (int)::round(u1) - 1 ; int roi_end_h = (int)::round(v2) - 1 ; int roi_end_w = (int)::round(u2) - 1 ; int roi_height = max(roi_end_h - roi_start_h + 1, 1) ; int roi_width = max(roi_end_w - roi_start_w + 1, 1) ; T bin_size_h = (T)roi_height / geom.subdivisions[0] ; T bin_size_w = (T)roi_width / geom.subdivisions[1] ; roi_image = min(max(roi_image - 1,0), (int)size - 1) ; b.offset = (roi_image * numChannels + pc) * (width*height) ; b.wstart = (int)floor(((T)pw) * bin_size_w) ; b.wend = (int)ceil(((T)(pw + 1)) * bin_size_w) ; b.wstart = min(max(b.wstart + roi_start_w, 0), (int)width) ; b.wend = min(max(b.wend + roi_start_w, 0), (int)width) ; b.hstart = (int)floor(((T)ph) * bin_size_h) ; b.hend = (int)ceil(((T)(ph + 1)) * bin_size_h) ; b.hstart = min(max(b.hstart + roi_start_h, 0), (int)height) ; b.hend = min(max(b.hend + roi_start_h, 0), (int)height) ; b.isEmpty = (b.hend <= b.hstart) || (b.wend <= b.wstart) ; return b ; } /* ---------------------------------------------------------------- */ /* roipooling_average_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_average_kernel (T* output, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; T bestValue = 0; const T coeff = ((T)1.) 
/ (T)((b.wend-b.wstart) * (b.hend-b.hstart)); for (int w = b.wstart; w < b.wend; ++w) { for (int h = b.hstart; h < b.hend; ++h) { int index = w * height + h ; bestValue += data[index] * coeff ; } } output[outputIndex] = bestValue ; } } /* ---------------------------------------------------------------- */ /* roipooling_max_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_max_kernel (T* output, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x ; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; if (! b.isEmpty) { T bestValue = -FLT_MAX; for (int w = b.wstart; w < b.wend; ++w) { for (int h = b.hstart; h < b.hend; ++h) { int index = w * height + h ; bestValue = max(bestValue, data[index]) ; } } output[outputIndex] = bestValue ; } else { output[outputIndex] = 0 ; } } } /* ---------------------------------------------------------------- */ /* roipooling_average_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_average_backward_kernel (T* derData, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, const T* derOutput, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs ; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; data += b.offset ; derData += b.offset ; const T coeff = ((T)1.) / (T)((b.wend-b.wstart)*(b.hend-b.hstart)) ; for (int h = b.hstart; h < b.hend; ++h) { for (int w = b.wstart; w < b.wend; ++w) { int index = w * height + h ; atomicAdd(derData + index, derOutput[outputIndex] * coeff) ; } } } } /* ---------------------------------------------------------------- */ /* roipooling_max_backward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void roipooling_max_backward_kernel (T* derData, const T* data, int height, int width, int numChannels, int size, const T* rois, int numROIs, const T* derOutput, Geom<T> geom) { int outputIndex = threadIdx.x + blockIdx.x * blockDim.x; int outputVolume = geom.subdivisions[0] * geom.subdivisions[1] * numChannels * numROIs; if (outputIndex < outputVolume) { Bounds b = getBounds<T>(outputIndex, height,width,numChannels,size, rois,numROIs, geom) ; if (! 
b.isEmpty) { data += b.offset ; derData += b.offset ; int bestIndex = min(b.wstart,width-1) * height + min(b.hstart,height-1); T bestValue = -FLT_MAX; for (int h = b.hstart; h < b.hend; ++h) { for (int w = b.wstart; w < b.wend; ++w) { int index = w * height + h ; T value = data[index] ; if (value > bestValue) { bestValue = value ; bestIndex = index ; } } } atomicAdd(derData + bestIndex, derOutput[outputIndex]) ; } } } // ------------------------------------------------------------------- // Forward // ------------------------------------------------------------------- template<DataType dataType, ROIPooling::Method method> struct ROIPoolingForwardGPU { vl::ErrorCode operator()(ROIPooling &op, Tensor &output, Tensor const &input, Tensor const &rois) { typedef typename vl::DataTypeTraits<dataType>::type type ; auto numROIs = rois.getNumElements() / 5 ; auto outputData = (type*)output.getMemory() ; auto inputData = (type const*)input.getMemory() ; auto height = input.getHeight() ; auto width = input.getWidth() ; auto numChannels = input.getDepth() ; auto size = input.getSize() ; auto roisData = (type const*)rois.getMemory() ; size_t outputVolume = op.subdivisions[0] * op.subdivisions[1] * numChannels * numROIs ; if (method == ROIPooling::Max) { roipooling_max_kernel<type> <<< divideAndRoundUp(outputVolume, (size_t)VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>> (outputData, inputData, height, width, numChannels, size, roisData, numROIs, Geom<type>(op.subdivisions,op.transform)) ; } else if (method == ROIPooling::Average) { roipooling_average_kernel<type> <<< divideAndRoundUp(outputVolume, (size_t)VL_CUDA_NUM_THREADS),VL_CUDA_NUM_THREADS >>> (outputData, inputData, height, width, numChannels, size, roisData, numROIs, Geom<type>(op.subdivisions,op.transform)) ; } else { assert(false) ; } cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? 
vl::VLE_Success : vl::VLE_Cuda ; } } ; template<DataType dataType> struct ROIPoolingForward<VLDT_GPU,dataType> { vl::ErrorCode operator()(ROIPooling &op, Tensor pooled, Tensor input, Tensor rois) { switch (op.method) { case ROIPooling::Max: return ROIPoolingForwardGPU<dataType,ROIPooling::Max> ()(op,pooled,input,rois) ; case ROIPooling::Average: return ROIPoolingForwardGPU<dataType,ROIPooling::Average> ()(op,pooled,input,rois) ; default: return VLE_IllegalArgument ; } } } ; // ------------------------------------------------------------------- // Backward // ------------------------------------------------------------------- template<DataType dataType, ROIPooling::Method method> struct ROIPoolingBackwardGPU { vl::ErrorCode operator()(ROIPooling &op, Tensor &derInput, Tensor const &input, Tensor const &rois, Tensor const &derOutput) { typedef typename vl::DataTypeTraits<dataType>::type type ; auto numROIs = rois.getNumElements() / 5 ; auto derInputData = (type*)derInput.getMemory() ; auto derOutputData = (type const*)derOutput.getMemory() ; auto inputData = (type const*)input.getMemory() ; auto height = input.getHeight() ; auto width = input.getWidth() ; auto numChannels = input.getDepth() ; auto size = input.getSize() ; auto roisData = (type const*)rois.getMemory() ; size_t outputVolume = op.subdivisions[0] * op.subdivisions[1] * numChannels * numROIs ; if (method == ROIPooling::Max) { roipooling_max_backward_kernel<type> <<< divideAndRoundUp(outputVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derInputData, inputData, height, width, numChannels, size, roisData, numROIs, derOutputData, Geom<type>(op.subdivisions,op.transform)) ; } else if (method == ROIPooling::Average) { roipooling_average_backward_kernel<type> <<< divideAndRoundUp(outputVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derInputData, inputData, height, width, numChannels, size, roisData, numROIs, derOutputData, Geom<type>(op.subdivisions,op.transform)) ; } else { assert(false) ; } cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; template<DataType dataType> struct ROIPoolingBackward<VLDT_GPU,dataType> { vl::ErrorCode operator()(ROIPooling &op, Tensor &derInput, Tensor const &input, Tensor const &rois, Tensor const &derOutput) { switch (op.method) { case ROIPooling::Max: return ROIPoolingBackwardGPU<dataType,ROIPooling::Max> ()(op,derInput,input,rois,derOutput) ; case ROIPooling::Average: return ROIPoolingBackwardGPU<dataType,ROIPooling::Average> ()(op,derInput,input,rois,derOutput) ; default: return VLE_IllegalArgument ; } } } ;
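// ---------------------------------------------------------------------------
// Illustration only: a host-side sketch of the per-bin boundary computation in
// getBounds() above, for a single spatial axis. roi_bin_range is a
// hypothetical helper that mirrors the floor/ceil + clamp pattern but skips
// the affine ROI transform and the Caffe-style rounding of the ROI corners.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>
#include <cstdio>

// Bin `p` out of `subdivisions` bins of a ROI that starts at `roi_start` and
// spans `roi_len` pixels, clamped to a feature map of size `map_size`.
static void roi_bin_range(int p, int subdivisions, int roi_start, int roi_len,
                          int map_size, int &start, int &end)
{
    const float bin_size = static_cast<float>(roi_len) / subdivisions;
    start = static_cast<int>(std::floor(p * bin_size));
    end   = static_cast<int>(std::ceil((p + 1) * bin_size));
    start = std::min(std::max(start + roi_start, 0), map_size);
    end   = std::min(std::max(end   + roi_start, 0), map_size);
}

int main()
{
    // A ROI 13 pixels tall pooled into 6 vertical bins: adjacent bins overlap
    // by at most one row because of the floor/ceil pairing, and bins with
    // end <= start are the empty case handled by Bounds::isEmpty above.
    for (int p = 0; p < 6; ++p) {
        int hs, he;
        roi_bin_range(p, 6, /*roi_start=*/4, /*roi_len=*/13, /*map_size=*/32, hs, he);
        printf("bin %d -> rows [%d, %d)\n", p, hs, he);
    }
    return 0;
}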
using namespace nvinfer1; using nvinfer1::plugin::InstanceNormalizationPlugin; using nvinfer1::plugin::InstanceNormalizationPluginCreator; template <typename T, int32_t THREADS_PER_CTA> __global__ __launch_bounds__(THREADS_PER_CTA) void in3dReluActivation( T* __restrict dst, T* __restrict src, float alpha, int32_t count) { int32_t idx = blockIdx.x * THREADS_PER_CTA + threadIdx.x; if (idx >= count) return; float val = src[idx]; dst[idx] = (val < 0.f) ? val * alpha : val; } cudnnStatus_t convertTrt2cudnnDtype(nvinfer1::DataType trt_dtype, cudnnDataType_t* cudnn_dtype) { switch (trt_dtype) { case nvinfer1::DataType::kFLOAT: *cudnn_dtype = CUDNN_DATA_FLOAT; break; case nvinfer1::DataType::kHALF: *cudnn_dtype = CUDNN_DATA_HALF; break; default: return CUDNN_STATUS_BAD_PARAM; } return CUDNN_STATUS_SUCCESS; } namespace { constexpr const char* INSTANCE_PLUGIN_VERSION{"1"}; constexpr const char* INSTANCE_PLUGIN_NAME{"InstanceNormalization_TRT"}; } // namespace PluginFieldCollection InstanceNormalizationPluginCreator::mFC{}; std::vector<PluginField> InstanceNormalizationPluginCreator::mPluginAttributes; InstanceNormalizationPlugin::InstanceNormalizationPlugin( float epsilon, const std::vector<float>& scale, const std::vector<float>& bias, int32_t relu, float alpha) : mEpsilon(epsilon) , mAlpha(alpha) , mRelu(relu) , mNchan(scale.size()) , mHostScale(scale) , mHostBias(bias) { ASSERT(scale.size() == bias.size()); } InstanceNormalizationPlugin::InstanceNormalizationPlugin( float epsilon, nvinfer1::Weights const& scale, nvinfer1::Weights const& bias, int32_t relu, float alpha) : mEpsilon(epsilon) , mAlpha(alpha) , mRelu(relu) , mNchan(scale.count) { ASSERT(scale.count == bias.count); const auto copyWeights = [](nvinfer1::Weights const& input, std::vector<float>& output) { output.reserve(input.count); if (input.type == nvinfer1::DataType::kFLOAT) { output.assign(static_cast<const float*>(input.values), static_cast<const float*>(input.values) + input.count); } else if (input.type == nvinfer1::DataType::kHALF) { for (int32_t c = 0; c < input.count; ++c) { const auto value = static_cast<const unsigned short*>(input.values); output.push_back(__internal_half2float(value[c])); } } else { throw std::runtime_error("Unsupported scale/bias dtype"); } }; copyWeights(scale, mHostScale); copyWeights(bias, mHostBias); } InstanceNormalizationPlugin::InstanceNormalizationPlugin(void const* serialData, size_t serialLength) { deserialize_value(&serialData, &serialLength, &mEpsilon); deserialize_value(&serialData, &serialLength, &mNchan); deserialize_value(&serialData, &serialLength, &mHostScale); deserialize_value(&serialData, &serialLength, &mHostBias); deserialize_value(&serialData, &serialLength, &mRelu); deserialize_value(&serialData, &serialLength, &mAlpha); } InstanceNormalizationPlugin::~InstanceNormalizationPlugin() { terminate(); } // InstanceNormalizationPlugin returns one output. int32_t InstanceNormalizationPlugin::getNbOutputs() const noexcept { return 1; } DimsExprs InstanceNormalizationPlugin::getOutputDimensions(int32_t outputIndex, const nvinfer1::DimsExprs* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept { nvinfer1::DimsExprs output(inputs[0]); return output; } int32_t InstanceNormalizationPlugin::initialize() noexcept { if (!mInitialized) { CHECK_CUDNN(cudnnCreate(&mCudnnHandle)); CHECK_CUDNN(cudnnCreateTensorDescriptor(&mBDescriptor)); CHECK_CUDNN(cudnnCreateTensorDescriptor(&mXDescriptor)); CHECK_CUDNN(cudnnCreateTensorDescriptor(&mYDescriptor)); // NDHWC path // Device info. 
int32_t device; CHECK_CUDA(cudaGetDevice(&device)); cudaDeviceProp props; CHECK_CUDA(cudaGetDeviceProperties(&props, device)); mContext.sm_count = props.multiProcessorCount; mContext.sm_shared_size = props.sharedMemPerMultiprocessor; mContext.sm_version = props.major * 100 + props.minor * 10; CHECK_CUDA(cudaMalloc(&mDeviceScale, mNchan * sizeof(float))); CHECK_CUDA(cudaMalloc(&mDeviceBias, mNchan * sizeof(float))); CHECK_CUDA(cudaMemcpy(mDeviceScale, &mHostScale[0], mNchan * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(cudaMemcpy(mDeviceBias, &mHostBias[0], mNchan * sizeof(float), cudaMemcpyHostToDevice)); } mInitialized = true; return 0; } void InstanceNormalizationPlugin::terminate() noexcept { if (mInitialized) { cudnnDestroyTensorDescriptor(mYDescriptor); cudnnDestroyTensorDescriptor(mXDescriptor); cudnnDestroyTensorDescriptor(mBDescriptor); cudnnDestroy(mCudnnHandle); CUASSERT(cudaFree(mDeviceBias)); CUASSERT(cudaFree(mDeviceScale)); } mInitialized = false; } size_t InstanceNormalizationPlugin::getWorkspaceSize(const nvinfer1::PluginTensorDesc* inputs, int32_t nbInputs, const nvinfer1::PluginTensorDesc* outputs, int32_t nbOutputs) const noexcept { nvinfer1::Dims input_dims = inputs[0].dims; ASSERT(input_dims.nbDims == 4 || input_dims.nbDims == 5); if (inputs[0].format == nvinfer1::PluginFormat::kLINEAR) { nvinfer1::Dims input_dims = inputs[0].dims; int32_t n = input_dims.d[0]; int32_t c = input_dims.d[1]; size_t nchan_bytes = c * sizeof(float); size_t scale_size = n * nchan_bytes; size_t bias_size = n * nchan_bytes; size_t total_wss = scale_size + bias_size; return total_wss; } else if (inputs[0].format == nvinfer1::PluginFormat::kDHWC8 || inputs[0].format == nvinfer1::PluginFormat::kCDHW32) { ASSERT(input_dims.nbDims == 5); int32_t input_data_type = (inputs[0].type == nvinfer1::DataType::kHALF) ? 1 : 2; int32_t output_data_type = (outputs[0].type == nvinfer1::DataType::kHALF) ? 1 : 2; nvinfer1::Dims input_dims = inputs[0].dims; int32_t n = input_dims.d[0]; int32_t c = input_dims.d[1]; int32_t d = input_dims.d[2]; int32_t h = input_dims.d[3]; int32_t w = input_dims.d[4]; InstanceNormFwdParams params; // only these parameters are required for workspace computation params.nhw = d * h * w; params.c = c; params.n = n; // Reserve memory for the workspaces. 
size_t size_sums, size_counts, size_retired_ctas; instanceNormBufferSizesDispatch( mContext, params, size_sums, size_counts, size_retired_ctas, input_data_type, output_data_type); size_t size_nc = n * c * sizeof(float); size_nc = ((size_nc + 256 - 1) / 256) * 256; return size_sums + size_counts + size_retired_ctas + 4 * size_nc; } else { ASSERT(0); } return 0; } int32_t InstanceNormalizationPlugin::enqueue(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) noexcept { nvinfer1::Dims input_dims = inputDesc[0].dims; // early return for empty tensor if (std::any_of(input_dims.d, input_dims.d + input_dims.nbDims, [](int32_t d) { return d == 0; })) { return 0; } const auto callRelu = [this, &stream](void* inOut, int32_t count, nvinfer1::DataType type) { if (mRelu > 0) { const int32_t kBLOCK_SZ = 256; switch (type) { case nvinfer1::DataType::kFLOAT: in3dReluActivation<float, kBLOCK_SZ><<<(count + kBLOCK_SZ - 1) / kBLOCK_SZ, kBLOCK_SZ, 0, stream>>>( static_cast<float*>(inOut), static_cast<float*>(inOut), mAlpha, count); break; case nvinfer1::DataType::kHALF: in3dReluActivation<__half, kBLOCK_SZ><<<(count + kBLOCK_SZ - 1) / kBLOCK_SZ, kBLOCK_SZ, 0, stream>>>( static_cast<__half*>(inOut), static_cast<__half*>(inOut), mAlpha, count); break; default: ASSERT(0); } } }; if (input_dims.nbDims <= 4) { nvinfer1::Dims input_dims = inputDesc[0].dims; int32_t n = input_dims.d[0]; int32_t c = input_dims.d[1]; int32_t h = input_dims.d[2]; int32_t w = input_dims.nbDims > 3 ? input_dims.d[3] : 1; size_t nchan_bytes = c * sizeof(float); float* _d_array = static_cast<float*>(workspace); float* d_scale = &_d_array[0]; float* d_bias = &_d_array[n * c]; for (int32_t i = 0; i < n; ++i) { CUASSERT(cudaMemcpyAsync(d_scale + i * c, mDeviceScale, nchan_bytes, cudaMemcpyDeviceToDevice, stream)); CUASSERT(cudaMemcpyAsync(d_bias + i * c, mDeviceBias, nchan_bytes, cudaMemcpyDeviceToDevice, stream)); } CUDNNASSERT(cudnnSetTensor4dDescriptor(mBDescriptor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, n * c, 1, 1)); cudnnDataType_t cudnn_dtype{}; CUDNNASSERT(convertTrt2cudnnDtype(inputDesc[0].type, &cudnn_dtype)); CUDNNASSERT(cudnnSetTensor4dDescriptor(mXDescriptor, CUDNN_TENSOR_NCHW, cudnn_dtype, 1, n * c, h, w)); CUDNNASSERT(cudnnSetTensor4dDescriptor(mYDescriptor, CUDNN_TENSOR_NCHW, cudnn_dtype, 1, n * c, h, w)); float alpha = 1; float beta = 0; void const* x_ptr = inputs[0]; void* y_ptr = outputs[0]; CUDNNASSERT(cudnnSetStream(mCudnnHandle, stream)); // Note: Use of CUDNN_BATCHNORM_SPATIAL_PERSISTENT can cause numerical // overflows (NaNs) for fp32 data in some circumstances. The lower- // performance CUDNN_BATCHNORM_SPATIAL should be used if this is not // acceptable. 
CUDNNASSERT(cudnnBatchNormalizationForwardTraining(mCudnnHandle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, &alpha, &beta, mXDescriptor, x_ptr, mYDescriptor, y_ptr, mBDescriptor, d_scale, d_bias, 1., nullptr, nullptr, mEpsilon, nullptr, nullptr)); callRelu(y_ptr, n * c * h * w, inputDesc[0].type); } else { if (inputDesc[0].format == nvinfer1::PluginFormat::kLINEAR) { CHECK_CUDNN(cudnnSetStream(mCudnnHandle, stream)); nvinfer1::Dims input_dims = inputDesc[0].dims; int32_t n = input_dims.d[0]; int32_t c = input_dims.d[1]; int32_t d = input_dims.d[2]; int32_t h = input_dims.d[3]; int32_t w = input_dims.d[4]; size_t nchan_bytes = c * sizeof(float); // Note: We repeat the data for each batch entry so that we can do the full // computation in a single CUDNN call in enqueue(). float* _d_array = (float*) workspace; float* d_scale = &_d_array[0]; float* d_bias = &_d_array[n * c]; for (int32_t i = 0; i < n; ++i) { CHECK_CUDA( cudaMemcpyAsync(d_scale + i * c, mDeviceScale, nchan_bytes, cudaMemcpyDeviceToDevice, stream)); CHECK_CUDA(cudaMemcpyAsync(d_bias + i * c, mDeviceBias, nchan_bytes, cudaMemcpyDeviceToDevice, stream)); } int32_t nc_dimA[] = {1, n * c, 1, 1, 1}; int32_t nc_strideA[] = {nc_dimA[1] * nc_dimA[2] * nc_dimA[3] * nc_dimA[4], nc_dimA[2] * nc_dimA[3] * nc_dimA[4], nc_dimA[3] * nc_dimA[4], nc_dimA[4], 1}; int32_t img_dimA[] = {1, n * c, d, h, w}; int32_t img_strideA[] = {img_dimA[1] * img_dimA[2] * img_dimA[3] * img_dimA[4], img_dimA[2] * img_dimA[3] * img_dimA[4], img_dimA[3] * img_dimA[4], img_dimA[4], 1}; CHECK_CUDNN(cudnnSetTensorNdDescriptor(mBDescriptor, CUDNN_DATA_FLOAT, 5, nc_dimA, nc_strideA)); cudnnDataType_t cudnn_dtype; CHECK_CUDNN(convertTrt2cudnnDtype(inputDesc[0].type, &cudnn_dtype)); CHECK_CUDNN(cudnnSetTensorNdDescriptor(mXDescriptor, cudnn_dtype, 5, img_dimA, img_strideA)); CHECK_CUDNN(cudnnSetTensorNdDescriptor(mYDescriptor, cudnn_dtype, 5, img_dimA, img_strideA)); float alpha = 1; float beta = 0; void const* x_ptr = inputs[0]; void* y_ptr = outputs[0]; // Note: Use of CUDNN_BATCHNORM_SPATIAL_PERSISTENT can cause numerical // overflows (NaNs) for fp32 data in some circumstances. The lower- // performance CUDNN_BATCHNORM_SPATIAL should be used if this is not // acceptable. CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(mCudnnHandle, CUDNN_BATCHNORM_SPATIAL_PERSISTENT, &alpha, &beta, mXDescriptor, x_ptr, mYDescriptor, y_ptr, mBDescriptor, d_scale, d_bias, 1., nullptr, nullptr, mEpsilon, nullptr, nullptr)); callRelu(y_ptr, n * c * d * h * w, inputDesc[0].type); } else if (inputDesc[0].format == nvinfer1::PluginFormat::kDHWC8 || inputDesc[0].format == nvinfer1::PluginFormat::kCDHW32) { int32_t input_data_type = (inputDesc[0].type == nvinfer1::DataType::kHALF) ? 1 : 2; int32_t output_data_type = (outputDesc[0].type == nvinfer1::DataType::kHALF) ? 
1 : 2; nvinfer1::Dims input_dims = inputDesc[0].dims; int32_t n = input_dims.d[0]; int32_t c = input_dims.d[1]; int32_t d = input_dims.d[2]; int32_t h = input_dims.d[3]; int32_t w = input_dims.d[4]; InstanceNormFwdParams params; params.nhw = d * h * w; params.c = c; params.n = n; size_t size_sums, size_counts, size_retired_ctas; instanceNormBufferSizesDispatch( mContext, params, size_sums, size_counts, size_retired_ctas, input_data_type, output_data_type); size_t size_nc = n * c * sizeof(float); size_nc = ((size_nc + 256 - 1) / 256) * 256; char* d_buf = static_cast<char*>(workspace); params.gmem_sums = reinterpret_cast<GMEM_SUMS_TYPE*>(d_buf); d_buf += size_sums; params.gmem_counts = reinterpret_cast<int32_t*>(d_buf); d_buf += size_counts; params.gmem_retired_ctas = reinterpret_cast<int32_t*>(d_buf); d_buf += size_retired_ctas; params.gmem_running_mean = reinterpret_cast<float*>(d_buf); d_buf += size_nc; params.gmem_running_var = reinterpret_cast<float*>(d_buf); d_buf += size_nc; params.gmem_saved_mean = reinterpret_cast<float*>(d_buf); d_buf += size_nc; params.gmem_saved_var = reinterpret_cast<float*>(d_buf); d_buf += size_nc; params.gmem_src = inputs[0]; params.gmem_dst = outputs[0]; params.gmem_bias = mDeviceBias; params.gmem_scale = mDeviceScale; params.var_eps = mEpsilon; params.exp_avg_factor = 1.F; //(float)exp_avg_factor; params.use_relu = mRelu; // use_relu; params.relu_alpha = mAlpha; // relu_alpha; params.in_scale = inputDesc[0].scale; ASSERT(outputDesc[0].scale != 0.F); params.out_scale = 1.F / outputDesc[0].scale; instanceNormFwdDispatch(mContext, params, stream, input_data_type, output_data_type); } else { ASSERT(false && "Unexpected input format"); } } return 0; } size_t InstanceNormalizationPlugin::getSerializationSize() const noexcept { return (serialized_size(mEpsilon) + serialized_size(mNchan) + serialized_size(mHostScale) + serialized_size(mHostBias) + serialized_size(mRelu) + serialized_size(mAlpha)); } void InstanceNormalizationPlugin::serialize(void* buffer) const noexcept { serialize_value(&buffer, mEpsilon); serialize_value(&buffer, mNchan); serialize_value(&buffer, mHostScale); serialize_value(&buffer, mHostBias); serialize_value(&buffer, mRelu); serialize_value(&buffer, mAlpha); } bool InstanceNormalizationPlugin::supportsFormatCombination( int32_t pos, const nvinfer1::PluginTensorDesc* inOut, int32_t nbInputs, int32_t nbOutputs) noexcept { ASSERT(inOut && pos < (nbInputs + nbOutputs)); ASSERT(pos == 0 || pos == 1); // For 4-D or 3-D tensor (nbSpatialDims == 1 or 2), only FP32_Linear and FP16_Linear are supported. // For 5-D tensor (nbSpatialDims == 3), FP32_Linear, FP16_Linear, FP16_DHWC8, and INT8_CDHW32 are supported. // This is because we have special InstanceNorm3D kernels for vectorized formats from MLPerf-Inference. 
const int32_t nbDims = inOut[pos].dims.nbDims; ASSERT(nbDims >= 3); ASSERT(nbDims <= 5); const bool is3DInstanceNorm = (nbDims == 5); const bool isFP32Linear = (inOut[pos].type == nvinfer1::DataType::kFLOAT && inOut[pos].format == nvinfer1::PluginFormat::kLINEAR && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); const bool isFP16Linear = (inOut[pos].type == nvinfer1::DataType::kHALF && inOut[pos].format == nvinfer1::PluginFormat::kLINEAR && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); const bool isFP16DHWC8 = (inOut[pos].type == nvinfer1::DataType::kHALF && inOut[pos].format == nvinfer1::PluginFormat::kDHWC8 && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); const bool isINT8CDHW32 = (inOut[pos].type == nvinfer1::DataType::kINT8 && inOut[pos].format == nvinfer1::PluginFormat::kCDHW32 && inOut[pos].type == inOut[0].type && inOut[pos].format == inOut[0].format); const bool isFormatOK = isFP32Linear || isFP16Linear || (is3DInstanceNorm && (isFP16DHWC8 || isINT8CDHW32)); // Kernels for vectorized formats only support the case of C % spv == 0. int32_t spv{1}; switch (inOut[pos].format) { case nvinfer1::PluginFormat::kDHWC8: spv = 8; break; case nvinfer1::PluginFormat::kCDHW32: spv = 32; break; default: break; } const int32_t isAlignmentOK = (inOut[pos].dims.d[1] % spv == 0); return isFormatOK && isAlignmentOK; } const char* InstanceNormalizationPlugin::getPluginType() const noexcept { return INSTANCE_PLUGIN_NAME; } const char* InstanceNormalizationPlugin::getPluginVersion() const noexcept { return INSTANCE_PLUGIN_VERSION; } void InstanceNormalizationPlugin::destroy() noexcept { delete this; } IPluginV2DynamicExt* InstanceNormalizationPlugin::clone() const noexcept { auto* plugin = new InstanceNormalizationPlugin{mEpsilon, mHostScale, mHostBias, mRelu, mAlpha}; plugin->setPluginNamespace(mPluginNamespace.c_str()); plugin->initialize(); return plugin; } // Set plugin namespace void InstanceNormalizationPlugin::setPluginNamespace(const char* pluginNamespace) noexcept { mPluginNamespace = pluginNamespace; } const char* InstanceNormalizationPlugin::getPluginNamespace() const noexcept { return mPluginNamespace.c_str(); } nvinfer1::DataType InstanceNormalizationPlugin::getOutputDataType( int32_t index, const nvinfer1::DataType* inputTypes, int32_t nbInputs) const noexcept { ASSERT(inputTypes && nbInputs > 0 && index == 0); return inputTypes[0]; } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void InstanceNormalizationPlugin::attachToContext( cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) noexcept { } // Detach the plugin object from its execution context. 
void InstanceNormalizationPlugin::detachFromContext() noexcept {} void InstanceNormalizationPlugin::configurePlugin(const nvinfer1::DynamicPluginTensorDesc* in, int32_t nbInputs, const nvinfer1::DynamicPluginTensorDesc* out, int32_t nbOutputs) noexcept { // Not support dynamic shape in C dimension ASSERT(nbInputs == 1 && in[0].desc.dims.d[1] != -1); } // InstanceNormalizationPluginCreator methods InstanceNormalizationPluginCreator::InstanceNormalizationPluginCreator() { mPluginAttributes.clear(); mPluginAttributes.emplace_back(PluginField("epsilon", nullptr, PluginFieldType::kFLOAT32, 1)); mPluginAttributes.emplace_back(PluginField("scales", nullptr, PluginFieldType::kFLOAT32, 1)); mPluginAttributes.emplace_back(PluginField("bias", nullptr, PluginFieldType::kFLOAT32, 1)); mPluginAttributes.emplace_back(PluginField("relu", nullptr, PluginFieldType::kINT32, 1)); mPluginAttributes.emplace_back(PluginField("alpha", nullptr, PluginFieldType::kFLOAT32, 1)); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* InstanceNormalizationPluginCreator::getPluginName() const noexcept { return INSTANCE_PLUGIN_NAME; } const char* InstanceNormalizationPluginCreator::getPluginVersion() const noexcept { return INSTANCE_PLUGIN_VERSION; } const PluginFieldCollection* InstanceNormalizationPluginCreator::getFieldNames() noexcept { return &mFC; } IPluginV2DynamicExt* InstanceNormalizationPluginCreator::createPlugin( const char* name, const nvinfer1::PluginFieldCollection* fc) noexcept { std::vector<float> scaleValues; std::vector<float> biasValues; float epsilon{}; int32_t relu{}; float alpha{}; const PluginField* fields = fc->fields; for (int32_t i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "epsilon")) { ASSERT(fields[i].type == PluginFieldType::kFLOAT32); epsilon = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "scales")) { ASSERT(fields[i].type == PluginFieldType::kFLOAT32); int32_t size = fields[i].length; scaleValues.reserve(size); const auto* w = static_cast<const float*>(fields[i].data); for (int32_t j = 0; j < size; j++) { scaleValues.push_back(*w); w++; } } else if (!strcmp(attrName, "bias")) { ASSERT(fields[i].type == PluginFieldType::kFLOAT32); int32_t size = fields[i].length; biasValues.reserve(size); const auto* w = static_cast<const float*>(fields[i].data); for (int32_t j = 0; j < size; j++) { biasValues.push_back(*w); w++; } } else if (!strcmp(attrName, "relu")) { ASSERT(fields[i].type == PluginFieldType::kINT32); relu = *(static_cast<const int32_t*>(fields[i].data)); } else if (!strcmp(attrName, "alpha")) { ASSERT(fields[i].type == PluginFieldType::kFLOAT32); alpha = *(static_cast<const float*>(fields[i].data)); } } Weights scaleWeights{DataType::kFLOAT, scaleValues.data(), (int64_t) scaleValues.size()}; Weights biasWeights{DataType::kFLOAT, biasValues.data(), (int64_t) biasValues.size()}; InstanceNormalizationPlugin* obj = new InstanceNormalizationPlugin(epsilon, scaleWeights, biasWeights, relu, alpha); obj->setPluginNamespace(mNamespace.c_str()); obj->initialize(); return obj; } IPluginV2DynamicExt* InstanceNormalizationPluginCreator::deserializePlugin( const char* name, const void* serialData, size_t serialLength) noexcept { InstanceNormalizationPlugin* obj = new InstanceNormalizationPlugin{serialData, serialLength}; obj->setPluginNamespace(mNamespace.c_str()); obj->initialize(); return obj; }
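// Reference semantics (illustrative sketch, not part of the plugin): instance
// normalization treats every (n, c) slice independently, which is why enqueue()
// above folds the batch dimension into the channel axis (descriptors of shape
// 1 x N*C x H x W, or 1 x N*C x D x H x W) and replicates the per-channel
// scale/bias N times before calling cudnnBatchNormalizationForwardTraining.
// The host-side sketch below spells out that math for the NCHW float case; all
// names are illustrative and error handling is omitted.
#include <cmath>
#include <cstdint>
#include <vector>

static void instanceNormReference(const std::vector<float>& x, std::vector<float>& y, int32_t n,
    int32_t c, int32_t h, int32_t w, const std::vector<float>& scale, const std::vector<float>& bias,
    float epsilon, int32_t relu, float alpha)
{
    const int32_t hw = h * w;
    for (int32_t i = 0; i < n; ++i)
    {
        for (int32_t j = 0; j < c; ++j)
        {
            const float* src = x.data() + (i * c + j) * hw;
            float* dst = y.data() + (i * c + j) * hw;
            // Mean and (biased) variance over the spatial extent of this (n, c) slice.
            double mean = 0.0;
            for (int32_t k = 0; k < hw; ++k)
            {
                mean += src[k];
            }
            mean /= hw;
            double var = 0.0;
            for (int32_t k = 0; k < hw; ++k)
            {
                var += (src[k] - mean) * (src[k] - mean);
            }
            var /= hw;
            const float invStd = 1.F / std::sqrt(static_cast<float>(var) + epsilon);
            for (int32_t k = 0; k < hw; ++k)
            {
                float v = (src[k] - static_cast<float>(mean)) * invStd * scale[j] + bias[j];
                // Optional leaky ReLU, matching in3dReluActivation above.
                dst[k] = (relu > 0 && v < 0.F) ? v * alpha : v;
            }
        }
    }
}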
#include "octnet/gpu/unpool.h" #include "octnet/gpu/gpu.h" #include <cstdlib> __global__ void kernel_gridunpool2x2x2_struct(octree out, int n_blocks, ot_size_t feature_size, const octree in) { CUDA_KERNEL_LOOP(out_grid_idx, n_blocks) { int gn,ogd,ogh,ogw; octree_split_grid_idx(&out, out_grid_idx, &gn, &ogd, &ogh, &ogw); int igd = ogd / 2; int igh = ogh / 2; int igw = ogw / 2; int in_grid_idx = octree_grid_idx(&in, gn, igd, igh, igw); const ot_tree_t* in_tree = octree_get_tree(&in, in_grid_idx); ot_tree_t* out_tree = octree_get_tree(&out, out_grid_idx); int in_bit_idx = 1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2); if(tree_isset_bit(in_tree, in_bit_idx)) { tree_set_bit(out_tree, 0); in_bit_idx = tree_child_bit_idx(in_bit_idx); for(int out_bit_idx = 1; out_bit_idx < 9; ++out_bit_idx) { if(tree_isset_bit(in_tree, in_bit_idx)) { tree_set_bit(out_tree, out_bit_idx); } in_bit_idx++; } } } } __global__ void kernel_gridunpoolguided2x2x2(octree out, int n_leafs, const octree in) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { const int out_grid_idx = out.data[leaf_idx * out.feature_size]; const ot_tree_t* out_tree = octree_get_tree(&out, out_grid_idx); // const int cum_n_leafs = n_leafs_upto(&out, out_grid_idx); const int cum_n_leafs = out.prefix_leafs[out_grid_idx]; const int out_data_idx = leaf_idx - cum_n_leafs; const int out_bit_idx = data_idx_to_bit_idx(out_tree, out_data_idx); // ot_data_t* out_data = out.data_ptrs[out_grid_idx] + out_data_idx * out.feature_size; ot_data_t* out_data = octree_get_data(&out, out_grid_idx) + out_data_idx * out.feature_size; const int depth = depth_from_bit_idx(out_bit_idx); int gn,ogd,ogh,ogw; octree_split_grid_idx(&out, out_grid_idx, &gn, &ogd, &ogh, &ogw); const int igd = ogd / 2; const int igh = ogh / 2; const int igw = ogw / 2; const int in_grid_idx = octree_grid_idx(&in, gn, igd, igh, igw); const ot_tree_t* in_tree = octree_get_tree(&in, in_grid_idx); // const ot_data_t* in_data = in.data_ptrs[in_grid_idx]; const ot_data_t* in_data = octree_get_data(&in, in_grid_idx); int in_bit_idx; if(depth == 0) { in_bit_idx = 1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2); // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, -1,-1,-1); } else if(depth == 1) { // bdhw_from_idx_l1(out_bit_idx, &bd,&bh,&bw); int bd = ((out_bit_idx - 1)/4); int bh = ((out_bit_idx - 1)%4/2); int bw = ((out_bit_idx - 1)%2); in_bit_idx = tree_child_bit_idx(1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2)) + bd * 4 + bh * 2 + bw; // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, bd,bh,bw); } else if(depth == 2) { // bdhw_from_idx_l2(out_bit_idx, &bd,&bh,&bw) int bd1 = ((out_bit_idx - 9) / 8/4); int bh1 = ((out_bit_idx - 9) / 8%4/2); int bw1 = ((out_bit_idx - 9) / 8%2); int bd2 = (((out_bit_idx - 9) % 8)/4); int bh2 = ((((out_bit_idx - 9) % 8)%4)/2); int bw2 = (((out_bit_idx - 9) % 8)%2); in_bit_idx = tree_child_bit_idx(tree_child_bit_idx(1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2)) + bd1 * 4 + bh1 * 2 + bw1) + bd2 * 4 + bh2 * 2 + bw2; // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, bd1,bh1,bw1, bd2,bh2,bw2); } else if(depth == 3) { // bdhw_from_idx_l3(out_bit_idx, &bd,&bh,&bw); int bd1 = (((tree_parent_bit_idx(out_bit_idx) - 9) / 8)/4); int bh1 = (((tree_parent_bit_idx(out_bit_idx) - 9) / 8)%4/2); int bw1 = (((tree_parent_bit_idx(out_bit_idx) - 9) / 8)%2); int bd2 = 
(((tree_parent_bit_idx(out_bit_idx) - 9) % 8)/4); int bh2 = (((tree_parent_bit_idx(out_bit_idx) - 9) % 8)%4/2); int bw2 = (((tree_parent_bit_idx(out_bit_idx) - 9) % 8)%2); in_bit_idx = tree_child_bit_idx(tree_child_bit_idx(1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2)) + bd1 * 4 + bh1 * 2 + bw1) + bd2 * 4 + bh2 * 2 + bw2; // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, bd1,bh1,bw1, bd2,bh2,bw2); } // int out_data_idx = tree_data_idx(out_tree, out_bit_idx, in.feature_size); int in_bit_idx2 = tree_bit_idx_leaf(in_tree, in_bit_idx); int in_data_idx = tree_data_idx(in_tree, in_bit_idx2, in.feature_size); // for(int f = 0; f < in.feature_size; ++f) { // out_data[f] = in_data[in_data_idx + f]; // } octree_cpy_leaf(in_data + in_data_idx, in.feature_size, out_data); } } void octree_gridunpool2x2x2_gpu(const octree* in, octree* out) { out->n = in->n; out->grid_depth = in->grid_depth * 2; out->grid_height = in->grid_height * 2; out->grid_width = in->grid_width * 2; out->feature_size = in->feature_size; octree_resize_as_gpu(out, out); const int n_blocks = octree_num_blocks(out); const int feature_size = in->feature_size; octree_clr_trees_gpu(out); kernel_gridunpool2x2x2_struct<<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>( *out, n_blocks, feature_size, *in ); CUDA_POST_KERNEL_CHECK; octree_upd_n_leafs_gpu(out); octree_resize_as_gpu(out, out); octree_upd_prefix_leafs_gpu(out); octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data); kernel_gridunpoolguided2x2x2<<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>( *out, out->n_leafs, *in ); CUDA_POST_KERNEL_CHECK; } void octree_gridunpoolguided2x2x2_gpu(const octree* in, const octree* in_struct, octree* out) { if(in->grid_depth != in_struct->grid_depth / 2 || in->grid_height != in_struct->grid_height / 2 || in->grid_width != in_struct->grid_width / 2) { printf("[ERROR] octree_gridunpoolguided2x2x2_gpu in dim does not fit in_struct dim\n"); exit(-1); } octree_cpy_scalars(in_struct, out); octree_resize_as_gpu(in_struct, out); octree_cpy_trees_gpu_gpu(in_struct, out); octree_cpy_prefix_leafs_gpu_gpu(in_struct, out); octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data); kernel_gridunpoolguided2x2x2<<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>( *out, out->n_leafs, *in ); CUDA_POST_KERNEL_CHECK; } __global__ void kernel_gridunpoolguided2x2x2_bwd(octree grad_in, int n_leafs, const octree grad_out) { CUDA_KERNEL_LOOP(leaf_idx, n_leafs) { // const int out_grid_idx = out.data[leaf_idx * out.feature_size]; const int out_grid_idx = leaf_idx_to_grid_idx(&grad_out, leaf_idx); const ot_tree_t* out_tree = octree_get_tree(&grad_out, out_grid_idx); // const int cum_n_leafs = n_leafs_upto(&grad_out, out_grid_idx); const int cum_n_leafs = grad_out.prefix_leafs[out_grid_idx]; const int out_data_idx = leaf_idx - cum_n_leafs; const int out_bit_idx = data_idx_to_bit_idx(out_tree, out_data_idx); // const ot_data_t* out_data = grad_out.data_ptrs[out_grid_idx] + out_data_idx * grad_out.feature_size; const ot_data_t* out_data = octree_get_data(&grad_out, out_grid_idx) + out_data_idx * grad_out.feature_size; const int depth = depth_from_bit_idx(out_bit_idx); int gn,ogd,ogh,ogw; octree_split_grid_idx(&grad_out, out_grid_idx, &gn, &ogd, &ogh, &ogw); const int igd = ogd / 2; const int igh = ogh / 2; const int igw = ogw / 2; const int in_grid_idx = octree_grid_idx(&grad_in, gn, igd, igh, igw); const ot_tree_t* in_tree = octree_get_tree(&grad_in, 
in_grid_idx); // ot_data_t* in_data = grad_in.data_ptrs[in_grid_idx]; ot_data_t* in_data = octree_get_data(&grad_in, in_grid_idx); int in_bit_idx; if(depth == 0) { in_bit_idx = 1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2); // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, -1,-1,-1); } else if(depth == 1) { // bdhw_from_idx_l1(out_bit_idx, &bd,&bh,&bw); int bd = ((out_bit_idx - 1)/4); int bh = ((out_bit_idx - 1)%4/2); int bw = ((out_bit_idx - 1)%2); in_bit_idx = tree_child_bit_idx(1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2)) + bd * 4 + bh * 2 + bw; // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, bd,bh,bw); } else if(depth == 2) { // bdhw_from_idx_l2(out_bit_idx, &bd,&bh,&bw) int bd1 = ((out_bit_idx - 9) / 8/4); int bh1 = ((out_bit_idx - 9) / 8%4/2); int bw1 = ((out_bit_idx - 9) / 8%2); int bd2 = (((out_bit_idx - 9) % 8)/4); int bh2 = ((((out_bit_idx - 9) % 8)%4)/2); int bw2 = (((out_bit_idx - 9) % 8)%2); in_bit_idx = tree_child_bit_idx(tree_child_bit_idx(1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2)) + bd1 * 4 + bh1 * 2 + bw1) + bd2 * 4 + bh2 * 2 + bw2; // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, bd1,bh1,bw1, bd2,bh2,bw2); } else if(depth == 3) { // bdhw_from_idx_l3(out_bit_idx, &bd,&bh,&bw); int bd1 = (((tree_parent_bit_idx(out_bit_idx) - 9) / 8)/4); int bh1 = (((tree_parent_bit_idx(out_bit_idx) - 9) / 8)%4/2); int bw1 = (((tree_parent_bit_idx(out_bit_idx) - 9) / 8)%2); int bd2 = (((tree_parent_bit_idx(out_bit_idx) - 9) % 8)/4); int bh2 = (((tree_parent_bit_idx(out_bit_idx) - 9) % 8)%4/2); int bw2 = (((tree_parent_bit_idx(out_bit_idx) - 9) % 8)%2); in_bit_idx = tree_child_bit_idx(tree_child_bit_idx(1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2)) + bd1 * 4 + bh1 * 2 + bw1) + bd2 * 4 + bh2 * 2 + bw2; // printf(" %d,%d <= %d,%d (%d,%d,%d) (%d,%d,%d) (%d,%d,%d)\n", out_grid_idx,out_bit_idx, in_grid_idx,in_bit_idx, ogd,ogh,ogw, bd1,bh1,bw1, bd2,bh2,bw2); } // int out_data_idx = tree_data_idx(out_tree, out_bit_idx, grad_in.feature_size); int in_bit_idx2 = tree_bit_idx_leaf(in_tree, in_bit_idx); int in_data_idx = tree_data_idx(in_tree, in_bit_idx2, grad_in.feature_size); for(int f = 0; f < grad_in.feature_size; ++f) { // out_data[f] = in_data[in_data_idx + f]; atomicAdd(in_data + (in_data_idx + f), out_data[f]); } } } void octree_gridunpool2x2x2_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) { octree_cpy_scalars(in, grad_in); octree_resize_as_gpu(in, grad_in); octree_cpy_trees_gpu_gpu(in, grad_in); octree_cpy_prefix_leafs_gpu_gpu(in, grad_in); octree_fill_data_gpu(grad_in, 0); kernel_gridunpoolguided2x2x2_bwd<<<GET_BLOCKS(grad_out->n_leafs), CUDA_NUM_THREADS>>>( *grad_in, grad_out->n_leafs, *grad_out ); CUDA_POST_KERNEL_CHECK; } void octree_gridunpoolguided2x2x2_bwd_gpu(const octree* in, const octree* in_struct, const octree* grad_out, octree* grad_in) { octree_cpy_scalars(in, grad_in); octree_resize_as_gpu(in, grad_in); octree_cpy_trees_gpu_gpu(in, grad_in); octree_cpy_prefix_leafs_gpu_gpu(in, grad_in); octree_fill_data_gpu(grad_in, 0); kernel_gridunpoolguided2x2x2_bwd<<<GET_BLOCKS(grad_out->n_leafs), CUDA_NUM_THREADS>>>( *grad_in, grad_out->n_leafs, *grad_out ); CUDA_POST_KERNEL_CHECK; }
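// Note (illustrative, not part of the library): the expression
//   1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2)
// that appears throughout the kernels above maps the parity of the output grid
// coordinates -- i.e. which octant of the 2x2x2 unpooled block a shallow octree
// cell belongs to -- to the corresponding first-level child bit of the input
// tree (bit 0 marks the split root, bits 1..8 its children in d,h,w-major
// order). A helper with an illustrative name restates it:
__host__ __device__ inline int unpool_parity_child_bit_idx(int ogd, int ogh, int ogw) {
  return 1 + (ogd % 2) * 4 + (ogh % 2) * 2 + (ogw % 2);
}
// The backward kernel accumulates with atomicAdd because several output leaves
// (up to eight, one per octant) can map to the same input leaf and would
// otherwise race when writing their gradient contributions.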
#include "caffe/common.hpp" #include "caffe/util/im2col.hpp" namespace caffe { template <typename Dtype, int num_axes> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, Dtype* data_col) { int d_temp[num_axes]; // NOLINT(runtime/arrays) int d_iter[num_axes]; // NOLINT(runtime/arrays) int i; CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. int channel_in = index; int channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { d_temp[i] = channel_in % col_shape[i + 1]; channel_in /= col_shape[i + 1]; channel_out *= kernel_shape[i]; } channel_out *= channel_in; int data_col_inc = 1; for (i = 0; i < num_axes; ++i) { channel_out *= col_shape[i + 1]; channel_out += d_temp[i]; d_temp[i] = d_temp[i] * stride[i] - pad[i]; channel_in *= im_shape[i + 1]; channel_in += d_temp[i]; data_col_inc *= col_shape[i + 1]; d_iter[i] = 0; } Dtype* data_col_ptr = data_col + channel_out; const Dtype* data_im_ptr = data_im + channel_in; bool incremented; do { bool in_range = true; for (i = 0; i < num_axes; ++i) { const int d_iter_im = d_iter[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < im_shape[i + 1]; if (!in_range) { break; } } if (in_range) { int data_im_offset = d_iter[0]; for (i = 1; i < num_axes; ++i) { data_im_offset *= im_shape[i + 1]; data_im_offset += d_iter[i]; } *data_col_ptr = data_im_ptr[data_im_offset]; } else { *data_col_ptr = 0; } data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { const int d_max = kernel_shape[i]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 ++d_iter[i]; incremented = true; break; } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); // do } // CUDA_KERNEL_LOOP(index, n) } template <typename Dtype> void im2col_gpu(const Dtype* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, Dtype* data_col) { switch (num_spatial_axes) { case 1: im2col_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 2: im2col_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 3: im2col_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 4: im2col_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 5: im2col_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 6: im2col_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); 
break; case 7: im2col_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 8: im2col_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 9: im2col_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; case 10: im2col_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, data_col); break; default: LOG(FATAL) << "im2col_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void im2col_gpu<float>(const float* data_im, const int num_spatial_axes, const int col_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, float* data_col); template void im2col_gpu<double>(const double* data_im, const int num_spatial_axes, const int col_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, double* data_col); template <typename Dtype, int num_axes> __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, Dtype* data_im) { int d_im[num_axes]; // NOLINT(runtime/arrays) int d_col_iter[num_axes]; // NOLINT(runtime/arrays) int d_col_start[num_axes]; // NOLINT(runtime/arrays) int d_col_end[num_axes]; // NOLINT(runtime/arrays) CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. int channel_im = index; // Calculate d_im (image dimensions). for (int i = num_axes - 1; i >= 0; --i) { d_im[i] = channel_im % im_shape[i + 1] + pad[i]; channel_im /= im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; for (int i = 0; i < num_axes; ++i) { d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_shape[i]) ? 0 : (d_im[i] - kernel_shape[i]) / stride[i] + 1; d_col_end[i] = min(d_im[i] / stride[i] + 1, col_shape[i + 1]); if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; done = true; break; // for (int i = 0; i < num_axes; ++i) } } if (done) { continue; // CUDA_KERNEL_LOOP(index, n) } // Loop over the col to compute the output val. Dtype val = 0; bool incremented = true; do { // Compute the final offset. 
int final_offset = 0; int kernel_shape_prod = 1; for (int i = num_axes - 1; i >= 0; --i) { final_offset += (d_im[i] - d_col_iter[i] * stride[i]) * kernel_shape_prod; kernel_shape_prod *= kernel_shape[i]; } final_offset += kernel_shape_prod * channel_im; for (int i = 0; i < num_axes; ++i) { final_offset *= col_shape[i + 1]; final_offset += d_col_iter[i]; } val += data_col[final_offset]; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { const int d_max = d_col_end[i]; if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; } else { // d_col_iter[i] < d_max - 1 ++d_col_iter[i]; incremented = true; break; // for (int i = num_axes - 1; i >= 0; --i) } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; } // CUDA_KERNEL_LOOP(index, n) } template <typename Dtype> void col2im_gpu(const Dtype* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, Dtype* data_im) { switch (num_spatial_axes) { case 1: col2im_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 2: col2im_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 3: col2im_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 4: col2im_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 5: col2im_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 6: col2im_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 7: col2im_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 8: col2im_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 9: col2im_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; case 10: col2im_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, data_im); break; default: LOG(FATAL) << "im2col_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void col2im_gpu<float>(const float* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* 
col_shape, const int* kernel_shape, const int* pad, const int* stride, float* data_im); template void col2im_gpu<double>(const double* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, double* data_im); } // namespace caffe
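// Usage sketch (illustrative, not part of Caffe): driving im2col_gpu for an
// ordinary 2-D convolution. The shape, pad and stride arrays must reside in
// device memory because im2col_gpu_kernel dereferences them on the GPU; the
// caller (e.g. a convolution layer) keeps them in device blobs. Sizes below
// are arbitrary and error checking is omitted for brevity.
#include <cuda_runtime.h>

void im2col_gpu_2d_example() {
  const int C = 3, H = 8, W = 8;                 // input: C x H x W
  const int kH = 3, kW = 3, padH = 1, padW = 1, strideH = 1, strideW = 1;
  const int outH = (H + 2 * padH - kH) / strideH + 1;
  const int outW = (W + 2 * padW - kW) / strideW + 1;

  // Layout expected by the kernel: im_shape = {C, H, W},
  // col_shape = {C * kH * kW, outH, outW}.
  const int im_shape[] = {C, H, W};
  const int col_shape[] = {C * kH * kW, outH, outW};
  const int kernel_shape[] = {kH, kW};
  const int pad[] = {padH, padW};
  const int stride[] = {strideH, strideW};

  // d_im would hold the input image and d_col receives the column buffer;
  // both are left uninitialized in this sketch.
  float* d_im;
  float* d_col;
  int *d_im_shape, *d_col_shape, *d_kernel_shape, *d_pad, *d_stride;
  cudaMalloc((void**)&d_im, C * H * W * sizeof(float));
  cudaMalloc((void**)&d_col, C * kH * kW * outH * outW * sizeof(float));
  cudaMalloc((void**)&d_im_shape, sizeof(im_shape));
  cudaMalloc((void**)&d_col_shape, sizeof(col_shape));
  cudaMalloc((void**)&d_kernel_shape, sizeof(kernel_shape));
  cudaMalloc((void**)&d_pad, sizeof(pad));
  cudaMalloc((void**)&d_stride, sizeof(stride));
  cudaMemcpy(d_im_shape, im_shape, sizeof(im_shape), cudaMemcpyHostToDevice);
  cudaMemcpy(d_col_shape, col_shape, sizeof(col_shape), cudaMemcpyHostToDevice);
  cudaMemcpy(d_kernel_shape, kernel_shape, sizeof(kernel_shape), cudaMemcpyHostToDevice);
  cudaMemcpy(d_pad, pad, sizeof(pad), cudaMemcpyHostToDevice);
  cudaMemcpy(d_stride, stride, sizeof(stride), cudaMemcpyHostToDevice);

  // One kernel iteration per (input channel, output spatial position).
  const int num_kernels = C * outH * outW;
  caffe::im2col_gpu<float>(d_im, /*num_spatial_axes=*/2, num_kernels,
                           d_im_shape, d_col_shape, d_kernel_shape,
                           d_pad, d_stride, d_col);

  cudaFree(d_im); cudaFree(d_col);
  cudaFree(d_im_shape); cudaFree(d_col_shape);
  cudaFree(d_kernel_shape); cudaFree(d_pad); cudaFree(d_stride);
}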
#include "FlutterBinarize.h" #include "Histogram.h" #include <iostream> #include <string.h> using namespace std; #include "time.h" #include "stdlib.h" #include "curand.h" #include "curand_kernel.h" // 宏:GET_MIN(a, b) // 返回两个数的最小值。 #define GET_MIN(a, b) ((a) < (b) ? (a) : (b)) // 宏:GET_MAX(a, b) // 返回两个数的最大值。 #define GET_MAX(a, b) ((a) > (b) ? (a) : (b)) // 宏:GET_ABS(x) // 返回一个数的绝对值。 #define GET_ABS(x) ((x) >= 0 ? (x) : (-(x))) // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:FLUTTER_RADIO // 定义了每个点向周围扩散的概率。 #define FLUTTER_RADIO (4.0f / 7.0f) // 宏:RANDOM_BOUND // 定义了生成随机数的上限,生成 [0 , RANDOM_BOUND - 1 ] 范围内的随机数。 #define RANDOM_BOUND 100 // 宏:MAX_INF // 定义正无穷的最大值。 #define MAX_INF 1 << 29 // static变量:imgTpl[MAX_PIXEL] // 每个像素值对应的 TEMPLATE_SIZE * TEMPLATE_SIZE 大小的模板图像指针。 static Image *imgTpl[MAX_PIXEL] = {NULL}; // static变量:subimgCudTpl[MAX_PIXEL] // 每个像素值对应的模板对应的图像逻辑数据。 static ImageCuda subimgCudTpl[MAX_PIXEL]; // static变量:map[TEMPLATE_SIZE * TEMPLATE_SIZE] // 存储当前的图像模板。 static unsigned char tempTpl[TEMPLATE_SIZE * TEMPLATE_SIZE]; // static变量:cover[TEMPLATE_SIZE][TEMPLATE_SIZE] // visit[i][j] 表示当前点是否被访问过。 static bool visit[TEMPLATE_SIZE][TEMPLATE_SIZE]; // static变量:weight[TEMPLATE_SIZE][TEMPLATE_SIZE] // weight[i][j] 表示当前点的权值。 static int weight[TEMPLATE_SIZE][TEMPLATE_SIZE]; // static变量:save // 存储满足条件的二维坐标点。 static CoordiSet *save = NULL; // Host 函数:manhattan(计算两个点的曼哈顿(manhattan)距离) // 计算 (x , y) 与 (ox , oy) 的 manhattan 距离。 static __host__ int // 返回值:返回两个点的 manhattan 距离。 manhattan( int x, // 第一个点的行坐标 int y, // 第一个点的列坐标 int ox, // 第二个点的行坐标 int oy // 第二个点的列坐标 ); // Host 函数:initalVariables(初始化变量) // 在每次生成一个模板之前对变量进行初始化并赋初始值。 static __host__ void // 无返回值。 initalVariables( int dis // 任意两像素值为 255 的最小 manhattan 距离 ); // Host 函数:findMinWeightPoint(找到最小权值的点) // 在当前模板中找到权值最小的点,即下一个标记为 255 的点。 static __host__ CoordiSet* // 返回值:返回一个 CoordiSet 指针。 findMinWeightPoint(); // Host 函数:check (检查当前放置模板的合法性) // 检查放置 pixel 个像素值为 255 的像素点且其中任两个像素点 manhattan 距离大于 // 等于 dis 方案的合法性。 static __host__ bool // 返回值:如果能找到一个满足条件的模板,则返回 true, // 否则返回 false。 checkValidity( int dis, // 任意两像素值为 255 的最小 manhattan 距离 int pixel // 当前模板的像素值 ); // Host函数:scatter(将模板中像素点分散) // 对一组可行模板中像素值为 255 的像素点进行发散操作。 static __host__ void // 无返回值。 scatter( int dis, // 任意两像素值为 255 的最小 manhattan 距离 int pixel // 当前模板的像素值 ); // Host函数:binarySearch(二分法找到并生成一个与像素值为 pixel 对应的模板) // 二分法找到任意两像素值为 255 的最大 manhattan 距离,并生成与像素值为 pixel // 对应的模板。 static __host__ void binarySearch( int pixel ); // Kernel 函数:_makeTemplateKer(制作模板) // 根据每层像素点数量,把输入图像的坐标存放到模板中的相应位置,得到按层划分的 // 坐标模板。 static __global__ void // Kernel 函数无返回值。 _makeTemplateKer( ImageCuda imgCud, // 输入图像 TemplateCuda coordinate, // 模板 int packrange, // 图像每层像素值数量 unsigned int *devcnt // 图像每层像素点数量 ); // Kernel 函数:_genRandomKer(生成随机数) // 在 Device 端生成与输入图片大小相同的随机数数组,用于抖动二值化像素值扩散。 static __global__ void _genRandomKer( int *randnumdev, // 随机数矩阵 int timehost, // 时间参数 int width // 随机数矩阵的宽度 ); // Kernel 函数:_flutterBinarizeLayerKer(抖动二值化) // 对图像进行抖动二值化处理。根据对像素点划分出的不同层定义不同的扩散能力(扩 // 散理解为把周围点像素值设为 0),分层处理,从而实现图像的抖动二值化操作。 static __global__ void // Kernel 函数无返回值。 _flutterBinarizeLayerKer( ImageCuda imgCud, // 输出图像 TemplateCuda coordinateCud, // 模板 int *randnumdev, // 随机数数组 int from, // 当前层在模板中的起始位置 int to, // 当前层在模板中的终止位置 int threshold // 当前层像素点扩散的阈值 ); // Kernel 函数:_flutterBinarizeKer(抖动二值化) // 对图像进行抖动二值化处理。根据像素点值对应的模板处理当前像素点,从而实现图 // 像的抖动二值化操作。 static __global__ void // Kernel 函数无返回值。 _flutterBinarizeKer( ImageCuda inimgCud, // 输出图像 ImageCuda outimgCud, // 输出图像 
ImageCuda *subimgCudTpl // 每个像素值对应的模板对应的图像逻辑数据指针 ); // Host 函数:manhattan(计算两个点的曼哈顿(manhattan)距离) static __host__ int manhattan(int x, int y, int ox, int oy) { // 返回 x 与 y 坐标差的绝对值。 return GET_ABS(x - ox) + GET_ABS(y - oy); } // Host 函数:initalVariables(初始化变量) static __host__ void initalVariables(int dis) { // 初始化 map,visit,weight 变量。 memset(tempTpl, 0, sizeof (tempTpl)); memset(visit, false, sizeof (visit)); memset(weight, 0, sizeof (weight)); // 对 weight 数组赋初值。 for (int i = 0; i < TEMPLATE_SIZE; i++) { for (int j = 0; j < TEMPLATE_SIZE; j++) { // 局部变量,up 表示与当前点的 manhattan 距离在 dis 范围内最上端的 // 点的行坐标。 int up = GET_MAX(0, i - dis); // 局部变量,dw 表示与当前点的 manhattan 距离在 dis 范围内最下端的 // 点的行坐标。 int dw = GET_MIN(TEMPLATE_SIZE - 1, i + dis); // 枚举满足条件的行更新 weight 数组。 for (int k = up; k <= dw; k++) { // 局部变量,delta 表示第 k 行点与当前点 manhattan 距离剩余值。 int delta = dis - GET_ABS(i - k); // 局部变量,le 表示在第 k 行点中与当前点 manhattan 距离在 dis // 范围内最左端的点的列坐标。 int le = GET_MAX(0, j - delta); // 局部变量,ri 表示在第 k 行点中与当前点 manhattan 距离在 dis // 范围内最右端的点的列坐标。 int ri = GET_MIN(TEMPLATE_SIZE - 1, j + delta); // 累加当前点的权值。 weight[i][j] += ri - le + 1; } } } return; } // Host 函数:findMinWeightPoint(找到最小权值的点) static __host__ CoordiSet* findMinWeightPoint() { // 声明答案节点。 CoordiSet *ans = NULL; // 初始化该节点。 CoordiSetBasicOp::newCoordiSet(&ans); CoordiSetBasicOp::makeAtHost(ans, 1); // 局部变量,minValue 表示最小权值,初始化为 MAX_INF。 int minValue = MAX_INF; for (int i = 0; i < TEMPLATE_SIZE; i++) { for (int j = 0; j < TEMPLATE_SIZE; j++) { // 如果当前点未被访问并权值小于 minValue,则进行赋值操作。 if (visit[i][j] == false && weight[i][j] < minValue) { // 更新答案。 ans->tplData[0] = i; ans->tplData[1] = j; minValue = weight[i][j]; } } } // 如果找不到答案节点返回 NULL。 if (minValue == MAX_INF) return NULL; // 返回答案节点。 return ans; } // Host 函数:check (检查当前放置模板的合法性) static __host__ bool checkValidity(int dis, int pixel) { // 初始化变量。 initalVariables(dis); // 局部变量:left 表示还需要放置像素值为 255 的像素点个数。 int left = GET_MIN(pixel, MAX_PIXEL - pixel); while(left) { // 找到当前应该放置的下一个点的 CoordiSet 指针。 CoordiSet *cur = findMinWeightPoint(); // 如果没找到则跳出循环。 if (cur == NULL) break; // 将当前点像素值标记为 255。 tempTpl[cur->tplData[0] * TEMPLATE_SIZE + cur->tplData[1]] = 255; // 更新 left。 left--; // 更新 visit 和 weight 数组。 for (int i = 0; i < TEMPLATE_SIZE; i++) { for (int j = 0; j < TEMPLATE_SIZE; j++) { // 如果当前点未访问过且与放置点的 manhattan 小于等于 dis,则 // 更新对应的 visit 和 weight 数组。 if (visit[i][j] == false && manhattan(cur->tplData[0], cur->tplData[1], i, j) <= dis) { // 将当前点标记为访问过。 visit[i][j] = true; // 更新与当前点 manhattan 距离小于等于 dis 的权值。 for (int ii = 0; ii < TEMPLATE_SIZE; ii++) { for (int jj = 0; jj < TEMPLATE_SIZE; jj++) { // 如果与当前点的 manhattan 距离小于等于 dis 则 // 更新该权值。 if (manhattan(i, j, ii, jj) <= dis) weight[ii][jj]--; } } } } } } // 返回剩余放置像素值为 255 的点的个数是否为 0。 return (left == 0); } // Host函数:scatter(将模板中像素点分散) static __host__ void scatter(int dis, int pixel) { // 初始化 weight 数组。 memset(weight, 0, sizeof (weight)); // 给 weight 数组赋初值。 for (int i = 0; i < TEMPLATE_SIZE; i++) { for (int j = 0; j < TEMPLATE_SIZE; j++) { // 如果当前点像素值为 255,则更新 weight 数组。 if (tempTpl[i * TEMPLATE_SIZE + j] == 255) { // 局部变量,up 表示与当前点的 manhattan 距离在 dis * 2 范围内 // 最上端的点的行坐标。 int up = GET_MAX(0, i - dis * 2); // 局部变量,dw 表示与当前点的 manhattan 距离在 dis * 2 范围内 // 最下端的点的行坐标。 int dw = GET_MIN(TEMPLATE_SIZE - 1, i + dis * 2); // 枚举满足条件的行更新 weight 数组。 for (int k = up; k <= dw; k++) { // 局部变量,delta 表示第 k 行点与当前点 manhattan 距离 // 的剩余值。 int delta = dis * 2 - GET_ABS(i-k); // 局部变量,le 表示在第 k 行点中与当前点 manhattan 距离在 // dis * 2 范围内最左端的点的列坐标。 int le = GET_MAX(0, j - delta); // 局部变量,ri 表示在第 k 行点中与当前点 
manhattan 距离在 // dis * 2 范围内最右端的点的列坐标。 int ri = GET_MIN(TEMPLATE_SIZE - 1, j + delta); // 累加当前点的权值。每个点的权值表示当前点被像素值为 255 // 的点覆盖的次数。 weight[i][j] += ri - le + 1; } } } } // 如果 save 为空,则初始化开辟空间,否则说明已经初始化了。 if (save == NULL) { // 如果 save 为空,则初始化为 TEMPLATE_SIZE * TEMPLATE_SIZE 的模板。 CoordiSetBasicOp::newCoordiSet(&save); CoordiSetBasicOp::makeAtHost(save, TEMPLATE_SIZE * TEMPLATE_SIZE); } // 局部变量,times 表示循环的次数。 int times = MAX_PIXEL; while(times--) { // 局部变量,(frx, fry) 表示被交换的节点坐标,(tox, toy) 表示交换到的 // 节点坐标。 int frx,fry,tox,toy; // 局部变量,记录最大权值。 int maxValue = 0; // 局部变量,top 表示可交换节点的个数。 int top = 0; // 找到权值最大的被交换的节点组。 for (int i = 0; i < TEMPLATE_SIZE; i++) { for (int j = 0; j < TEMPLATE_SIZE; j++) { // 如果当前点像素值为 255,则更新。 if (tempTpl[i * TEMPLATE_SIZE + j] == 255) { // 如果小于最大权值,则更新 maxValue 并重置 top 为 0。 if (weight[i][j] > maxValue) { maxValue = weight[i][j]; top = 0; save->tplData[top++] = i; save->tplData[top++] = j; // 如果等于最大权值,则存储 save 指针中。 } else if (weight[i][j] == maxValue) { save->tplData[top++] = i; save->tplData[top++] = j; } } } } // 如果不存在权值最小的点,跳出循环。 if (top == 0) break; // 随机找到一个点作为被交换的节点下标。 int id = rand() % (top/2); // 得到对应的 (frx, fry)。 frx = save->tplData[id << 1]; fry = save->tplData[id << 1 | 1]; // 重置 top 变量。 top = 0; // 找到满足 dis 限制的交换到的节点组。 for (int i = 0; i < TEMPLATE_SIZE; i++) { for (int j = 0; j < TEMPLATE_SIZE; j++) { // 如果当前点像素值为 0,则判断该点是否满足条件。 if (tempTpl[i * TEMPLATE_SIZE + j] == 0) { // 局部变量,flag 表示当前点是否满足条件。 bool flag = true; // 局部变量,up 表示与当前点的 manhattan 距离在 dis 范围内 // 最上端的点的行坐标。 int up = GET_MAX(0, i - dis); // 局部变量,dw 表示与当前点的 manhattan 距离在 dis 范围内 // 最下端的点的行坐标。 int dw = GET_MIN(TEMPLATE_SIZE - 1, i + dis); // 枚举满足条件的行判断当前点是否满足条件。 for (int ii = up; ii <= dw; ii++) { // 局部变量,delta 表示第 ii 行点与当前点 manhattan // 距离的剩余值。 int delta = dis - GET_ABS(i - ii); // 局部变量,le 表示在第 ii 行点中与当前点 manhattan // 距离在 dis 范围内最左端的点的列坐标。 int le = GET_MAX(0, j - delta); // 局部变量,ri 表示在第 ii 行点中与当前点 manhattan // 距离在 dis 范围内最右端的点的列坐标。 int ri = GET_MIN(TEMPLATE_SIZE - 1, j + delta); // 枚举当前行中的点判断当前点是否满足条件。 for (int jj = le; jj <= ri; jj++) { // 如果是 (frx, fry) 点则跳过改循环。 if (ii == frx && jj == fry) continue; // 如果该点在 dis 内的 manhattan 距离存在像素值为 // 255 的点,则该点不满足条件,跳出循环。 if (tempTpl[ii * TEMPLATE_SIZE + jj] == 255) { flag = false; break; } } // 如果当前点不满足条件,则跳出循环。 if (flag == false) break; } // 如果该点满足条件,则存储 save 指针中。 if (flag) { save->tplData[top++] = i; save->tplData[top++] = j; } } } } // 如果不存在满足条件的点,跳过本次循环。 if (top == 0) continue; // 随机找到一个点作为被交换的节点下标。 id = rand() % (top/2); // 得到对应的 (tox, toy)。 tox = save->tplData[id << 1]; toy = save->tplData[id << 1 | 1]; // 交换 (frx, fry) 和 (tox, toy) 两个节点的像素值。 tempTpl[frx * TEMPLATE_SIZE + fry] = 0; tempTpl[tox * TEMPLATE_SIZE + toy] = 255; // 更新 weight 数组。 for (int i = 0; i < TEMPLATE_SIZE; i++) { for (int j = 0; j < TEMPLATE_SIZE; j++) { // 如果该点与 fr 点 manhattan 距离小于 dis * 2,则执行减操作。 if (manhattan(i,j,frx,fry) <= dis * 2) weight[i][j]--; // 如果该点与 to 点 manhattan 距离小于 dis * 2,则执行加操作。 if (manhattan(i,j,tox, toy) <= dis * 2) weight[i][j]++; } } } } // Host函数:binarySearch(二分法找到并生成一个与像素值为 pixel 对应的模板) static __host__ void binarySearch(int pixel) { // 初始化 l r 边界值。 int l = 1, r = TEMPLATE_SIZE << 1; // 二分答案。 while(l <= r) { // 局部变量,mid 为当前 l r 的中值。 int mid = (l + r) >> 1; // 如果 mid 值合法,更新 l 值。 if (checkValidity(mid, pixel)) l = mid + 1; // 否则更新 r 值。 else r = mid - 1; } // 得到一组可行模板。 checkValidity(r, pixel); // 调用 scatter 函数使得当前模板中像素值为 255 的点尽可能分散。 scatter(r, pixel); // 如果 pixel 个数大于 MAX_PIXEL / 2 像素值取反。 if (pixel > MAX_PIXEL / 2) { for (int i = 0; i < TEMPLATE_SIZE; i++) { for 
(int j = 0; j < TEMPLATE_SIZE; j++) { tempTpl[i * TEMPLATE_SIZE + j] = (tempTpl[i * TEMPLATE_SIZE + j] == 0) ? 255 : 0; } } } } // Kernel 函数:_makeTemplateKer(制作模板) static __global__ void _makeTemplateKer(ImageCuda imgCud, TemplateCuda coordinateCud, int packrange, unsigned int *devcnt) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标的 // x 和 y 分量(其中,c 表示column,r 表示row)。由于采用的并行度缩减策略, // 令一个线程处理 4 个输出像素,这四个像素位于同一列的相邻 4 行上,因此, // 对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 计算第一个坐标点对应的图像数据数组下标。 int idx = r * imgCud.pitchBytes + c; for (int i = 0; i < 4; i++) { // 检查每个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 一方面防止由于段错误导致的程序崩溃。 if (c >= imgCud.imgMeta.width || r >= imgCud.imgMeta.height) break; // 使用原子减操作获得当前像素点在模板中的下标。 unsigned int val = imgCud.imgMeta.imgData[idx]; int top = atomicSub(&(devcnt[val / packrange]), 1) - 1; // 将当前像素点的坐标和像素值写到对应的模板中。 coordinateCud.tplMeta.tplData[top << 1] = c; coordinateCud.tplMeta.tplData[(top << 1) | 1] = r; coordinateCud.attachedData[top] = (float)val; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 idx += imgCud.pitchBytes; r++; } } // Host 成员方法:initializeLayerTpl(初始化模板处理) __host__ int FlutterBinarize::initializeLayerTpl(Image *img, int groupnum, unsigned int *&cnt, Template *&coordinate) { // 检查输入图像是否为空。 if (img == NULL) return NULL_POINTER; // 局部变量,错误码 int errcode; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(img); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda subimgCud; errcode = ImageBasicOp::roiSubImage(img, &subimgCud); if (errcode != NO_ERROR) return errcode; // 在 Device 端分配存储每层坐标点的在模板中数量的数组。 unsigned int *devcnt; // 在 Device 上分配存储临时每层像素点数量数组的空间。 errcode = cudaMalloc((void **)&devcnt, MAX_PIXEL * sizeof (unsigned int)); if (errcode != NO_ERROR) { cudaFree(devcnt); return errcode; } // 初始化 Device 上的内存空间。 errcode = cudaMemset(devcnt, 0, MAX_PIXEL * sizeof (unsigned int)); if (errcode != NO_ERROR) { cudaFree(devcnt); return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (subimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (subimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 计算输入图像的直方图。 Histogram hist; errcode = hist.histogram(img, devcnt, 0); if (errcode != NO_ERROR) { cudaFree(devcnt); return errcode; } // 将统计每层像素点数量的结果拷回 Host 端内存中。 errcode = cudaMemcpy(cnt, devcnt, MAX_PIXEL * sizeof (unsigned int), cudaMemcpyDeviceToHost); if (errcode != NO_ERROR) { cudaFree(devcnt); return errcode; } // 获得每层坐标点在模板中的区域范围。 for (int i = 0; i < groupnum; i++) { // 定义变量 sum 表示前 i 层像素点的坐标在模板中的下标上限。 int sum = (i == 0) ? 
0 : cnt[i-1]; // 定义第 i 层像素点在模板中的起始位置和终止位置。 int stpos = i * packrange; int endpos = GET_MIN(MAX_PIXEL, (i + 1) * packrange); for (int j = stpos; j < endpos; j++) sum += cnt[j]; // 给 cnt[i] 赋值。 cnt[i] = sum; } // 初始化 Device 上的内存空间。 errcode = cudaMemset(devcnt, 0, groupnum * sizeof (unsigned int)); if (errcode != NO_ERROR) { cudaFree(devcnt); return errcode; } // 将每层像素点在模板中的区域范围拷回 Host 端内存中。 errcode = cudaMemcpy(devcnt, cnt, groupnum * sizeof(unsigned int), cudaMemcpyHostToDevice); if (errcode != NO_ERROR) { cudaFree(devcnt); return errcode; } errcode = TemplateBasicOp::makeAtCurrentDevice(coordinate, subimgCud.imgMeta.width * subimgCud.imgMeta.height); if (errcode != NO_ERROR) { cudaFree(devcnt); return errcode; } // 根据 Template 指针,得到对应的 TemplateCuda 型数据。 TemplateCuda *coordinateCud = TEMPLATE_CUDA(coordinate); // 调用核函数,制作模板。 _makeTemplateKer<<<gridsize, blocksize>>>(subimgCud, *coordinateCud, packrange, devcnt); // 若调用 CUDA 出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { cudaFree(devcnt); return CUDA_ERROR; } // 释放 Device 端的统计数组存储空间。 cudaFree(devcnt); // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:initializeTemplate(初始化模板处理) __host__ int FlutterBinarize::initializeTemplate() { // 局部变量,错误码 int errcode; // 初始化像素值在 [0, 255] 范围内的每个像素值对应的模板。 for (int i = 0; i < MAX_PIXEL; i++) { // 初始化像素值为 i 的模板。如果没有申请成功,则置为 NULL。 errcode = ImageBasicOp::newImage(&imgTpl[i]); if (errcode != NO_ERROR) { imgTpl[i] = NULL; return errcode; } // 为像素值为 i 的模板申请内存空间。如果没有申请成功,则置为 NULL。 errcode = ImageBasicOp::makeAtCurrentDevice( imgTpl[i], TEMPLATE_SIZE, TEMPLATE_SIZE); if (errcode != NO_ERROR) { ImageBasicOp::deleteImage(imgTpl[i]); imgTpl[i] = NULL; return errcode; } } for (int i = 0; i < MAX_PIXEL; i++) { binarySearch(i); errcode = cudaMemcpy(imgTpl[i]->imgData, tempTpl, TEMPLATE_SIZE * TEMPLATE_SIZE * sizeof (unsigned char), cudaMemcpyHostToDevice); if (errcode != NO_ERROR) return errcode; // 提取当前像素值 i 对应的模板的 ROI 子图像。 errcode = ImageBasicOp::roiSubImage(imgTpl[i], &subimgCudTpl[i]); if (errcode != NO_ERROR) return errcode; } // 处理完毕,退出。 return NO_ERROR; } // Kernel 函数:_genRandomKer(生成随机数) static __global__ void _genRandomKer(int *randnumdev, int timehost, int width) { // 计算当前线程的位置。 int index = blockIdx.x * 4; // curand随机函数初始化。 curandState state; curand_init(timehost, index, 0, &state); // 得到当前行在随机数矩阵中的偏移。 int position = index * width; // 一个线程生成 4 行随机数。 for (int i = 0; i < 4; i++) { for (int j = 0; j < width; j++) { // 生成 [0 , RANDOM_BOUND - 1 ] 范围内的随机数。 randnumdev[position + j] = curand(&state) % RANDOM_BOUND; } // 获取下一行的偏移。 position += width; } } // Kernel 函数:_flutterBinarizeLayerKer(抖动二值化) static __global__ void _flutterBinarizeLayerKer(ImageCuda imgCud, TemplateCuda coordinateCud, int *randnumdev, int from, int to, int threshold) { // 计算当前线程处理的像素点的下标。 int tid = blockIdx.x * blockDim.x + threadIdx.x + from; // 判断该像素点是否在当前层。 if (tid < to) { // 得到当前像素点的 (x,y) 坐标。 int x = coordinateCud.tplMeta.tplData[tid << 1]; int y = coordinateCud.tplMeta.tplData[(tid << 1) | 1]; // 计算当前像素点的索引值。 int offset = y * imgCud.pitchBytes + x; // 如果当前像素点已经被扩散,则不进行扩散处理。 if (imgCud.imgMeta.imgData[offset] != 255) return; // 得到当前像素点在随机数数组中的偏移。 int position = y * imgCud.imgMeta.width + x; // 得到当前 position 位置上对应的随机数。 int radval = *(randnumdev + position); // 如果当前点不被选择扩散,则不进行扩散处理。 if (radval >= (int)(threshold * FLUTTER_RADIO)) return; // 将当前点像素值标记为 0。 imgCud.imgMeta.imgData[offset] = 0; // 该点向周围 8 个方向进行扩散。 for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { // 如果是当前点则跳过不处理。 if (i == 0 && j == 0) continue; // 得到当前被扩散像素点的 (x,y) 坐标。 
int tx = x + i; int ty = y + j; // 如果该点坐标非法,则跳过不进行扩散处理。 if (tx < 0 || tx >= imgCud.imgMeta.width || ty < 0 || ty >= imgCud.imgMeta.height) continue; // 计算当前被扩散点的索引值。 int toffset = ty * imgCud.pitchBytes + tx; // 如果当前像素点已经被扩散,则不进行扩散处理。 if (imgCud.imgMeta.imgData[toffset] != 255) continue; // 得到当前像素点在随机数数组中的偏移。 int tposition = ty * imgCud.imgMeta.width + tx; // 得到当前 position 位置上对应的随机数。 radval = *(randnumdev + tposition); // 如果当前点不被选择扩散,则跳过不进行扩散处理。 if (radval >= (int)(threshold * FLUTTER_RADIO)) continue; // 将对当前点进行扩散,像素值标记为 0。 imgCud.imgMeta.imgData[toffset] = 0; } } } } // Kernel 函数:_flutterBinarizeKer(抖动二值化) static __global__ void _flutterBinarizeKer(ImageCuda inimgCud, ImageCuda outimgCud, ImageCuda *subimgCudTplDev) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标的 // x 和 y 分量(其中,c 表示column,r 表示row)。由于采用的并行度缩减策略, // 令一个线程处理 4 个输出像素,这四个像素位于同一列的相邻 4 行上,因此, // 对于 r 需要进行乘 4 计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // 计算第一个坐标点对应的图像数据数组下标。 int idx = r * inimgCud.pitchBytes + c; int tid = (r % TEMPLATE_SIZE) * TEMPLATE_SIZE + (c % TEMPLATE_SIZE); for (int i = 0; i < 4; i++) { // 检查每个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源, // 一方面防止由于段错误导致的程序崩溃。 if (c >= inimgCud.imgMeta.width || r >= inimgCud.imgMeta.height) break; // 获得输入图像对应像素点的像素值。 unsigned char val = inimgCud.imgMeta.imgData[idx]; // 用对应模板中相应位置的像素值对输入图像进行抖动二值化处理。 outimgCud.imgMeta.imgData[idx] = subimgCudTplDev[(int)val].imgMeta.imgData[tid]; // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计 // 算。 idx += inimgCud.pitchBytes; r++; // 对应的 tid 只需要加上 TEMPLATE_SIZE 即可。 tid += TEMPLATE_SIZE; } } // 宏:FAIL_FLUTTER_BINARIZE_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_FLUTTER_BINARIZE_FREE do { \ if (cnt != NULL) \ delete [] cnt; \ if (coordinate != NULL) \ TemplateBasicOp::deleteTemplate(coordinate); \ if (randnumdev != NULL) \ cudaFree(randnumdev); \ if (subimgCudTplDev != NULL) \ cudaFree(subimgCudTplDev); \ for (int i = 0; i < MAX_PIXEL; i++) { \ if (imgTpl[i] != NULL) { \ ImageBasicOp::deleteImage(imgTpl[i]); \ imgTpl[i] = NULL; \ } \ } \ } while (0) // Host 成员方法:flutterBinarize(抖动二值化处理) __host__ int FlutterBinarize::flutterBinarize(Image *inimg, Image *outimg) { // 检查输入图像和输出图像是否为空。 if (inimg == NULL || outimg == NULL) return NULL_POINTER; // 局部变量,根据每层像素值的范围划分出分组的数量。 int groupnum = MAX_PIXEL / packrange; // 局部变量,错误码 int errcode; // 声明函数内使用的所有变量,并初始化为空。 // 每层坐标点的在模板中的区域范围,[cnt[i - 1], cnt[i] - 1 ] 表示第 i 层点 // 在模板中的区域范围。 unsigned int *cnt = NULL; // 模板指针,依次存储每层坐标点的坐标。 Template *coordinate = NULL; // 在 Device 端申请随机数数组所需要的空间。 int *randnumdev = NULL; // 声明 Device 端 ROI 子图像模板指针。 ImageCuda *subimgCudTplDev = NULL; // 在 Host 端为 cnt 分配空间。 cnt = new unsigned int[MAX_PIXEL]; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // 将输出图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 如果输出图像无数据(故上面的拷贝函数会失败),则会创建一个和输入图像 // 的 ROI 子图像尺寸相同的图像。 errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // 如果创建图像也操作失败,则说明操作彻底失败。 if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } } // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // 提取输出图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = 
ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // 根据子图像的大小对长,宽进行调整,选择长度小的长,宽进行子图像的统一。 insubimgCud.imgMeta.width = GET_MIN(insubimgCud.imgMeta.width, outsubimgCud.imgMeta.width); outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; insubimgCud.imgMeta.height = GET_MIN(insubimgCud.imgMeta.height, outsubimgCud.imgMeta.height); outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // 初始化输出图像的 ROI 子图像,像素值标记为 255。 errcode = cudaMemset2D(outimg->imgData, outsubimgCud.pitchBytes, 255, outimg->width * sizeof (unsigned char), outimg->height); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // selectmethod = 1 执行第一种算法。 if (selectmethod == 1) { // 初始化空模板并在 Device 端开辟与输入图像等大的空间。 errcode = TemplateBasicOp::newTemplate(&coordinate); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // 初始化模板。 errcode = this->initializeLayerTpl(inimg, groupnum, cnt, coordinate); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // 计算随机数数组的大小。 int total = outsubimgCud.imgMeta.width * outsubimgCud.imgMeta.height; // 在 Device 端分配随机数数组所需要的空间。 errcode = cudaMalloc((void **)&randnumdev, total * sizeof (int)); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // 在 Host 端获取时间。由于使用标准 C++ 库获得的时间是精确到秒的,这个 // 时间精度是远远大于两次可能的调用间隔,因此,我们只在程序启动时取当 // 前时间,之后对程序的时间直接进行自加,以使得每次的时间都是不同的, // 这样来保证种子在各次调用之间的不同,从而获得不同的随机数。 static int timehost = (int)time(NULL); timehost++; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 gridsize, blocksize; gridsize.x = (outsubimgCud.imgMeta.height + 3) / 4; blocksize.x = 1; // 随机数矩阵的宽度。 int width = outsubimgCud.imgMeta.width; // 调用生成随机数的 Kernel 函数。 _genRandomKer<<<gridsize, blocksize>>>(randnumdev, timehost, width); // 若调用 CUDA 出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return CUDA_ERROR; } // 根据 Template 指针,得到对应的 TemplateCuda 型数据。 TemplateCuda *coordinateCud = TEMPLATE_CUDA(coordinate); // 局部变量,前一层像素点的终止下标(即当前层像素点的起始下标)。 int prepos = 0; for (int i = 0; i < groupnum; i++) { // 如果当前层不存在则跳过不处理。 if (prepos == cnt[i]) continue; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 blocksize.x = DEF_BLOCK_X * DEF_BLOCK_Y; blocksize.y = 1; gridsize.x = (cnt[i] - prepos + blocksize.x - 1) / blocksize.x; gridsize.y = 1; // 调用核函数,对当前层像素点进行抖动二值化。 _flutterBinarizeLayerKer<<<gridsize, blocksize>>>( outsubimgCud, *coordinateCud, randnumdev, prepos, cnt[i], (int)(RANDOM_BOUND * (groupnum - i) / groupnum)); // 若调用 CUDA 出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return CUDA_ERROR; } // 更新 prepos。 prepos = cnt[i]; } // selectmethod = 2 执行第二种算法。 } else { // 初始化模板。 errcode = this->initializeTemplate(); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; return errcode; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (insubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = (insubimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // 为 subimgCudTplDev 分配内存空间。 errcode = cudaMalloc((void**)&subimgCudTplDev, MAX_PIXEL * sizeof (ImageCuda)); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; cudaFree(subimgCudTplDev); return CUDA_ERROR; } // 将 Host 上的 subimgCudTplDev 拷贝到 Device 上。 errcode = cudaMemcpy(subimgCudTplDev, subimgCudTpl, MAX_PIXEL * sizeof (ImageCuda), 
cudaMemcpyHostToDevice); if (errcode != NO_ERROR) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; cudaFree(subimgCudTplDev); return CUDA_ERROR; } // 调用核函数,对当前层像素点进行抖动二值化。 _flutterBinarizeKer<<<gridsize, blocksize>>>(insubimgCud, outsubimgCud, subimgCudTplDev); // 若调用 CUDA 出错返回错误代码。 if (cudaGetLastError() != cudaSuccess) { // 遇到错误则释放空间。 FAIL_FLUTTER_BINARIZE_FREE; cudaFree(subimgCudTplDev); return CUDA_ERROR; } } // 释放 Host 端的统计数组存储空间。 delete [] cnt; // 清空模板指针。 TemplateBasicOp::deleteTemplate(coordinate); // 释放 Device 内存中随机数组数据。 cudaFree(randnumdev); // 释放像素值在 [0, 255] 范围内的每个像素值对应的模板。 for (int i = 0; i < MAX_PIXEL; i++) { ImageBasicOp::deleteImage(imgTpl[i]); imgTpl[i] = NULL; } // 释放已分配的模板指针,避免内存泄露。 cudaFree(subimgCudTplDev); // 处理完毕,退出。 return NO_ERROR; } // 取消前面的宏定义。 #undef FAIL_FLUTTER_BINARIZE_FREE // Host 成员方法:flutterBinarize(抖动二值化处理) __host__ int FlutterBinarize::flutterBinarize(Image *img) { // 调用 Out-Place 版本的成员方法。 return flutterBinarize(img, img); }
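// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the library above): a minimal, self-contained
// CUDA program demonstrating the "one thread processes 4 adjacent rows of one
// column" parallelism-reduction pattern used by _makeTemplateKer and
// _flutterBinarizeKer. The image size, pitch and per-pixel operation below are
// assumed demo values and do not use ImageCuda/Template at all.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void halveRows4(unsigned char *img, int width, int height, int pitch)
{
    // c is the column, r the row; each thread handles 4 consecutive rows.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
    int idx = r * pitch + c;
    for (int i = 0; i < 4; i++) {
        if (c >= width || r >= height)
            break;                 // bounds check, as in the kernels above
        img[idx] /= 2;             // arbitrary per-pixel work, demo only
        idx += pitch;              // stepping down one row only adds one pitch
        r++;
    }
}

int main()
{
    const int width = 64, height = 64, pitch = 64;
    unsigned char *d_img = NULL;
    cudaMalloc((void **)&d_img, pitch * height);
    cudaMemset(d_img, 200, pitch * height);
    dim3 block(32, 8);
    dim3 grid((width + block.x - 1) / block.x,
              (height + block.y * 4 - 1) / (block.y * 4));
    halveRows4<<<grid, block>>>(d_img, width, height, pitch);
    cudaDeviceSynchronize();
    printf("halveRows4: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_img);
    return 0;
}
// ----------------------------------------------------------------------------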
namespace anakin{ namespace saber{ //! general kernel for softmax template <typename dtype> __global__ void softmax_max_kernel(int total_size, const dtype* in_data, dtype* out_data, \ dtype min_data, int inner_num, int outer_num, int axis_size){ //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; int real_index = idx_outer * inner_num + idx_inner; //! get maximum data across softmax axis dtype max_data = min_data; for (int i = 0; i < axis_size; ++i) { max_data = in_data[real_index] > max_data? in_data[real_index] : max_data; real_index += inner_num; } out_data[idx] = max_data; } } template <typename dtype> __global__ void softmax_max_roi_kernel(int total_size, const dtype* in_data, \ dtype* out_data, dtype min_data, \ const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \ int softmax_axis, int axis_size, int dims){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { //! compute real data index int input_real_index = 0; for (int i = dims - 1; i >= 0; i--) { if (i == softmax_axis) { continue; } else { int x = idx % shape_valid[i]; input_real_index += x * input_stride_real[i]; idx = idx / shape_valid[i]; } } //! get maximum data across softmax axis dtype max_data = min_data; for (int i = 0; i < axis_size; ++i) { max_data = in_data[input_real_index] > max_data? \ in_data[input_real_index] : max_data; input_real_index += i * input_stride_real[softmax_axis]; } out_data[idx] = max_data; } } template <typename dtype> __global__ void softmax_sub_exp_sum_kernel(int total_size, const dtype* in_data, \ dtype* out_data, const dtype* max_data, dtype* sum_data, \ int inner_num, int outer_num, int axis_size){ //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; dtype max_data_cur = max_data[idx]; //dtype *sum_data_cur = &sum_data[idx]; dtype sum_data_cur = 0; int real_index = idx_outer * inner_num + idx_inner; //! compute exp and summarize across the softmax axis for (int i = 0; i < axis_size; ++i) { dtype sub_data = in_data[real_index] - max_data_cur; sub_data = expf(sub_data); sum_data_cur += sub_data; out_data[real_index] = sub_data; real_index += inner_num; } sum_data[idx] = sum_data_cur; } } template <typename dtype> __global__ void softmax_sub_exp_sum_roi_kernel(int total_size, \ const dtype* in_data, dtype* out_data, \ const dtype* max_data, dtype* sum_data, \ const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \ int softmax_axis, int axis_size, int dims){ //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { //! compute real data index int output_real_index = 0; for (int i = dims - 1; i >= 0; i--) { if (i == softmax_axis) { continue; } else { int x = idx % shape_valid[i]; output_real_index += x * output_stride_real[i]; idx = idx / shape_valid[i]; } } dtype max_data_cur = max_data[idx]; //dtype *sum_data_cur = &sum_data[idx]; dtype sum_data_cur = 0; //! 
compute exp and summarize across the softmax axis for (int i = 0; i < axis_size; ++i) { dtype sub_data = in_data[output_real_index] - max_data_cur; sub_data = expf(sub_data); sum_data_cur += sub_data; out_data[output_real_index] = sub_data; output_real_index += output_stride_real[softmax_axis]; } sum_data[idx] = sum_data_cur; } } template <typename dtype> __global__ void softmax_divid_output_kernel(int total_size, dtype* io_data, \ const dtype* sum_data, int inner_num, int outer_num, int axis_size){ //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; dtype sum_data_cur = sum_data[idx]; int real_index = idx_outer * inner_num + idx_inner; //! compute final result for (int i = 0; i < axis_size; ++i) { io_data[real_index] = io_data[real_index] / sum_data_cur; real_index += inner_num; } } } template <typename dtype> __global__ void softmax_divid_output_roi_kernel(int total_size, \ dtype* io_data, const dtype* sum_data, \ const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \ int softmax_axis, int axis_size, int dims){ //! compute data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { //! compute real data index int output_real_index = 0; for (int i = dims - 1; i >= 0; i--) { if (i == softmax_axis) { continue; } else { int x = idx % shape_valid[i]; output_real_index += x * output_stride_real[i]; idx = idx / shape_valid[i]; } } dtype sum_data_cur = sum_data[idx]; //! compute final result for (int i = 0; i < axis_size; ++i) { io_data[output_real_index] = io_data[output_real_index] / sum_data_cur; output_real_index += output_stride_real[softmax_axis]; } } } extern __shared__ char tile[]; template <typename dtype> __global__ void sharemem_softmax_kernel(int total_size, \ const dtype* in_data, dtype* out_data, \ int inner_num, int outer_num, int axis_size){ //__shared__ dtype data[MAX_AXIS_SIZE][CUDA_NUM_THREADS]; dtype* data = (dtype*)tile + threadIdx.x; //! compute thread index and real data index int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < total_size) { int idx_inner = idx % inner_num; int idx_outer = (idx / inner_num) * axis_size; int blocksize = blockDim.x; int real_index = idx_outer * inner_num + idx_inner; int loop_idx = real_index; //! read all data to sharemem in softmax channel #pragma unroll for (int i = 0; i < axis_size; ++i) { data[i * blocksize] = in_data[loop_idx]; loop_idx += inner_num; } //! get maximum value in softmax channel dtype max_data = data[0]; #pragma unroll for (int i = 1; i < axis_size; ++i) { dtype dt = data[i * blocksize]; if (max_data < dt){ max_data = dt; } } //! subtract then summarize dtype sum = 0; #pragma unroll for (int i = 0; i < axis_size; ++i) { //dtype *dt = &data[i][thread_idx]; dtype *dt = data + i * blocksize; *dt = expf(*dt - max_data); sum += *dt; } //! write back result loop_idx = real_index; #pragma unroll for (int i = 0; i < axis_size; ++i) { out_data[loop_idx] = data[i * blocksize] / sum; loop_idx += inner_num; } } } template <typename dtype> __global__ void sharemem_softmax_roi_kernel(int total_size, \ const dtype* in_data, dtype* out_data, \ const int* input_stride_real, const int* output_stride_real, const int* shape_valid, \ int softmax_axis, int axis_size, int dims){ //__shared__ dtype data[MAX_AXIS_SIZE][CUDA_NUM_THREADS]; dtype* data = (dtype*)tile + threadIdx.x; //! 
compute thread index and real data index int idx1 = blockIdx.x * blockDim.x + threadIdx.x; int idx = idx1; if (idx < total_size) { int blocksize = blockDim.x; //! compute real data index int input_real_index = 0; int output_real_index = 0; for (int i = dims - 1; i >= 0; i--) { if (i == softmax_axis) { continue; } else { int x = idx % shape_valid[i]; input_real_index += x * input_stride_real[i]; output_real_index += x * output_stride_real[i]; idx = idx / shape_valid[i]; } } //! read all data to sharemem in softmax channel #pragma unroll for (int i = 0; i < axis_size; ++i) { data[i * blocksize] = in_data[input_real_index]; input_real_index += input_stride_real[softmax_axis]; } //! get maximum value in softmax channel dtype max_data = data[0]; #pragma unroll for (int i = 1; i < axis_size; ++i) { dtype dt = data[i * blocksize]; if (max_data < dt){ max_data = dt; } } //! subtract then summarize dtype sum = 0; #pragma unroll for (int i = 0; i < axis_size; ++i) { //dtype *dt = &data[i][thread_idx]; dtype *dt = data + i * blocksize; *dt = expf(*dt - max_data); sum += *dt; } //! write back result #pragma unroll for (int i = 0; i < axis_size; ++i) { out_data[output_real_index] = data[i * blocksize] / sum; output_real_index += output_stride_real[softmax_axis]; } } } template <> SaberStatus SaberSoftmax<NV, AK_FLOAT>::create( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, SoftmaxParam<NV>& param, Context<NV>& ctx) { //! compute size Shape shape_in = inputs[0]->valid_shape(); Shape shape_out = outputs[0]->valid_shape(); CHECK_EQ(shape_in == shape_out, true) << "valid shapes must be the same"; _outer_num = inputs[0]->count_valid(0, param.axis); _inner_num = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims()); _axis_size = shape_in[param.axis]; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, API::get_device_id()); size_t sharedmem_size = deviceProp.sharedMemPerBlock; _max_dimsize = sharedmem_size / sizeof(float) / CUDA_NUM_THREADS; Shape sh_tmp({1, 1, 1, _outer_num * _inner_num}); if (_axis_size > _max_dimsize){ //! re_alloc device memory _max_data.reshape(sh_tmp); _sum_data.reshape(sh_tmp); } //! CHECK whether the input or output tensor is with continuous buffer or not _is_continue_buf = outputs[0]->is_continue_mem() && inputs[0]->is_continue_mem(); _dims = shape_in.size(); if (!_is_continue_buf) { Shape sh_input_real_stride = inputs[0]->get_stride(); Shape sh_output_real_stride = outputs[0]->get_stride(); //! re_alloc device memory Shape sh({1, 1, 1, _dims}); _valid_shape.reshape(sh); _input_stride.reshape(sh); _output_stride.reshape(sh); CUDA_CHECK(cudaMemcpy(_valid_shape.mutable_data(), inputs[0]->valid_shape().data(), \ sizeof(int) * _dims, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(_input_stride.mutable_data(), sh_input_real_stride.data(), \ sizeof(int) * _dims, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(_output_stride.mutable_data(), sh_output_real_stride.data(), \ sizeof(int) * _dims, cudaMemcpyHostToDevice)); } return SaberSuccess; } template <> SaberStatus SaberSoftmax<NV, AK_FLOAT>::init( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, SoftmaxParam<NV>& param, Context<NV>& ctx) { //! get context this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus SaberSoftmax<NV, AK_FLOAT>::dispatch(\ const std::vector<Tensor<NV> *>& inputs, \ std::vector<Tensor<NV> *>& outputs, \ SoftmaxParam<NV>& param) { cudaStream_t stream = this->_ctx->get_compute_stream(); //! 
inputs only has one tensor int total_threads = this->_inner_num * this->_outer_num; const float* data_in = (const float* )inputs[0]->data(); float* data_out = (float*)outputs[0]->mutable_data(); float* max_data = (float*)this->_max_data.mutable_data(); float* sum_data = (float*)this->_sum_data.mutable_data(); const int* valid_shape = (const int*)_valid_shape.data(); const int* input_stride = (const int*)_input_stride.data(); const int* output_stride = (const int*)_output_stride.data(); if (_is_continue_buf) { //! softmax kernel without roi if (this->_axis_size <= _max_dimsize){ int sharemem_size = this->_axis_size * CUDA_NUM_THREADS * sizeof(float); sharemem_softmax_kernel<float>\ <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, sharemem_size, stream>>>( total_threads, data_in, data_out, this->_inner_num, this->_outer_num, this->_axis_size); } else { //! firstly, get maximum data float min_data = std::numeric_limits<float>::min(); softmax_max_kernel<float>\ <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>( total_threads, data_in, max_data, min_data, \ this->_inner_num, this->_outer_num, this->_axis_size); //! then, compute exp and sum data softmax_sub_exp_sum_kernel<float> <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>( total_threads, data_in, data_out, max_data, sum_data, \ this->_inner_num, this->_outer_num, this->_axis_size); //! lastly, compute divided output softmax_divid_output_kernel<float>\ <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>( total_threads, data_out, sum_data, \ this->_inner_num, this->_outer_num, this->_axis_size); } } else { //! softmax kernel with roi if (this->_axis_size <= _max_dimsize){ int sharemem_size = this->_axis_size * CUDA_NUM_THREADS * sizeof(float); sharemem_softmax_roi_kernel<float>\ <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, sharemem_size, stream>>>( total_threads, data_in, data_out, input_stride, output_stride, valid_shape, \ param.axis, _axis_size, _dims); } else { //! firstly, get maximum data float min_data = std::numeric_limits<float>::min(); softmax_max_roi_kernel<float>\ <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>( total_threads, data_in, max_data, min_data, \ input_stride, output_stride, valid_shape, \ param.axis, _axis_size, _dims); //! then, compute exp and sum data softmax_sub_exp_sum_roi_kernel<float> <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>( total_threads, data_in, data_out, max_data, sum_data, \ input_stride, output_stride, valid_shape, \ param.axis, _axis_size, _dims); //! 
lastly, compute divided output softmax_divid_output_roi_kernel<float>\ <<<CUDA_GET_BLOCKS(total_threads), CUDA_NUM_THREADS, 0, stream>>>( total_threads, data_out, sum_data, \ input_stride, output_stride, valid_shape, \ param.axis, _axis_size, _dims); } } return SaberSuccess; } // ============================================= int8 template <> SaberStatus SaberSoftmax<NV, AK_INT8>::create( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, SoftmaxParam<NV>& param, Context<NV>& ctx) { return SaberSuccess; } template <> SaberStatus SaberSoftmax<NV, AK_INT8>::init( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, SoftmaxParam<NV>& param, Context<NV>& ctx) { this->_ctx = &ctx; return create(inputs, outputs, param, ctx); } template <> SaberStatus SaberSoftmax<NV, AK_INT8>::dispatch( const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, SoftmaxParam<NV>& param) { return SaberSuccess; } template class SaberSoftmax<NV, AK_FLOAT>; template class SaberSoftmax<NV, AK_INT8>; DEFINE_OP_TEMPLATE(SaberSoftmax, SoftmaxParam, NV, AK_HALF); } //namespace anakin } //namespace anakin
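// ----------------------------------------------------------------------------
// Illustrative sketch (not part of Anakin/Saber): a stand-alone CUDA program
// applying the same numerically stable softmax decomposition used above --
// maximum across the axis, exp of the shifted values plus their sum, then the
// divide -- collapsed into one thread per row for brevity. Sizes and input
// data are made-up demo values.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void rowSoftmax(const float *in, float *out, int rows, int cols)
{
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= rows) return;
    const float *x = in + r * cols;
    float *y = out + r * cols;
    float m = x[0];                                   // 1) maximum over the axis
    for (int i = 1; i < cols; ++i) m = fmaxf(m, x[i]);
    float s = 0.f;                                    // 2) exp of shifted values + sum
    for (int i = 0; i < cols; ++i) { y[i] = expf(x[i] - m); s += y[i]; }
    for (int i = 0; i < cols; ++i) y[i] /= s;         // 3) normalize
}

int main()
{
    const int rows = 2, cols = 4;
    float h_in[rows * cols] = {1.f, 2.f, 3.f, 4.f, -1.f, 0.f, 1.f, 2.f};
    float h_out[rows * cols];
    float *d_in = NULL, *d_out = NULL;
    cudaMalloc((void **)&d_in, sizeof(h_in));
    cudaMalloc((void **)&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    rowSoftmax<<<1, 32>>>(d_in, d_out, rows, cols);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c) printf("%.4f ", h_out[r * cols + c]);
        printf("\n");
    }
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
// ----------------------------------------------------------------------------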
#if(CUDAGMM_VERSION == 4) #define SWAP(a, b, t) t = (a); a = (b); b = (t) __constant__ CvFastBgGMMParams d_GMMParams; __constant__ CvFastBgGMMData d_GMMData; __constant__ int d_arrImageInfo[ImageInfoCount]; /*====================================================================================*/ // forward declarations /*====================================================================================*/ int InitCUDA(CvFastBgGMM* pGMM); __global__ void cudaUpdateFastBgGMM(unsigned char* data, unsigned char* output); /*====================================================================================*/ /*====================================================================================*/ CvFastBgGMMParams* cvCreateFastBgGMMParams(int width, int height) { CvFastBgGMMParams* pGMMParams = new CvFastBgGMMParams(); int size = width*height; pGMMParams->nWidth = width; pGMMParams->nHeight = height; pGMMParams->nSize = size; pGMMParams->nNBands=3; //always 3 - not implemented for other values! //set parameters // K - max number of Gaussians per pixel pGMMParams->nM = 4; // Tb - the threshold - n var pGMMParams->fTb = 4*4; // Tbf - the threshold pGMMParams->fTB = 0.9f;//1-cf from the paper // Tgenerate - the threshold pGMMParams->fTg = 3.0f*3.0f;//update the mode or generate new pGMMParams->fSigma= 11.0f;//sigma for the new mode // alpha - the learning factor pGMMParams->fAlphaT=0.001f; // complexity reduction prior constant pGMMParams->fCT=0.05f; //shadow // Shadow detection pGMMParams->bShadowDetection = 0;//turn on pGMMParams->fTau = 0.5f;// Tau - shadow threshold pGMMParams->bRemoveForeground = 0; return pGMMParams; } /*====================================================================================*/ /*====================================================================================*/ template <bool toPinned> void copyImageData(IplImage* h_img, unsigned char* d_pinnedMem, int channels) { if(h_img->widthStep == channels*h_img->width) { memcpy( toPinned ? (d_pinnedMem) : (unsigned char*)(h_img->imageData), toPinned ? 
(unsigned char*)(h_img->imageData) : (d_pinnedMem), h_img->widthStep*h_img->height); } else { unsigned char* d_curData = d_pinnedMem; if(toPinned) { for(int i = 0; i < h_img->height; ++i) { memcpy( d_curData, &CV_IMAGE_ELEM(h_img, unsigned char, i, 0), channels*h_img->width); d_curData += (channels*h_img->width); } } else { for(int i = 0; i < h_img->height; ++i) { memcpy( &CV_IMAGE_ELEM(h_img, unsigned char, i, 0), d_curData, channels*h_img->width); d_curData += (channels*h_img->width); } } } } /*====================================================================================*/ /*====================================================================================*/ CvFastBgGMM* cvCreateFastBgGMM(CvFastBgGMMParams* pGMMParams, IplImage* frame0) { CvFastBgGMM* h_pGMMRet = new CvFastBgGMM(); if(InitCUDA(h_pGMMRet) < 0) { delete h_pGMMRet; return NULL; } CvFastBgGMMData* h_pGMMData = new CvFastBgGMMData(); // allocate device global memory int iElemCount = pGMMParams->nSize * pGMMParams->nM * sizeof(float); int iSizeCount = pGMMParams->nSize * sizeof(int); CUDAGMM_SAFE_CALL(cudaMalloc((void**)&(h_pGMMData->ucGaussian), 4*iElemCount)); CUDAGMM_SAFE_CALL(cudaMalloc((void**)&(h_pGMMData->rWeight), iElemCount)); CUDAGMM_SAFE_CALL(cudaMalloc((void**)&(h_pGMMData->rnUsedModes), iSizeCount)); CUDAGMM_SAFE_CALL(cudaMemset(h_pGMMData->rnUsedModes, 0, iSizeCount)); CUDAGMM_SAFE_CALL(cudaMemcpyToSymbol(d_GMMData, h_pGMMData, sizeof(CvFastBgGMMData), 0, cudaMemcpyHostToDevice)); h_pGMMRet->internal_data = h_pGMMData; // we will use 4-channels image as input data! h_pGMMRet->inputFrame = cvCreateImage(cvSize(pGMMParams->nWidth, pGMMParams->nHeight), IPL_DEPTH_8U, 4); h_pGMMRet->nInputImgSize = 4 * frame0->width * pGMMParams->nHeight; CUDAGMM_SAFE_CALL(cudaMalloc((void**)&(h_pGMMRet->d_inputImg), h_pGMMRet->nInputImgSize)); h_pGMMRet->h_outputImg = cvCreateImage(cvSize(pGMMParams->nWidth, pGMMParams->nHeight), IPL_DEPTH_8U, 1); h_pGMMRet->nOutputImgSize = pGMMParams->nWidth * pGMMParams->nHeight; CUDAGMM_SAFE_CALL(cudaMalloc((void**)&(h_pGMMRet->d_outputImg), h_pGMMRet->nOutputImgSize)); // d_arrImageInfo constant (device mem.) int inpPixelCnt = pGMMParams->nWidth * pGMMParams->nHeight; // number of pixels per thread must be 4k, i.e. 4, 8, 12, 16, 20... 
int iPixelsPerThread = min(32, (int)ceil(inpPixelCnt *1.0 / (h_pGMMRet->nBlocksPerGrid * h_pGMMRet->nThreadsPerBlock))); iPixelsPerThread = 4*(int)ceil(iPixelsPerThread/4.0f); h_pGMMRet->nBlocksPerGrid = (int)ceil(inpPixelCnt*1.0 / ((h_pGMMRet->nThreadsPerBlock) * iPixelsPerThread)); printf("%d pixels/thread, %d threads/block, %d blocks\r\n", iPixelsPerThread, h_pGMMRet->nThreadsPerBlock, h_pGMMRet->nBlocksPerGrid); int arrImgInfo[ImageInfoCount] = { inpPixelCnt, iPixelsPerThread }; CUDAGMM_SAFE_CALL(cudaMemcpyToSymbol(d_arrImageInfo, arrImgInfo, ImageInfoCount*sizeof(int), 0, cudaMemcpyHostToDevice)); CUDAGMM_SAFE_CALL(cudaStreamCreate(&(h_pGMMRet->copyStream))); CUDAGMM_SAFE_CALL(cudaStreamCreate(&(h_pGMMRet->execStream))); // cudaHostAllocMapped might fail CUDAGMM_SAFE_CALL(cudaHostAlloc((void**)&(h_pGMMRet->h_pinnedIn), h_pGMMRet->nInputImgSize, cudaHostAllocWriteCombined)); CUDAGMM_SAFE_CALL(cudaHostAlloc((void**)&(h_pGMMRet->h_pinnedOut), h_pGMMRet->nOutputImgSize, cudaHostAllocDefault)); CUDAGMM_SAFE_CALL(cudaMalloc((void**)&(h_pGMMRet->d_inputImg2), h_pGMMRet->nInputImgSize)); CUDAGMM_SAFE_CALL(cudaMalloc((void**)&(h_pGMMRet->d_outputImg2), h_pGMMRet->nOutputImgSize)); // copy the algorithm parameters to Constant memory pGMMParams->fPrune = -(pGMMParams->fAlphaT) * (pGMMParams->fCT); CUDAGMM_SAFE_CALL(cudaMemcpyToSymbol(d_GMMParams, pGMMParams, sizeof(CvFastBgGMMParams), 0, cudaMemcpyHostToDevice)); // setup the initial state for asynchronous execution cvCvtColor(frame0, h_pGMMRet->inputFrame, CV_BGR2BGRA); copyImageData<true>(h_pGMMRet->inputFrame, h_pGMMRet->h_pinnedIn, 4); CUDAGMM_SAFE_CALL(cudaMemcpy(h_pGMMRet->d_inputImg2, h_pGMMRet->h_pinnedIn, h_pGMMRet->nInputImgSize, cudaMemcpyHostToDevice)); cudaUpdateFastBgGMM<<< (h_pGMMRet->nBlocksPerGrid), (h_pGMMRet->nThreadsPerBlock), 4, h_pGMMRet->execStream >>> ( h_pGMMRet->d_inputImg2, h_pGMMRet->d_outputImg2 ); CUDAGMM_SAFE_CALL(cudaMemcpyAsync(h_pGMMRet->d_inputImg, h_pGMMRet->h_pinnedIn, h_pGMMRet->nInputImgSize, cudaMemcpyHostToDevice, h_pGMMRet->copyStream)); CUDAGMM_SAFE_CALL(cudaStreamSynchronize(h_pGMMRet->execStream)); CUDAGMM_SAFE_CALL(cudaMemcpy(h_pGMMRet->h_pinnedOut, h_pGMMRet->d_outputImg2, h_pGMMRet->nOutputImgSize, cudaMemcpyDeviceToHost)); CUDAGMM_SAFE_CALL(cudaStreamSynchronize(h_pGMMRet->copyStream)); cudaUpdateFastBgGMM<<< (h_pGMMRet->nBlocksPerGrid), (h_pGMMRet->nThreadsPerBlock), 4, h_pGMMRet->execStream >>> ( h_pGMMRet->d_inputImg, h_pGMMRet->d_outputImg ); CUDAGMM_SAFE_CALL(cudaMemcpyAsync(h_pGMMRet->d_inputImg2, h_pGMMRet->h_pinnedIn, h_pGMMRet->nInputImgSize, cudaMemcpyHostToDevice, h_pGMMRet->copyStream)); return h_pGMMRet; } /*====================================================================================*/ /*====================================================================================*/ void cvReleaseFastBgGMM(CvFastBgGMM** h_ppGMM) { CvFastBgGMM* h_pGMM = *h_ppGMM; cvReleaseImage(&(h_pGMM->h_outputImg)); cvReleaseImage(&(h_pGMM->inputFrame)); CUDAGMM_SAFE_CALL( cudaStreamSynchronize(h_pGMM->copyStream)); CUDAGMM_SAFE_CALL( cudaStreamSynchronize(h_pGMM->execStream)); CUDAGMM_SAFE_CALL( cudaFree(h_pGMM->d_inputImg)); CUDAGMM_SAFE_CALL( cudaFree(h_pGMM->d_outputImg)); CUDAGMM_SAFE_CALL( cudaFree(h_pGMM->d_inputImg2)); CUDAGMM_SAFE_CALL( cudaFree(h_pGMM->d_outputImg2)); CUDAGMM_SAFE_CALL( cudaFreeHost(h_pGMM->h_pinnedIn)); CUDAGMM_SAFE_CALL( cudaFreeHost(h_pGMM->h_pinnedOut)); CUDAGMM_SAFE_CALL( cudaStreamDestroy(h_pGMM->copyStream)); CUDAGMM_SAFE_CALL( cudaStreamDestroy(h_pGMM->execStream)); 
CvFastBgGMMData* h_pGMMData = h_pGMM->internal_data; CUDAGMM_SAFE_CALL( cudaFree(h_pGMMData->ucGaussian)); CUDAGMM_SAFE_CALL( cudaFree(h_pGMMData->rWeight)); CUDAGMM_SAFE_CALL( cudaFree(h_pGMMData->rnUsedModes)); delete h_pGMM->internal_data; delete h_pGMM; (*h_ppGMM) = 0; } /*====================================================================================*/ /*====================================================================================*/ void cvUpdateFastBgGMM(CvFastBgGMM* pGMM, IplImage* inputImg) { cvCvtColor(inputImg, pGMM->inputFrame, CV_BGR2BGRA); CUDAGMM_SAFE_CALL(cudaStreamSynchronize(pGMM->copyStream)); copyImageData<true>(pGMM->inputFrame, pGMM->h_pinnedIn, 4); copyImageData<false>(pGMM->h_outputImg, pGMM->h_pinnedOut, 1); CUDAGMM_SAFE_CALL(cudaStreamSynchronize(pGMM->execStream)); unsigned char* pTmp; SWAP(pGMM->d_inputImg, pGMM->d_inputImg2, pTmp); SWAP(pGMM->d_outputImg, pGMM->d_outputImg2, pTmp); CUDAGMM_SAFE_CALL(cudaMemcpyAsync(pGMM->d_inputImg, pGMM->h_pinnedIn, pGMM->nInputImgSize, cudaMemcpyHostToDevice, pGMM->copyStream)); CUDAGMM_SAFE_CALL(cudaMemcpyAsync(pGMM->h_pinnedOut, pGMM->d_outputImg, pGMM->nOutputImgSize, cudaMemcpyDeviceToHost, pGMM->copyStream)); cudaUpdateFastBgGMM<<< (pGMM->nBlocksPerGrid), (pGMM->nThreadsPerBlock), 4, pGMM->execStream >>> ( pGMM->d_inputImg2, pGMM->d_outputImg2 ); #ifdef _DEBUG cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { printf("CUDA error: %d: %s\r\n", error, cudaGetErrorString(error)); } #endif } float cvUpdateFastBgGMMTimer(CvFastBgGMM* pGMM, IplImage* inputImg) { cudaEvent_t start, stop; float time = 0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); cvUpdateFastBgGMM(pGMM, inputImg); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); return time; } /*============================================================================*/ // CUDA-related functions /*============================================================================*/ int InitCUDA(CvFastBgGMM* pGMM) { #if __DEVICE_EMULATION__ pGMM->nThreadsPerBlock = pGMM->nBlocksPerGrid = 256; return 0; #else int count = 0; int i = 0; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return -1; } for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { pGMM->nThreadsPerBlock = prop.maxThreadsPerBlock / 4; // temporarily hard-code a little here... pGMM->nBlocksPerGrid = 256; break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return -1; } cudaSetDevice(i); return i; #endif } /*=======================================================================================*/ /*=======================================================================================*/ __device__ int _cudaUpdateFastBgGMM(int pixel, float red, float green, float blue, int* pModesUsed ) { //calculate distances to the modes (+ sort???) //here we need to go in descending order!!! 
int pos; bool bFitsPDF = 0; int bBackground = 0; float m_fOneMinAlpha = 1 - d_GMMParams.fAlphaT; int nModes = (*pModesUsed); float weight, totalWeight = 0.0f; float dR, dG, dB; float dist, k, sigmanew; //go through all modes for (int iModes = 0; iModes < nModes; iModes++) { pos = pixel + iModes*d_arrImageInfo[ImageInfoPixelCount]; weight = d_GMMData.rWeight[pos]; //fit not found yet if (!bFitsPDF) { //check if it belongs to some of the modes //calculate distance float4 cGauss = d_GMMData.ucGaussian[pos]; dR = cGauss.x - red; dG = cGauss.y - green; dB = cGauss.z - blue; //check if it fits the current mode (Factor * sigma) //square distance -slower and less accurate //float maxDistance = cvSqrt(m_fTg*var); //if ((fabs(dR) <= maxDistance) && (fabs(dG) <= maxDistance) && (fabs(dB) <= maxDistance)) //circle dist = dR*dR + dG*dG + dB*dB; //background? - m_fTb if ((totalWeight < d_GMMParams.fTB) && (dist < d_GMMParams.fTb * cGauss.w)) bBackground = 1; //check fit if (dist < d_GMMParams.fTg * cGauss.w) { //belongs to the mode bFitsPDF = 1; //update distribution k = d_GMMParams.fAlphaT/weight; weight = m_fOneMinAlpha * weight + d_GMMParams.fPrune; weight += d_GMMParams.fAlphaT; cGauss.x -= k*(dR); cGauss.y -= k*(dG); cGauss.z -= k*(dB); //limit update speed for cov matrice //not needed sigmanew = cGauss.w + k*(dist - cGauss.w); //limit the variance cGauss.w = sigmanew < 4 ? 4 : sigmanew > 5 * d_GMMParams.fSigma ? 5 * d_GMMParams.fSigma : sigmanew; d_GMMData.ucGaussian[pos] = cGauss; //sort //all other weights are at the same place and //only the matched (iModes) is higher -> just find the new place for it for (int iLocal = iModes; iLocal > 0; iLocal--) { int posLocal = pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount]; if (weight < (d_GMMData.rWeight[posLocal-d_arrImageInfo[ImageInfoPixelCount]])) { break; } else { //swap float tmpVal; float4 tmpuChar; SWAP(d_GMMData.ucGaussian[posLocal], d_GMMData.ucGaussian[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpuChar); SWAP(d_GMMData.rWeight[posLocal], d_GMMData.rWeight[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpVal); } } //belongs to the mode } else { weight = m_fOneMinAlpha * weight + d_GMMParams.fPrune; //check prune if (weight < -(d_GMMParams.fPrune)) { weight = 0.0f; nModes--; // bPrune=1; //break;//the components are sorted so we can skip the rest } } //check if it fits the current mode (2.5 sigma) /////// } //fit not found yet else { weight = m_fOneMinAlpha * weight + d_GMMParams.fPrune; if (weight < -(d_GMMParams.fPrune)) { weight=0.0; nModes--; //bPrune=1; //break;//the components are sorted so we can skip the rest } } totalWeight += weight; d_GMMData.rWeight[pos] = weight; } //go through all modes ////// //renormalize weights for (int iLocal = 0; iLocal < nModes; iLocal++) { d_GMMData.rWeight[pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount]] /= totalWeight; } //make new mode if needed and exit if (!bFitsPDF) { if (nModes == d_GMMParams.nM) { //replace the weakest } else { //add a new one //totalWeight+=m_fAlphaT; //pos++; nModes++; } pos = pixel + (nModes-1)*d_arrImageInfo[ImageInfoPixelCount]; if (nModes == 1) d_GMMData.rWeight[pos] = 1; else d_GMMData.rWeight[pos] = d_GMMParams.fAlphaT; //renormalize weights for (int iLocal = 0; iLocal < nModes-1; iLocal++) { d_GMMData.rWeight[pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount]] *= m_fOneMinAlpha; } float4 cGauss; cGauss.x = red; cGauss.y = green; cGauss.z = blue; cGauss.w = d_GMMParams.fSigma; d_GMMData.ucGaussian[pos] = cGauss; //sort //find the new place for it for (int 
iLocal = nModes - 1; iLocal>0; iLocal--) { int posLocal = pixel + iLocal*d_arrImageInfo[ImageInfoPixelCount]; if (d_GMMParams.fAlphaT < (d_GMMData.rWeight[posLocal - d_arrImageInfo[ImageInfoPixelCount]])) { break; } else { //swap float4 tmpuChar; float tmpVal; SWAP(d_GMMData.ucGaussian[posLocal], d_GMMData.ucGaussian[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpuChar); SWAP(d_GMMData.rWeight[posLocal], d_GMMData.rWeight[posLocal - d_arrImageInfo[ImageInfoPixelCount]], tmpVal); } } } //set the number of modes *pModesUsed=nModes; return bBackground; } /*=======================================================================================*/ /*=======================================================================================*/ __device__ int _cudaRemoveShadowGMM(int pixel, float red, float green, float blue, int nModes) { //calculate distances to the modes (+ sort???) //here we need to go in descending order!!! // long posPixel = pixel * m_nM; int pos; float tWeight = 0; float numerator, denominator; // check all the distributions, marked as background: for (int iModes=0;iModes<nModes;iModes++) { pos=pixel+iModes*d_arrImageInfo[ImageInfoPixelCount]; float4 cGauss = d_GMMData.ucGaussian[pos]; float weight = d_GMMData.rWeight[pos]; tWeight += weight; numerator = red * cGauss.x + green * cGauss.y + blue * cGauss.z; denominator = cGauss.x * cGauss.x + cGauss.y * cGauss.y + cGauss.z * cGauss.z; // no division by zero allowed if (denominator == 0) { break; } float a = numerator / denominator; // if tau < a < 1 then also check the color distortion if ((a <= 1) && (a >= d_GMMParams.fTau))//m_nBeta=1 { float dR=a * cGauss.x - red; float dG=a * cGauss.y - green; float dB=a * cGauss.z - blue; //square distance -slower and less accurate //float maxDistance = cvSqrt(m_fTb*var); //if ((fabs(dR) <= maxDistance) && (fabs(dG) <= maxDistance) && (fabs(dB) <= maxDistance)) //circle float dist=(dR*dR+dG*dG+dB*dB); if (dist<d_GMMParams.fTb*cGauss.w*a*a) { return 2; } } if (tWeight > d_GMMParams.fTB) { break; } } return 0; } /*=======================================================================================*/ /*=======================================================================================*/ __device__ void _cudaReplacePixelBackgroundGMM(int pixel, uchar4* pData) { uchar4 tmp; float4 cGauss = d_GMMData.ucGaussian[pixel]; tmp.z = (unsigned char) cGauss.x; tmp.y = (unsigned char) cGauss.y; tmp.x = (unsigned char) cGauss.z; (*pData) = tmp; } /*=======================================================================================*/ /*=======================================================================================*/ extern __shared__ int sharedInfo[]; __global__ void cudaUpdateFastBgGMM(unsigned char* data, unsigned char* output) { if(threadIdx.x == 0) { // the start pixel for current block sharedInfo[0] = (blockIdx.x * blockDim.x)*d_arrImageInfo[ImageInfoPixelsPerThread]; } __syncthreads(); int iPxStart = sharedInfo[0] + threadIdx.x; int iPxEnd = min( d_arrImageInfo[ImageInfoPixelCount], sharedInfo[0] + (blockDim.x * d_arrImageInfo[ImageInfoPixelsPerThread])); uchar4* pGlobalInput = ((uchar4*)data) + iPxStart; unsigned char* pGlobalOutput = output + iPxStart; int* pUsedModes = d_GMMData.rnUsedModes + iPxStart; uchar fRed, fGreen, fBlue; uchar4 currentInputPx; for(int i = iPxStart; i < iPxEnd; i += blockDim.x) { // retrieves the color currentInputPx = *pGlobalInput; fBlue = currentInputPx.x; fGreen = currentInputPx.y; fRed = currentInputPx.z; pGlobalInput += blockDim.x; // update 
model + background subtract int result = _cudaUpdateFastBgGMM(i, fRed, fGreen, fBlue, pUsedModes); int nMLocal = *pUsedModes; pUsedModes += blockDim.x; if (d_GMMParams.bShadowDetection) { if (!result) { result= _cudaRemoveShadowGMM(i, fRed, fGreen, fBlue, nMLocal); } } switch (result) { case 0: //foreground (*pGlobalOutput) = 255; if (d_GMMParams.bRemoveForeground) { _cudaReplacePixelBackgroundGMM(i, pGlobalInput-blockDim.x); } break; case 1: //background (*pGlobalOutput) = 0; break; case 2: //shadow (*pGlobalOutput) = 128; if (d_GMMParams.bRemoveForeground) { _cudaReplacePixelBackgroundGMM(i, pGlobalInput-blockDim.x); } break; } pGlobalOutput += blockDim.x; } } #endif
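// ----------------------------------------------------------------------------
// Illustrative sketch (independent of the GMM code): a stand-alone CUDA program
// showing the double-buffered, two-stream pipeline that cvCreateFastBgGMM and
// cvUpdateFastBgGMM rely on -- the kernel processes buffer A on an "exec"
// stream while the next frame is uploaded from pinned host memory into buffer B
// on a "copy" stream, and the buffers are swapped each iteration. Buffer size,
// frame count and the kernel body are assumed demo values.
#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>

__global__ void invert(unsigned char *buf, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] = 255 - buf[i];
}

int main()
{
    const int n = 1 << 20, frames = 8;
    unsigned char *h_pinned = NULL, *d_a = NULL, *d_b = NULL;
    cudaHostAlloc((void **)&h_pinned, n, cudaHostAllocWriteCombined);
    cudaMalloc((void **)&d_a, n);
    cudaMalloc((void **)&d_b, n);
    cudaStream_t copyStream, execStream;
    cudaStreamCreate(&copyStream);
    cudaStreamCreate(&execStream);

    memset(h_pinned, 100, n);                          // "capture" the first frame
    cudaMemcpy(d_a, h_pinned, n, cudaMemcpyHostToDevice);

    for (int f = 0; f < frames; ++f) {
        // Process the current frame asynchronously on the exec stream...
        invert<<<(n + 255) / 256, 256, 0, execStream>>>(d_a, n);
        // ...while the next frame is staged and uploaded on the copy stream.
        memset(h_pinned, 100 + f, n);                  // "capture" the next frame
        cudaMemcpyAsync(d_b, h_pinned, n, cudaMemcpyHostToDevice, copyStream);
        cudaStreamSynchronize(execStream);
        cudaStreamSynchronize(copyStream);
        unsigned char *t = d_a; d_a = d_b; d_b = t;    // swap buffers (cf. SWAP)
    }
    printf("pipeline done: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaStreamDestroy(copyStream);
    cudaStreamDestroy(execStream);
    cudaFreeHost(h_pinned);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
// ----------------------------------------------------------------------------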
// ----------------------------------------------------------------------------------------- // NVEnc by rigaya // ----------------------------------------------------------------------------------------- // // The MIT License // // Copyright (c) 2014-2016 rigaya // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // // ------------------------------------------------------------------------------------------ #include <map> #include "convert_csp.h" #include "NVEncFilterYadif.h" #include "NVEncParam.h" template<typename T> __inline__ __device__ T max3(T a, T b, T c) { return max(max(a, b), c); } template<typename T> __inline__ __device__ T min3(T a, T b, T c) { return min(min(a, b), c); } template<typename TypePixel> __inline__ __device__ int spatial( cudaTextureObject_t tex1, const float gIdXf, const float gIdYf ) { int ym1[7], yp1[7]; #pragma unroll for (int ix = -3; ix <= 3; ix++) { ym1[ix+3] = (int)tex2D<TypePixel>(tex1, gIdXf + ix, gIdYf - 1.0f); yp1[ix+3] = (int)tex2D<TypePixel>(tex1, gIdXf + ix, gIdYf + 1.0f); } const int score[5] = { abs(ym1[2] - yp1[2]) + abs(ym1[3] - yp1[3]) + abs(ym1[4] - yp1[4]), abs(ym1[1] - yp1[3]) + abs(ym1[2] - yp1[4]) + abs(ym1[3] - yp1[5]), abs(ym1[0] - yp1[4]) + abs(ym1[1] - yp1[5]) + abs(ym1[2] - yp1[6]), abs(ym1[3] - yp1[1]) + abs(ym1[4] - yp1[2]) + abs(ym1[5] - yp1[3]), abs(ym1[4] - yp1[0]) + abs(ym1[5] - yp1[1]) + abs(ym1[6] - yp1[2]) }; int minscore = score[0]; int minidx = 0; if (score[1] < minscore) { minscore = score[1]; minidx = 1; if (score[2] < minscore) { minscore = score[2]; minidx = 2; } } if (score[3] < minscore) { minscore = score[3]; minidx = 3; if (score[4] < minscore) { minscore = score[4]; minidx = 4; } } switch (minidx) { case 0: return (ym1[3] + yp1[3]) >> 1; case 1: return (ym1[2] + yp1[4]) >> 1; case 2: return (ym1[1] + yp1[5]) >> 1; case 3: return (ym1[4] + yp1[2]) >> 1; case 4: default:return (ym1[5] + yp1[1]) >> 1; } } template<typename TypePixel> __device__ int temporal( cudaTextureObject_t tex0, cudaTextureObject_t tex01, cudaTextureObject_t tex1, cudaTextureObject_t tex12, cudaTextureObject_t tex2, const int valSpatial, const float gIdXf, const float gIdYf ) { const int t00m1 = (int)tex2D<TypePixel>(tex0, gIdXf, gIdYf - 1.0f); const int t00p1 = (int)tex2D<TypePixel>(tex0, gIdXf, gIdYf + 1.0f); const int t01m2 = (int)tex2D<TypePixel>(tex01, gIdXf, gIdYf - 2.0f); const int t01_0 = (int)tex2D<TypePixel>(tex01, gIdXf, gIdYf + 0.0f); const int t01p2 = (int)tex2D<TypePixel>(tex01, gIdXf, gIdYf + 2.0f); const int t10m1 = 
(int)tex2D<TypePixel>(tex1, gIdXf, gIdYf - 1.0f); const int t10p1 = (int)tex2D<TypePixel>(tex1, gIdXf, gIdYf + 1.0f); const int t12m2 = (int)tex2D<TypePixel>(tex12, gIdXf, gIdYf - 2.0f); const int t12_0 = (int)tex2D<TypePixel>(tex12, gIdXf, gIdYf + 0.0f); const int t12p2 = (int)tex2D<TypePixel>(tex12, gIdXf, gIdYf + 2.0f); const int t20m1 = (int)tex2D<TypePixel>(tex2, gIdXf, gIdYf - 1.0f); const int t20p1 = (int)tex2D<TypePixel>(tex2, gIdXf, gIdYf + 1.0f); const int tm2 = (t01m2 + t12m2) >> 1; const int t_0 = (t01_0 + t12_0) >> 1; const int tp2 = (t01p2 + t12p2) >> 1; int diff = max3( abs(t01_0 - t12_0), (abs(t00m1 - t10m1) + abs(t00p1 - t10p1)) >> 1, (abs(t20m1 - t10m1) + abs(t10p1 - t20p1)) >> 1); diff = max3(diff, -max3(t_0 - t10p1, t_0 - t10m1, min(tm2 - t10m1, tp2 - t10p1)), min3(t_0 - t10p1, t_0 - t10m1, max(tm2 - t10m1, tp2 - t10p1))); return max(min(valSpatial, t_0 + diff), t_0 - diff); } template<typename TypePixel, int bit_depth, int BLOCK_X, int BLOCK_Y> __global__ void kernel_yadif( TypePixel *ptrDst, cudaTextureObject_t tex0, cudaTextureObject_t tex1, cudaTextureObject_t tex2, const int dstPitch, const int dstWidth, const int dstHeight, const int srcWidth, const int srcHeight, const YadifTargetField targetField, const RGY_PICSTRUCT picstruct) { const int gIdX = blockIdx.x * BLOCK_X + threadIdx.x; const int gIdY = blockIdx.y * BLOCK_Y + threadIdx.y; if (gIdX < dstWidth && gIdY < dstHeight) { const float gIdXf = gIdX + 0.5f; const float gIdYf = gIdY + 0.5f; TypePixel ret; if ((gIdY & 1) != targetField) { ret = tex2D<TypePixel>(tex1, gIdXf, gIdYf); } else { const int valSpatial = spatial<TypePixel>(tex1, gIdXf, gIdYf); const bool field2nd = ((targetField==YADIF_GEN_FIELD_TOP) == (((uint32_t)picstruct & (uint32_t)RGY_PICSTRUCT_TFF) != 0)); cudaTextureObject_t tex01 = field2nd ? tex1 : tex0; cudaTextureObject_t tex12 = field2nd ? 
tex2 : tex1; ret = (TypePixel)clamp( temporal<TypePixel>(tex0, tex01, tex1, tex12, tex2, valSpatial, gIdXf, gIdYf), 0, ((1<<bit_depth)-1)); } *(TypePixel *)((uint8_t *)ptrDst + gIdY * dstPitch + gIdX * sizeof(TypePixel)) = ret; } } template<typename TypePixel> cudaError_t setTexFieldYadif(cudaTextureObject_t& texSrc, const RGYFrameInfo *pFrame) { texSrc = 0; cudaResourceDesc resDescSrc; memset(&resDescSrc, 0, sizeof(resDescSrc)); resDescSrc.resType = cudaResourceTypePitch2D; resDescSrc.res.pitch2D.desc = cudaCreateChannelDesc<TypePixel>(); resDescSrc.res.pitch2D.pitchInBytes = pFrame->pitch; resDescSrc.res.pitch2D.width = pFrame->width; resDescSrc.res.pitch2D.height = pFrame->height; resDescSrc.res.pitch2D.devPtr = (uint8_t *)pFrame->ptr; cudaTextureDesc texDescSrc; memset(&texDescSrc, 0, sizeof(texDescSrc)); texDescSrc.addressMode[0] = cudaAddressModeWrap; texDescSrc.addressMode[1] = cudaAddressModeWrap; texDescSrc.filterMode = cudaFilterModePoint; texDescSrc.readMode = cudaReadModeElementType; texDescSrc.normalizedCoords = 0; return cudaCreateTextureObject(&texSrc, &resDescSrc, &texDescSrc, nullptr); } template<typename TypePixel, int bit_depth> cudaError_t run_yadif(RGYFrameInfo *pOutputPlane, const RGYFrameInfo *pSrc0, const RGYFrameInfo *pSrc1, const RGYFrameInfo *pSrc2, const YadifTargetField targetField, const RGY_PICSTRUCT picstruct, cudaStream_t stream) { cudaTextureObject_t texSrc0 = 0; cudaTextureObject_t texSrc1 = 0; cudaTextureObject_t texSrc2 = 0; auto cudaerr = cudaSuccess; if ( (cudaerr = setTexFieldYadif<TypePixel>(texSrc0, pSrc0)) != cudaSuccess || (cudaerr = setTexFieldYadif<TypePixel>(texSrc1, pSrc1)) != cudaSuccess || (cudaerr = setTexFieldYadif<TypePixel>(texSrc2, pSrc2)) != cudaSuccess) { return cudaerr; } static const int YADIF_BLOCK_X = 32; static const int YADIF_BLOCK_Y = 8; dim3 blockSize(YADIF_BLOCK_X, YADIF_BLOCK_Y); dim3 gridSize(divCeil(pOutputPlane->width, blockSize.x), divCeil(pOutputPlane->height, blockSize.y)); kernel_yadif<TypePixel, bit_depth, YADIF_BLOCK_X, YADIF_BLOCK_Y><<<gridSize, blockSize, 0, stream>>>( (TypePixel * )pOutputPlane->ptr, texSrc0, texSrc1, texSrc2, pOutputPlane->pitch, pOutputPlane->width, pOutputPlane->height, pSrc1->width, pSrc1->height, targetField, picstruct); cudaerr = cudaGetLastError(); cudaDestroyTextureObject(texSrc0); cudaDestroyTextureObject(texSrc1); cudaDestroyTextureObject(texSrc2); return cudaerr; } template<typename TypePixel, int bit_depth> cudaError_t run_yadif_frame(RGYFrameInfo *pOutputFrame, const RGYFrameInfo *pSrc0, const RGYFrameInfo *pSrc1, const RGYFrameInfo *pSrc2, const YadifTargetField targetField, const RGY_PICSTRUCT picstruct, cudaStream_t stream) { const auto planeSrc0Y = getPlane(pSrc0, RGY_PLANE_Y); const auto planeSrc0U = getPlane(pSrc0, RGY_PLANE_U); const auto planeSrc0V = getPlane(pSrc0, RGY_PLANE_V); const auto planeSrc1Y = getPlane(pSrc1, RGY_PLANE_Y); const auto planeSrc1U = getPlane(pSrc1, RGY_PLANE_U); const auto planeSrc1V = getPlane(pSrc1, RGY_PLANE_V); const auto planeSrc2Y = getPlane(pSrc2, RGY_PLANE_Y); const auto planeSrc2U = getPlane(pSrc2, RGY_PLANE_U); const auto planeSrc2V = getPlane(pSrc2, RGY_PLANE_V); auto planeOutputY = getPlane(pOutputFrame, RGY_PLANE_Y); auto planeOutputU = getPlane(pOutputFrame, RGY_PLANE_U); auto planeOutputV = getPlane(pOutputFrame, RGY_PLANE_V); auto cudaerr = run_yadif<TypePixel, bit_depth>(&planeOutputY, &planeSrc0Y, &planeSrc1Y, &planeSrc2Y, targetField, picstruct, stream); if (cudaerr != cudaSuccess) { return cudaerr; } cudaerr = run_yadif<TypePixel, 
bit_depth>(&planeOutputU, &planeSrc0U, &planeSrc1U, &planeSrc2U, targetField, picstruct, stream); if (cudaerr != cudaSuccess) { return cudaerr; } cudaerr = run_yadif<TypePixel, bit_depth>(&planeOutputV, &planeSrc0V, &planeSrc1V, &planeSrc2V, targetField, picstruct, stream); if (cudaerr != cudaSuccess) { return cudaerr; } return cudaerr; } NVEncFilterYadifSource::NVEncFilterYadifSource() : m_nFramesInput(0), m_nFramesOutput(0), m_buf() { } NVEncFilterYadifSource::~NVEncFilterYadifSource() { clear(); } void NVEncFilterYadifSource::clear() { for (auto& buf : m_buf) { buf.clear(); } m_nFramesInput = 0; m_nFramesOutput = 0; } cudaError_t NVEncFilterYadifSource::alloc(const RGYFrameInfo& frameInfo) { if (!cmpFrameInfoCspResolution(&m_buf.begin()->frame, &frameInfo)) { //すべて確保されているか確認 bool allocated = true; for (auto& buf : m_buf) { if (buf.frame.ptr == nullptr) { allocated = false; break; } } if (allocated) { return cudaSuccess; } } for (auto& buf : m_buf) { auto ret = buf.alloc(frameInfo.width, frameInfo.height, frameInfo.csp); if (ret != cudaSuccess) { buf.clear(); return ret; } } return cudaSuccess; } cudaError_t NVEncFilterYadifSource::add(const RGYFrameInfo *pInputFrame, cudaStream_t stream) { const int iframe = m_nFramesInput++; auto pDstFrame = get(iframe); copyFrameProp(&pDstFrame->frame, pInputFrame); return copyFrameAsync(&pDstFrame->frame, pInputFrame, stream); } NVEncFilterYadif::NVEncFilterYadif() : m_nFrame(0), m_pts(0), m_source() { m_sFilterName = _T("yadif"); } NVEncFilterYadif::~NVEncFilterYadif() { close(); } RGY_ERR NVEncFilterYadif::check_param(shared_ptr<NVEncFilterParamYadif> pAfsParam) { if (pAfsParam->frameOut.height <= 0 || pAfsParam->frameOut.width <= 0) { AddMessage(RGY_LOG_ERROR, _T("Invalid parameter.\n")); return RGY_ERR_INVALID_PARAM; } if (pAfsParam->yadif.mode >= VPP_YADIF_MODE_MAX) { AddMessage(RGY_LOG_ERROR, _T("Invalid parameter (mode).\n")); return RGY_ERR_INVALID_PARAM; } return RGY_ERR_NONE; } RGY_ERR NVEncFilterYadif::init(shared_ptr<NVEncFilterParam> pParam, shared_ptr<RGYLog> pPrintMes) { RGY_ERR sts = RGY_ERR_NONE; m_pPrintMes = pPrintMes; auto prmYadif = std::dynamic_pointer_cast<NVEncFilterParamYadif>(pParam); if (!prmYadif) { AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n")); return RGY_ERR_INVALID_PARAM; } //パラメータチェック if (check_param(prmYadif) != RGY_ERR_NONE) { return RGY_ERR_INVALID_PARAM; } auto cudaerr = AllocFrameBuf(prmYadif->frameOut, (prmYadif->yadif.mode & VPP_YADIF_MODE_BOB) ? 
2 : 1); if (cudaerr != cudaSuccess) { AddMessage(RGY_LOG_ERROR, _T("failed to allocate memory: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str()); return RGY_ERR_MEMORY_ALLOC; } prmYadif->frameOut.pitch = m_pFrameBuf[0]->frame.pitch; AddMessage(RGY_LOG_DEBUG, _T("allocated output buffer: %dx%pixym1[3], pitch %pixym1[3], %s.\n"), m_pFrameBuf[0]->frame.width, m_pFrameBuf[0]->frame.height, m_pFrameBuf[0]->frame.pitch, RGY_CSP_NAMES[m_pFrameBuf[0]->frame.csp]); cudaerr = m_source.alloc(prmYadif->frameOut); if (cudaerr != cudaSuccess) { AddMessage(RGY_LOG_ERROR, _T("failed to allocate memory: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str()); return RGY_ERR_MEMORY_ALLOC; } prmYadif->frameOut.picstruct = RGY_PICSTRUCT_FRAME; m_nFrame = 0; m_pts = 0; m_nPathThrough &= (~(FILTER_PATHTHROUGH_PICSTRUCT | FILTER_PATHTHROUGH_FLAGS | FILTER_PATHTHROUGH_TIMESTAMP)); if (prmYadif->yadif.mode & VPP_YADIF_MODE_BOB) { prmYadif->baseFps *= 2; } setFilterInfo(pParam->print()); m_pParam = pParam; return sts; } tstring NVEncFilterParamYadif::print() const { return yadif.print(); } RGY_ERR NVEncFilterYadif::run_filter(const RGYFrameInfo *pInputFrame, RGYFrameInfo **ppOutputFrames, int *pOutputFrameNum, cudaStream_t stream) { RGY_ERR sts = RGY_ERR_NONE; auto prmYadif = std::dynamic_pointer_cast<NVEncFilterParamYadif>(m_pParam); if (!prmYadif) { AddMessage(RGY_LOG_ERROR, _T("Invalid parameter type.\n")); return RGY_ERR_INVALID_PARAM; } const int iframe = m_source.inframe(); if (pInputFrame->ptr == nullptr && m_nFrame >= iframe) { //終了 *pOutputFrameNum = 0; ppOutputFrames[0] = nullptr; return sts; } else if (pInputFrame->ptr != nullptr) { //エラーチェック const auto memcpyKind = getCudaMemcpyKind(pInputFrame->deivce_mem, m_pFrameBuf[0]->frame.deivce_mem); if (memcpyKind != cudaMemcpyDeviceToDevice) { AddMessage(RGY_LOG_ERROR, _T("only supported on device memory.\n")); return RGY_ERR_INVALID_CALL; } if (m_pParam->frameOut.csp != m_pParam->frameIn.csp) { AddMessage(RGY_LOG_ERROR, _T("csp does not match.\n")); return RGY_ERR_INVALID_PARAM; } //sourceキャッシュにコピー auto cudaerr = m_source.add(pInputFrame, stream); if (cudaerr != cudaSuccess) { AddMessage(RGY_LOG_ERROR, _T("failed to add frame to source buffer: %s.\n"), char_to_tstring(cudaGetErrorName(cudaerr)).c_str()); return RGY_ERR_CUDA; } } //十分な数のフレームがたまった、あるいはdrainモードならフレームを出力 if (iframe >= 1 || pInputFrame == nullptr) { //出力先のフレーム CUFrameBuf *pOutFrame = nullptr; *pOutputFrameNum = 1; if (ppOutputFrames[0] == nullptr) { pOutFrame = m_pFrameBuf[m_nFrameIdx].get(); ppOutputFrames[0] = &pOutFrame->frame; ppOutputFrames[0]->picstruct = pInputFrame->picstruct; m_nFrameIdx = (m_nFrameIdx + 1) % m_pFrameBuf.size(); if (prmYadif->yadif.mode & VPP_YADIF_MODE_BOB) { pOutFrame = m_pFrameBuf[m_nFrameIdx].get(); ppOutputFrames[1] = &pOutFrame->frame; ppOutputFrames[1]->picstruct = pInputFrame->picstruct; m_nFrameIdx = (m_nFrameIdx + 1) % m_pFrameBuf.size(); *pOutputFrameNum = 2; } } const auto *const pSourceFrame = &m_source.get(m_nFrame)->frame; pOutFrame->frame.flags = pSourceFrame->flags & (~(RGY_FRAME_FLAG_RFF | RGY_FRAME_FLAG_RFF_COPY | RGY_FRAME_FLAG_RFF_BFF | RGY_FRAME_FLAG_RFF_TFF)); YadifTargetField targetField = YADIF_GEN_FIELD_UNKNOWN; if (prmYadif->yadif.mode & VPP_YADIF_MODE_AUTO) { //エラーチェック const auto memcpyKind = getCudaMemcpyKind(pSourceFrame->deivce_mem, ppOutputFrames[0]->deivce_mem); if (memcpyKind != cudaMemcpyDeviceToDevice) { AddMessage(RGY_LOG_ERROR, _T("only supported on device memory.\n")); return RGY_ERR_INVALID_CALL; } if 
((pSourceFrame->picstruct & RGY_PICSTRUCT_INTERLACED) == 0) { ppOutputFrames[0]->picstruct = RGY_PICSTRUCT_FRAME; ppOutputFrames[0]->timestamp = pSourceFrame->timestamp; copyFrameAsync(ppOutputFrames[0], pSourceFrame, stream); if (prmYadif->yadif.mode & VPP_YADIF_MODE_BOB) { ppOutputFrames[1]->picstruct = RGY_PICSTRUCT_FRAME; ppOutputFrames[0]->timestamp = pSourceFrame->timestamp; ppOutputFrames[0]->duration = (pSourceFrame->duration + 1) / 2; ppOutputFrames[1]->timestamp = ppOutputFrames[0]->timestamp + ppOutputFrames[0]->duration; ppOutputFrames[1]->duration = pSourceFrame->duration - ppOutputFrames[0]->duration; ppOutputFrames[1]->inputFrameId = pInputFrame->inputFrameId; copyFrameAsync(ppOutputFrames[1], pSourceFrame, stream); } m_nFrame++; return RGY_ERR_NONE; } else if ((pSourceFrame->picstruct & RGY_PICSTRUCT_FRAME_TFF) == RGY_PICSTRUCT_FRAME_TFF) { targetField = YADIF_GEN_FIELD_BOTTOM; } else if ((pSourceFrame->picstruct & RGY_PICSTRUCT_FRAME_BFF) == RGY_PICSTRUCT_FRAME_BFF) { targetField = YADIF_GEN_FIELD_TOP; } } else if (prmYadif->yadif.mode & VPP_YADIF_MODE_TFF) { targetField = YADIF_GEN_FIELD_BOTTOM; } else if (prmYadif->yadif.mode & VPP_YADIF_MODE_BFF) { targetField = YADIF_GEN_FIELD_TOP; } else { AddMessage(RGY_LOG_ERROR, _T("Not implemented yet.\n")); return RGY_ERR_INVALID_PARAM; } static const std::map<RGY_CSP, decltype(run_yadif_frame<uint8_t, 8>)*> func_list = { { RGY_CSP_YV12, run_yadif_frame<uint8_t, 8> }, { RGY_CSP_YV12_16, run_yadif_frame<uint16_t, 16> }, { RGY_CSP_YUV444, run_yadif_frame<uint8_t, 8> }, { RGY_CSP_YUV444_16, run_yadif_frame<uint16_t, 16> } }; if (func_list.count(pSourceFrame->csp) == 0) { AddMessage(RGY_LOG_ERROR, _T("unsupported csp %s.\n"), RGY_CSP_NAMES[pSourceFrame->csp]); return RGY_ERR_UNSUPPORTED; } func_list.at(pSourceFrame->csp)(ppOutputFrames[0], &m_source.get(m_nFrame-1)->frame, &m_source.get(m_nFrame+0)->frame, &m_source.get(m_nFrame+1)->frame, targetField, pSourceFrame->picstruct, stream ); ppOutputFrames[0]->picstruct = RGY_PICSTRUCT_FRAME; ppOutputFrames[0]->timestamp = pSourceFrame->timestamp; if (prmYadif->yadif.mode & VPP_YADIF_MODE_BOB) { targetField = (targetField == YADIF_GEN_FIELD_BOTTOM) ? YADIF_GEN_FIELD_TOP : YADIF_GEN_FIELD_BOTTOM; if (func_list.count(pSourceFrame->csp) == 0) { AddMessage(RGY_LOG_ERROR, _T("unsupported csp %s.\n"), RGY_CSP_NAMES[pSourceFrame->csp]); return RGY_ERR_UNSUPPORTED; } func_list.at(pSourceFrame->csp)(ppOutputFrames[1], &m_source.get(m_nFrame-1)->frame, &m_source.get(m_nFrame+0)->frame, &m_source.get(m_nFrame+1)->frame, targetField, pSourceFrame->picstruct, stream ); ppOutputFrames[1]->picstruct = RGY_PICSTRUCT_FRAME; ppOutputFrames[0]->timestamp = pSourceFrame->timestamp; ppOutputFrames[0]->duration = (pSourceFrame->duration + 1) / 2; ppOutputFrames[1]->timestamp = ppOutputFrames[0]->timestamp + ppOutputFrames[0]->duration; ppOutputFrames[1]->duration = pSourceFrame->duration - ppOutputFrames[0]->duration; ppOutputFrames[1]->inputFrameId = pInputFrame->inputFrameId; } m_nFrame++; } else { //出力フレームなし *pOutputFrameNum = 0; ppOutputFrames[0] = nullptr; } return sts; } void NVEncFilterYadif::close() { m_nFrame = 0; m_pts = 0; AddMessage(RGY_LOG_DEBUG, _T("closed yadif filter.\n")); }
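// ----------------------------------------------------------------------------
// Illustrative sketch (not part of NVEnc): a minimal, self-contained program
// showing the pitch2D texture-object setup performed by setTexFieldYadif and
// how a kernel samples the rows above/below a pixel through tex2D, the way
// kernel_yadif reads gIdYf - 1.0f / gIdYf + 1.0f. All sizes and pixel values
// are demo assumptions.
#include <cstdio>
#include <cstring>
#include <cuda_runtime.h>

__global__ void avgUpDown(cudaTextureObject_t tex, unsigned char *dst,
                          int width, int height, int dstPitch)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    float xf = x + 0.5f, yf = y + 0.5f;
    int up   = (int)tex2D<unsigned char>(tex, xf, yf - 1.0f);
    int down = (int)tex2D<unsigned char>(tex, xf, yf + 1.0f);
    dst[y * dstPitch + x] = (unsigned char)((up + down) >> 1);
}

int main()
{
    const int width = 128, height = 64;
    size_t pitch = 0;
    unsigned char *d_src = NULL, *d_dst = NULL;
    cudaMallocPitch((void **)&d_src, &pitch, width, height);
    cudaMemset2D(d_src, pitch, 80, width, height);
    cudaMalloc((void **)&d_dst, width * height);

    // Describe the pitched 2D buffer as a texture resource (cf. setTexFieldYadif).
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypePitch2D;
    resDesc.res.pitch2D.desc = cudaCreateChannelDesc<unsigned char>();
    resDesc.res.pitch2D.devPtr = d_src;
    resDesc.res.pitch2D.pitchInBytes = pitch;
    resDesc.res.pitch2D.width = width;
    resDesc.res.pitch2D.height = height;

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeClamp;   // clamp instead of wrap for the demo
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);

    dim3 block(32, 8), grid((width + 31) / 32, (height + 7) / 8);
    avgUpDown<<<grid, block>>>(tex, d_dst, width, height, width);
    cudaDeviceSynchronize();
    printf("avgUpDown: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaDestroyTextureObject(tex);
    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}
// ----------------------------------------------------------------------------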
the_stack
#include <ctc.h> #include "test.h" bool small_test() { const int alphabet_size = 5; const int T = 2; std::vector<float> activations = {0.1, 0.6, 0.1, 0.1, 0.1, 0.1, 0.1, 0.6, 0.1, 0.1}; // Calculate the score analytically float expected_score; { std::vector<float> probs(activations.size()); softmax(activations.data(), alphabet_size, T, probs.data()); // Score calculation is specific to the given activations above expected_score = probs[1] * probs[7]; } cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *activations_gpu; throw_on_error(cudaMalloc(&activations_gpu, activations.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> labels = {1, 2}; std::vector<int> label_lengths = {2}; std::vector<int> lengths; lengths.push_back(T); float score; ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in small_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, nullptr, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score, ctc_gpu_workspace, options), "Error: compute_ctc_loss in small_test"); score = std::exp(-score); const float eps = 1e-6; const float lb = expected_score - eps; const float ub = expected_score + eps; throw_on_error(cudaFree(activations_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return (score > lb && score < ub); } int offset(int t, int n, int a) { constexpr int minibatch = 2; constexpr int alphabet_size = 6; return (t * minibatch + n) * alphabet_size + a; } bool options_test() { const int alphabet_size = 6; const int T = 5; const int minibatch = 2; std::vector<float> activations = {0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553, 0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508, 0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436, 0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549, 0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688, 0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456, 0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533, 0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345, 0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107, 0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046}; std::vector<float> expected_grads = // from tensorflow {-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553, -0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508, 0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436, 0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549, 0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688, 0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544, 0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533, 0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345, -0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107, -0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046}; // Calculate the expected scores analytically auto& a = 
activations; double expected_score[2]; expected_score[0] = -std::log(a[offset(0, 0, 0)] * a[offset(1, 0, 1)] * a[offset(2, 0, 2)] * a[offset(3, 0, 1)] * a[offset(4, 0, 0)]); expected_score[1] = 5.42262; // from tensorflow // now take the log to account for the softmax for (auto& a : activations) { a = std::log(a); } cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *activations_gpu; throw_on_error(cudaMalloc(&activations_gpu, activations.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> labels = {0, 1, 2, 1, 0, 0, 1, 1, 0}; std::vector<int> label_lengths = {5, 4}; std::vector<int> lengths = {5, 5}; float score[2]; float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, (alphabet_size * T * minibatch) * sizeof(float)), "cudaMalloc"); ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; options.blank_label = 5; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in options_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score[0], ctc_gpu_workspace, options), "Error: compute_ctc_loss in options_test"); std::vector<float> grads(alphabet_size * T * minibatch); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); throw_on_error(cudaFree(activations_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); const double eps = 1e-4; bool result = true; for (int i = 0; i < grads.size(); i++) { const double lb = expected_grads[i] - eps; const double ub = expected_grads[i] + eps; if (!(grads[i] > lb && grads[i] < ub)) { std::cerr << "grad mismatch in options_test" << " expected grad: " << expected_grads[i] << " calculated score: " << grads[i] << " !(" << lb << " < " << grads[i] << " < " << ub << ")" << std::endl; result = false; } } for (int i = 0; i < 2; i++) { const double lb = expected_score[i] - eps; const double ub = expected_score[i] + eps; if (!(score[i] > lb && score[i] < ub)) { std::cerr << "score mismatch in options_test" << " expected score: " << expected_score[i] << " calculated score: " << score[i] << std::endl; result = false; } } return result; } bool inf_test() { const int alphabet_size = 15; const int T = 50; const int L = 10; const int minibatch = 1; std::vector<int> labels = genLabels(alphabet_size, L); labels[0] = 2; std::vector<int> label_lengths = {L}; std::vector<float> acts = genActs(alphabet_size * T * minibatch); for (int i = 0; i < T; ++i) acts[alphabet_size * i + 2] = -1e30; cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> lengths; lengths.push_back(T); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, 
(alphabet_size * T) * sizeof(float)), "cudaMalloc"); float cost; ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in inf_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &cost, ctc_gpu_workspace, options), "Error: compute_ctc_loss in inf_test"); bool status = std::isinf(cost); std::vector<float> grads(alphabet_size * T); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); for (int i = 0; i < alphabet_size * T; ++i) status &= !std::isnan(grads[i]); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return status; } float grad_check(int T, int alphabet_size, std::vector<float>& acts, const std::vector<std::vector<int>>& labels, const std::vector<int>& lengths) { float epsilon = 1e-2; const int minibatch = labels.size(); cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> flat_labels; std::vector<int> label_lengths; for (const auto& l : labels) { flat_labels.insert(flat_labels.end(), l.begin(), l.end()); label_lengths.push_back(l.size()); } std::vector<float> costs(minibatch); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, acts.size() * sizeof(float)), "cudaMalloc"); ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in grad_check"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costs.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (0) in grad_check"); std::vector<float> grads(acts.size()); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); std::vector<float> num_grad(grads.size()); //perform 2nd order central differencing for (int i = 0; i < T * alphabet_size * minibatch; ++i) { acts[i] += epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<float> costsP1(minibatch); std::vector<float> costsP2(minibatch); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP1.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (1) in grad_check"); 
acts[i] -= 2 * epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP2.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (2) in grad_check"); float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.); float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.); acts[i] += epsilon; num_grad[i] = (costP1 - costP2) / (2 * epsilon); } float diff = rel_diff(grads, num_grad); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return diff; } bool run_tests() { std::vector<std::tuple<int, int, int, int, float>> problem_sizes = { std::make_tuple(28, 50, 15, 1, 1e-5) }; bool status = true; for (auto problem : problem_sizes) { int alphabet_size, T, L, minibatch; float tol; std::tie(alphabet_size, T, L, minibatch, tol) = problem; std::vector<float> acts = genActs(alphabet_size * T * minibatch); std::vector<std::vector<int>> labels; std::vector<int> sizes; for (int mb = 0; mb < minibatch; ++mb) { int actual_length = L; labels.push_back(genLabels(alphabet_size, actual_length)); sizes.push_back(T); } float diff = grad_check(T, alphabet_size, acts, labels, sizes); status &= (diff < tol); } return status; } int main(void) { if (get_warpctc_version() != 2) { std::cerr << "Invalid WarpCTC version." << std::endl; return 1; } std::cout << "Running GPU tests" << std::endl; throw_on_error(cudaSetDevice(0), "cudaSetDevice"); bool status = true; status &= small_test(); status &= options_test(); status &= inf_test(); status &= run_tests(); if (status) { std::cout << "Tests pass" << std::endl; return 0; } else { std::cout << "Some or all tests fail" << std::endl; return 1; } }
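// --- Illustrative sketch (not from the source above) -----------------------
// CPU-only illustration of the numerical gradient used in grad_check():
// second-order central differencing, num_grad[i] = (cost(x+eps) - cost(x-eps)) / (2*eps),
// with epsilon = 1e-2 as in the test. A toy quadratic cost (whose analytic
// gradient is x itself) stands in for compute_ctc_loss; toy_cost and
// central_diff_max_error are hypothetical names, not part of warp-ctc.
#include <algorithm>
#include <cmath>
#include <vector>

static float toy_cost(const std::vector<float>& x) {
    float c = 0.f;
    for (float v : x) c += 0.5f * v * v;   // gradient of this cost is simply x
    return c;
}

static float central_diff_max_error(std::vector<float> x) {
    const float epsilon = 1e-2f;
    float max_abs_err = 0.f;
    for (size_t i = 0; i < x.size(); ++i) {
        const float analytic = x[i];
        x[i] += epsilon;
        const float costP1 = toy_cost(x);
        x[i] -= 2 * epsilon;
        const float costP2 = toy_cost(x);
        x[i] += epsilon;                   // restore the original value
        const float numeric = (costP1 - costP2) / (2 * epsilon);
        max_abs_err = std::max(max_abs_err, std::fabs(numeric - analytic));
    }
    return max_abs_err;
}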
the_stack
static __constant__ const uint8_t c_perm[8][8] = { { 2, 3, 6, 7, 0, 1, 4, 5 },{ 6, 7, 2, 3, 4, 5, 0, 1 },{ 7, 6, 5, 4, 3, 2, 1, 0 },{ 1, 0, 3, 2, 5, 4, 7, 6 }, { 0, 1, 4, 5, 6, 7, 2, 3 },{ 6, 7, 2, 3, 0, 1, 4, 5 },{ 6, 7, 0, 1, 4, 5, 2, 3 },{ 4, 5, 2, 3, 6, 7, 0, 1 } }; static __constant__ const uint32_t c_IV_512[32] = { 0x0ba16b95, 0x72f999ad, 0x9fecc2ae, 0xba3264fc, 0x5e894929, 0x8e9f30e5, 0x2f1daa37, 0xf0f2c558, 0xac506643, 0xa90635a5, 0xe25b878b, 0xaab7878f, 0x88817f7a, 0x0a02892b, 0x559a7550, 0x598f657e, 0x7eef60a1, 0x6b70e3e8, 0x9c1714d1, 0xb958e2a8, 0xab02675e, 0xed1c014f, 0xcd8d65bb, 0xfdb7a257, 0x09254899, 0xd699c7bc, 0x9019b6dc, 0x2b9022e4, 0x8fa14956, 0x21bf9bd3, 0xb94d0943, 0x6ffddc22 }; static __constant__ const int16_t c_FFT128_8_16_Twiddle[128] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 60, 2, 120, 4, -17, 8, -34, 16, -68, 32, 121, 64, -15, 128, -30, 1, 46, 60, -67, 2, 92, 120, 123, 4, -73, -17, -11, 8, 111, -34, -22, 1, -67, 120, -73, 8, -22, -68, -70, 64, 81, -30, -46, -2,-123, 17,-111, 1,-118, 46, -31, 60, 116, -67, -61, 2, 21, 92, -62, 120, -25, 123,-122, 1, 116, 92,-122, -17, 84, -22, 18, 32, 114, 117, -49, -30, 118, 67, 62, 1, -31, -67, 21, 120, -122, -73, -50, 8, 9, -22, -89, -68, 52, -70, 114, 1, -61, 123, -50, -34, 18, -70, -99, 128, -98, 67, 25, 17, -9, 35, -79 }; static __constant__ const int16_t c_FFT256_2_128_Twiddle[128] = { 1, 41,-118, 45, 46, 87, -31, 14, 60,-110, 116,-127, -67, 80, -61, 69, 2, 82, 21, 90, 92, -83, -62, 28,120, 37, -25, 3, 123, -97,-122,-119, 4, -93, 42, -77, -73, 91,-124, 56,-17, 74, -50, 6, -11, 63, 13, 19, 8, 71, 84, 103, 111, -75, 9, 112,-34,-109,-100, 12, -22, 126, 26, 38, 16,-115, -89, -51, -35, 107, 18, -33,-68, 39, 57, 24, -44, -5, 52, 76, 32, 27, 79,-102, -70, -43, 36, -66,121, 78, 114, 48, -88, -10, 104,-105, 64, 54, -99, 53, 117, -86, 72, 125,-15,-101, -29, 96, 81, -20, -49, 47,128, 108, 59, 106, -23, 85,-113, -7,-30, 55, -58, -65, -95, -40, -98, 94 }; __device__ __forceinline__ static uint32_t IF(uint32_t x,uint32_t y,uint32_t z){ /* #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 uint32_t result; asm("lop3.b32 %0, %1, %2, %3, 0xCA;" : "=r"(result) : "r"(x), "r"(y), "r"(z)); // x=F0, y=CC, z=AA // 0xCA = ((CC⊻AA)∧F0)⊻AA return result; #else */ return (((y ^ z) & x) ^ z); // #endif } __device__ __forceinline__ static uint32_t MAJ(const uint32_t x,const uint32_t y,const uint32_t z){ #if __CUDA_ARCH__ >= 500 && CUDA_VERSION >= 7050 uint32_t result; asm ("lop3.b32 %0, %1, %2, %3, 0xE8;" : "=r"(result) : "r"(x), "r"(y), "r"(z)); // x=AA, y=CC, z=F0 // 0xCA = ((CC⊻AA)∧F0)⊻AA return result; #else return ((z &y) | ((z|y) & x)); #endif } #define p8_xor(x) ( ((x)%7) == 0 ? 1 : \ ((x)%7) == 1 ? 6 : \ ((x)%7) == 2 ? 2 : \ ((x)%7) == 3 ? 3 : \ ((x)%7) == 4 ? 5 : \ ((x)%7) == 5 ? 
7 : 4 ) __device__ __forceinline__ static void STEP8_IF(const uint32_t *w, const uint32_t i, const uint32_t r, const uint32_t s, uint32_t *A, const uint32_t *B, const uint32_t *C, uint32_t *D) { uint32_t R[8]; #pragma unroll 8 for(int j=0; j<8; j++) R[j] = ROTL32(A[j], r); uint32_t W[8]; *(uint2x4*)&W[0] = *(uint2x4*)&w[0]; #pragma unroll 8 for(int j=0; j<8; j++) D[j]+= W[j] + IF(A[j], B[j], C[j]); #pragma unroll 8 for(int j=0; j<8; j++) D[j] = R[j^p8_xor(i)] + ROTL32(D[j], s); #pragma unroll 8 for(int j=0; j<8; j++) A[j] = R[j]; } __device__ __forceinline__ static void STEP8_MAJ(const uint32_t *w, const uint32_t i, const uint32_t r, const uint32_t s, uint32_t *A, const uint32_t *B, const uint32_t *C, uint32_t *D) { uint32_t R[8]; uint32_t W[8]; *(uint2x4*)&W[0] = *(uint2x4*)&w[0]; #pragma unroll 8 for(int j=0; j<8; j++) R[j] = ROTL32(A[j], r); #pragma unroll 8 for(int j=0; j<8; j++) D[j]+= W[j] + MAJ(A[j], B[j], C[j]); #pragma unroll 8 for(int j=0; j<8; j++) D[j] = R[j^p8_xor(i)] + ROTL32(D[j], s); #pragma unroll 8 for(int j=0; j<8; j++) A[j] = R[j]; } static __constant__ uint32_t d_cw[4][8][8] = { 0x531B1720, 0xAC2CDE09, 0x0B902D87, 0x2369B1F4, 0x2931AA01, 0x02E4B082, 0xC914C914, 0xC1DAE1A6, 0xF18C2B5C, 0x08AC306B, 0x27BFC914, 0xCEDC548D, 0xC630C4BE, 0xF18C4335, 0xF0D3427C, 0xBE3DA380, 0x143C02E4, 0xA948C630, 0xA4F2DE09, 0xA71D2085, 0xA439BD84, 0x109FCD6A, 0xEEA8EF61, 0xA5AB1CE8, 0x0B90D4A4, 0x3D6D039D, 0x25944D53, 0xBAA0E034, 0x5BC71E5A, 0xB1F4F2FE, 0x12CADE09, 0x548D41C3, 0x3CB4F80D, 0x36ECEBC4, 0xA66443EE, 0x43351ABD, 0xC7A20C49, 0xEB0BB366, 0xF5293F98, 0x49B6DE09, 0x531B29EA, 0x02E402E4, 0xDB25C405, 0x53D4E543, 0x0AD71720, 0xE1A61A04, 0xB87534C1, 0x3EDF43EE, 0x213E50F0, 0x39173EDF, 0xA9485B0E, 0xEEA82EF9, 0x14F55771, 0xFAF15546, 0x3D6DD9B3, 0xAB73B92E, 0x582A48FD, 0xEEA81892, 0x4F7EAA01, 0xAF10A88F, 0x11581720, 0x34C124DB, 0xD1C0AB73, 0x1E5AF0D3, 0xC34C07F3, 0xC914143C, 0x599CBC12, 0xBCCBE543, 0x385EF3B7, 0x14F54C9A, 0x0AD7C068, 0xB64A21F7, 0xDEC2AF10, 0xC6E9C121, 0x56B8A4F2, 0x1158D107, 0xEB0BA88F, 0x050FAABA, 0xC293264D, 0x548D46D2, 0xACE5E8E0, 0x53D421F7, 0xF470D279, 0xDC974E0C, 0xD6CF55FF, 0xFD1C4F7E, 0x36EC36EC, 0x3E261E5A, 0xEBC4FD1C, 0x56B839D0, 0x5B0E21F7, 0x58E3DF7B, 0x5BC7427C, 0xEF613296, 0x1158109F, 0x5A55E318, 0xA7D6B703, 0x1158E76E, 0xB08255FF, 0x50F05771, 0xEEA8E8E0, 0xCB3FDB25, 0x2E40548D, 0xE1A60F2D, 0xACE5D616, 0xFD1CFD1C, 0x24DB3BFB, 0xAC2C1ABD, 0xF529E8E0, 0x1E5AE5FC, 0x478BCB3F, 0xC121BC12, 0xF4702B5C, 0xC293FC63, 0xDA6CB2AD, 0x45601FCC, 0xA439E1A6, 0x4E0C0D02, 0xED3621F7, 0xAB73BE3D, 0x0E74D4A4, 0xF754CF95, 0xD84136EC, 0x3124AB73, 0x39D03B42, 0x0E74BCCB, 0x0F2DBD84, 0x41C35C80, 0xA4135BED, 0xE10E1EF2, 0x6C4F93B1, 0x6E2191DF, 0xE2E01D20, 0xD1952E6B, 0x6A7D9583, 0x131DECE3, 0x369CC964, 0xFB73048D, 0x9E9D6163, 0x280CD7F4, 0xD9C6263A, 0x1062EF9E, 0x2AC7D539, 0xAD2D52D3, 0x0A03F5FD, 0x197CE684, 0xAA72558E, 0xDE5321AD, 0xF0870F79, 0x607A9F86, 0xAFE85018, 0x2AC7D539, 0xE2E01D20, 0x2AC7D539, 0xC6A93957, 0x624C9DB4, 0x6C4F93B1, 0x641E9BE2, 0x452CBAD4, 0x263AD9C6, 0xC964369C, 0xC3053CFB, 0x452CBAD4, 0x95836A7D, 0x4AA2B55E, 0xAB5B54A5, 0xAC4453BC, 0x74808B80, 0xCB3634CA, 0xFC5C03A4, 0x4B8BB475, 0x21ADDE53, 0xE2E01D20, 0xDF3C20C4, 0xBD8F4271, 0xAA72558E, 0xFC5C03A4, 0x48D0B730, 0x2AC7D539, 0xD70B28F5, 0x53BCAC44, 0x3FB6C04A, 0x14EFEB11, 0xDB982468, 0x9A1065F0, 0xB0D14F2F, 0x8D5272AE, 0xC4D73B29, 0x91DF6E21, 0x949A6B66, 0x303DCFC3, 0x5932A6CE, 0x1234EDCC, 0xF5140AEC, 0xCDF1320F, 0x3DE4C21C, 0x48D0B730, 0x1234EDCC, 0x131DECE3, 0x52D3AD2D, 0xE684197C, 0x6D3892C8, 0x72AE8D52, 
0x6FF3900D, 0x73978C69, 0xEB1114EF, 0x15D8EA28, 0x71C58E3B, 0x90F66F0A, 0x15D8EA28, 0x9BE2641E, 0x65F09A10, 0xEA2815D8, 0xBD8F4271, 0x3A40C5C0, 0xD9C6263A, 0xB38C4C74, 0xBAD4452C, 0x70DC8F24, 0xAB5B54A5, 0x46FEB902, 0x1A65E59B, 0x0DA7F259, 0xA32A5CD6, 0xD62229DE, 0xB81947E7, 0x6D3892C8, 0x15D8EA28, 0xE59B1A65, 0x065FF9A1, 0xB2A34D5D, 0x6A7D9583, 0x975568AB, 0xFC5C03A4, 0x2E6BD195, 0x966C6994, 0xF2590DA7, 0x263AD9C6, 0x5A1BA5E5, 0xB0D14F2F, 0x975568AB, 0x6994966C, 0xF1700E90, 0xD3672C99, 0xCC1F33E1, 0xFC5C03A4, 0x452CBAD4, 0x4E46B1BA, 0xF1700E90, 0xB2A34D5D, 0xD0AC2F54, 0x5760A8A0, 0x8C697397, 0x624C9DB4, 0xE85617AA, 0x95836A7D }; __device__ __forceinline__ static void Round8_0_final(uint32_t* A,const uint32_t r,const uint32_t s,const uint32_t t,const uint32_t u){ STEP8_IF(d_cw[0][0],0, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_IF(d_cw[0][1],1, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_IF(d_cw[0][2],2, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_IF(d_cw[0][3],3, u, r, &A[ 8], &A[16], &A[24], A); STEP8_MAJ(d_cw[0][4],4, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_MAJ(d_cw[0][5],5, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_MAJ(d_cw[0][6],6, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_MAJ(d_cw[0][7],7, u, r, &A[ 8], &A[16], &A[24], A); } __device__ __forceinline__ static void Round8_1_final(uint32_t* A,const uint32_t r,const uint32_t s,const uint32_t t,const uint32_t u){ STEP8_IF(d_cw[1][0], 8, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_IF(d_cw[1][1], 9, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_IF(d_cw[1][2],10, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_IF(d_cw[1][3],11, u, r, &A[ 8], &A[16], &A[24], A); STEP8_MAJ(d_cw[1][4],12, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_MAJ(d_cw[1][5],13, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_MAJ(d_cw[1][6],14, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_MAJ(d_cw[1][7],15, u, r, &A[ 8], &A[16], &A[24], A); } __device__ __forceinline__ static void Round8_2_final(uint32_t* A,const uint32_t r,const uint32_t s,const uint32_t t,const uint32_t u){ STEP8_IF(d_cw[2][0],16, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_IF(d_cw[2][1],17, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_IF(d_cw[2][2],18, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_IF(d_cw[2][3],19, u, r, &A[ 8], &A[16], &A[24], A); STEP8_MAJ(d_cw[2][4],20, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_MAJ(d_cw[2][5],21, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_MAJ(d_cw[2][6],22, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_MAJ(d_cw[2][7],23, u, r, &A[ 8], &A[16], &A[24], A); } __device__ __forceinline__ static void Round8_3_final(uint32_t* A,const uint32_t r,const uint32_t s,const uint32_t t,const uint32_t u){ STEP8_IF(d_cw[3][0],24, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_IF(d_cw[3][1],25, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_IF(d_cw[3][2],26, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_IF(d_cw[3][3],27, u, r, &A[ 8], &A[16], &A[24], A); STEP8_MAJ(d_cw[3][4],28, r, s, A, &A[ 8], &A[16], &A[24]); STEP8_MAJ(d_cw[3][5],29, s, t, &A[24], A, &A[ 8], &A[16]); STEP8_MAJ(d_cw[3][6],30, t, u, &A[16], &A[24], A, &A[ 8]); STEP8_MAJ(d_cw[3][7],31, u, r, &A[ 8], &A[16], &A[24], A); } //#define expanded_vector(x) __ldg(&g_fft4[x]) static __device__ __forceinline__ void expanded_vector(uint32_t* w,const uint4* ptr){ asm volatile ("ld.global.nc.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(w[0]), "=r"(w[1]),"=r"(w[2]), "=r"(w[3]) : __LDG_PTR(ptr)); } __device__ __forceinline__ static void Round8(uint32_t* A, const uint32_t thr_offset, const uint4 *const __restrict__ g_fft4) { uint32_t w[8]; uint32_t tmp = thr_offset; uint32_t r = 3, s = 23, t = 17, u = 27; 
expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,0, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,1, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,2, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,3, u, r, &A[8], &A[16], &A[24], A); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,4, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,5, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,6, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,7, u, r, &A[8], &A[16], &A[24], A); r = 28; s = 19; t = 22; u = 7; expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,8, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,9, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,10, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,11, u, r, &A[8], &A[16], &A[24], A); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,12, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,13, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,14, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,15, u, r, &A[8], &A[16], &A[24], A); r = 29; s = 9; t = 15; u = 5; expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,16, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,17, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,18, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,19, u, r, &A[8], &A[16], &A[24], A); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,20, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,21, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,22, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,23, u, r, &A[8], &A[16], &A[24], A); r = 4; s = 13; t = 10; u = 25; expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,24, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,25, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,26, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); 
expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_IF(w,27, u, r, &A[8], &A[16], &A[24], A); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,28, r, s, A, &A[8], &A[16], &A[24]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,29, s, t, &A[24], A, &A[8], &A[16]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,30, t, u, &A[16], &A[24], A, &A[8]); expanded_vector(&w[0],&g_fft4[tmp++]); expanded_vector(&w[4],&g_fft4[tmp++]); STEP8_MAJ(w,31, u, r, &A[8], &A[16], &A[24], A); } /********************* Message expansion ************************/ /* * Reduce modulo 257; result is in [-127; 383] * REDUCE(x) := (x&255) - (x>>8) */ #define REDUCE(x) \ (((x)&255) - ((x)>>8)) /* * Reduce from [-127; 383] to [-128; 128] * EXTRA_REDUCE_S(x) := x<=128 ? x : x-257 */ #define EXTRA_REDUCE_S(x) \ ((x)<=128 ? (x) : (x)-257) /* * Reduce modulo 257; result is in [-128; 128] */ #define REDUCE_FULL_S(x) \ EXTRA_REDUCE_S(REDUCE(x)) // Parallelization: // // FFT_8 wird 2 times 8-fach parallel ausgeführt (in FFT_64) // and 1 time 16-fach parallel (in FFT_128_full) // // STEP8_IF and STEP8_MAJ beinhalten je 2x 8-fach parallel Operations /** * FFT_8 using w=4 as 8th root of unity * Unrolled decimation in frequency (DIF) radix-2 NTT. * Output data is in revbin_permuted order. */ __device__ __forceinline__ static void FFT_8(int *y,const uint8_t stripe){ #define BUTTERFLY(i,j,n) \ do { \ int u= y[stripe*i]; \ int v= y[stripe*j]; \ y[stripe*i] = u+v; \ y[stripe*j] = (u-v) << (n<<1); \ } while(0) BUTTERFLY(0, 4, 0); BUTTERFLY(1, 5, 1); BUTTERFLY(2, 6, 2); BUTTERFLY(3, 7, 3); y[stripe*6] = REDUCE(y[stripe*6]); y[stripe*7] = REDUCE(y[stripe*7]); BUTTERFLY(0, 2, 0); BUTTERFLY(4, 6, 0); BUTTERFLY(1, 3, 2); BUTTERFLY(5, 7, 2); y[stripe*7] = REDUCE(y[stripe*7]); BUTTERFLY(0, 1, 0); BUTTERFLY(2, 3, 0); BUTTERFLY(4, 5, 0); BUTTERFLY(6, 7, 0); y[ 0] = REDUCE(y[ 0]); y[stripe] = REDUCE(y[stripe]); y[stripe<<1] = REDUCE(y[stripe<<1]); y[stripe*3] = REDUCE(y[stripe*3]); y[stripe<<2] = REDUCE(y[stripe<<2]); y[stripe*5] = REDUCE(y[stripe*5]); y[stripe*6] = REDUCE(y[stripe*6]); y[stripe*7] = REDUCE(y[stripe*7]); y[ 0] = EXTRA_REDUCE_S(y[ 0]); y[stripe] = EXTRA_REDUCE_S(y[stripe]); y[stripe<<1] = EXTRA_REDUCE_S(y[stripe<<1]); y[stripe*3] = EXTRA_REDUCE_S(y[stripe*3]); y[stripe<<2] = EXTRA_REDUCE_S(y[stripe<<2]); y[stripe*5] = EXTRA_REDUCE_S(y[stripe*5]); y[stripe*6] = EXTRA_REDUCE_S(y[stripe*6]); y[stripe*7] = EXTRA_REDUCE_S(y[stripe*7]); #undef BUTTERFLY } /** * FFT_16 using w=2 as 16th root of unity * Unrolled decimation in frequency (DIF) radix-2 NTT. * Output data is in revbin_permuted order. */ __device__ __forceinline__ static void FFT_16(int *y){ #define DO_REDUCE_FULL_S(i) \ do { \ y[i] = REDUCE(y[i]); \ y[i] = EXTRA_REDUCE_S(y[i]); \ } while(0) int u,v; const uint8_t thr = threadIdx.x&7; u = y[0]; // 0..7 v = y[1]; // 8..15 y[0] = u+v; y[1] = (u-v) << (thr); if ((thr) >=3) y[1] = REDUCE(y[1]); // 11...15 u = __shfl(y[0], (threadIdx.x&3),8); // 0,1,2,3 0,1,2,3 v = __shfl(y[0],4+(threadIdx.x&3),8); // 4,5,6,7 4,5,6,7 y[0] = ((thr) < 4) ? (u+v) : ((u-v) << ((threadIdx.x&3)<<1)); u = __shfl(y[1], (threadIdx.x&3),8); // 8,9,10,11 8,9,10,11 v = __shfl(y[1],4+(threadIdx.x&3),8); // 12,13,14,15 12,13,14,15 y[1] = ((thr) < 4) ? 
(u+v) : ((u-v) << ((threadIdx.x&3)<<1)); if ((threadIdx.x&1) && (thr >= 4)) { y[0] = REDUCE(y[0]); // 5, 7 y[1] = REDUCE(y[1]); // 13, 15 } u = __shfl(y[0], (threadIdx.x&5),8); // 0,1,0,1 4,5,4,5 v = __shfl(y[0],2+(threadIdx.x&5),8); // 2,3,2,3 6,7,6,7 y[0] = ((threadIdx.x&3) < 2) ? (u+v) : ((u-v) << ((threadIdx.x&1)<<2)); u = __shfl(y[1], (threadIdx.x&5),8); // 8,9,8,9 12,13,12,13 v = __shfl(y[1],2+(threadIdx.x&5),8); // 10,11,10,11 14,15,14,15 y[1] = ((threadIdx.x&3) < 2) ? (u+v) : ((u-v) << ((threadIdx.x&1)<<2)); u = __shfl(y[0], (threadIdx.x&6),8); // 0,0,2,2 4,4,6,6 v = __shfl(y[0],1+(threadIdx.x&6),8); // 1,1,3,3 5,5,7,7 y[0] = ((threadIdx.x&1) < 1) ? (u+v) : (u-v); u = __shfl(y[1], (threadIdx.x&6),8); // 8,8,10,10 12,12,14,14 v = __shfl(y[1],1+(threadIdx.x&6),8); // 9,9,11,11 13,13,15,15 y[1] = ((threadIdx.x&1) < 1) ? (u+v) : (u-v); DO_REDUCE_FULL_S( 0); // 0...7 DO_REDUCE_FULL_S( 1); // 8...15 #undef DO_REDUCE_FULL_S } /***************************************************/ #if __CUDA_ARCH__ > 500 __global__ __launch_bounds__(TPB52_1,9) #else __global__ __launch_bounds__(TPB50_1,9) #endif static void x11_simd512_gpu_expand_64(uint32_t threads,const uint32_t* __restrict__ g_hash, uint4 *g_temp4) { const uint32_t threadBloc = (blockDim.x * blockIdx.x + threadIdx.x)>>3; const uint8_t thr = (threadIdx.x & 7); /* Message Expansion using Number Theoretical Transform similar to FFT */ int expanded[32]; uint4 vec0; int P, Q, P1, Q1, P2, Q2; const bool even = (threadIdx.x & 1) == 0; const bool hi = (thr)>=4; const bool lo = (thr)<4; const bool sel = ((threadIdx.x+2)&7) >= 4; // 2,3,4,5 if (threadBloc < threads){ const uint32_t hashPosition = threadBloc<<4; const uint32_t *inpHash = &g_hash[hashPosition]; const uint32_t data0 = __ldg(&inpHash[thr]); const uint32_t data1 = __ldg(&inpHash[thr + 8]); // Puffer für expandierte Nachricht uint4 *temp4 = &g_temp4[hashPosition<<2]; #pragma unroll 4 for (uint32_t i=0; i < 4; i++) { expanded[ i] = bfe(__byte_perm(__shfl(data0, i<<1, 8), __shfl(data0, (i<<1)+1, 8), thr),0,8); } #pragma unroll 4 for (uint32_t i=0; i < 4; i++) { expanded[4+i] = bfe(__byte_perm(__shfl(data1, i<<1, 8), __shfl(data1, (i<<1)+1, 8), thr),0,8); } #pragma unroll 8 for (uint32_t i=8; i < 16; i++) { expanded[ i] = 0; } /* * FFT_256 using w=41 as 256th root of unity. Decimation in frequency (DIF) NTT. Output data is in revbin_permuted order. In place. */ #pragma unroll 8 for (uint32_t i=0; i<8; i++) expanded[16+i] = REDUCE(expanded[i] * c_FFT256_2_128_Twiddle[8*i+(thr)]); #pragma unroll 8 for (uint32_t i=24; i < 32; i++) { expanded[ i] = 0; } /* handle X^255 with an additional butterfly */ if (thr==7){ expanded[15] = 1; expanded[31] = REDUCE((-1) * c_FFT256_2_128_Twiddle[127]); } // FFT_128_full(expanded); FFT_8(expanded,2); // eight parallel FFT8's FFT_8(&expanded[16],2); // eight parallel FFT8's FFT_8(&expanded[ 1],2); // eight parallel FFT8's FFT_8(&expanded[17],2); // eight parallel FFT8's #pragma unroll 16 for (uint32_t i=0; i<16; i++){ expanded[i] = REDUCE(expanded[i]*c_FFT128_8_16_Twiddle[i*8+(thr)]); expanded[i+16] = REDUCE(expanded[i+16]*c_FFT128_8_16_Twiddle[i*8+(thr)]); } #pragma unroll 8 for (uint32_t i=0; i<8; i++){ FFT_16(expanded+(i<<1)); // eight sequential FFT16's, each one executed in parallel by 8 threads FFT_16(expanded+16+(i<<1)); // eight sequential FFT16's, each one executed in parallel by 8 threads } // store w matrices in global memory P1 = expanded[ 0]; P2 = __shfl(expanded[ 2], (threadIdx.x-1)&7, 8); P = even ? 
P1 : P2; Q1 = expanded[16]; Q2 = __shfl(expanded[18], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.x = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[0][thr], 8); P1 = expanded[ 8]; P2 = __shfl(expanded[10], (threadIdx.x-1)&7, 8); P = even ? P1 : P2; Q1 = expanded[24]; Q2 = __shfl(expanded[26], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.y = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[0][thr], 8); P1 = expanded[ 4]; P2 = __shfl(expanded[ 6], (threadIdx.x-1)&7, 8); P = even ? P1 : P2; Q1 = expanded[20]; Q2 = __shfl(expanded[22], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.z = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[0][thr], 8); P1 = expanded[12]; P2 = __shfl(expanded[14], (threadIdx.x-1)&7, 8); P = even ? P1 : P2; Q1 = expanded[28]; Q2 = __shfl(expanded[30], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.w = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[0][thr], 8); temp4[thr] = vec0; P1 = expanded[ 1]; P2 = __shfl(expanded[ 3], (threadIdx.x-1)&7, 8); P = even ? P1 : P2; Q1 = expanded[17]; Q2 = __shfl(expanded[19], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.x = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[1][thr], 8); P1 = expanded[ 9]; P2 = __shfl(expanded[11], (threadIdx.x-1)&7, 8); P = even ? P1 : P2; Q1 = expanded[25]; Q2 = __shfl(expanded[27], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.y = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[1][thr], 8); P1 = expanded[ 5]; P2 = __shfl(expanded[ 7], (threadIdx.x-1)&7, 8); P = even ? P1 : P2; Q1 = expanded[21]; Q2 = __shfl(expanded[23], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.z = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[1][thr], 8); P1 = expanded[13]; P2 = __shfl(expanded[15], (threadIdx.x-1)&7, 8); P = even ? P1 : P2; Q1 = expanded[29]; Q2 = __shfl(expanded[31], (threadIdx.x-1)&7, 8); Q = even ? Q1 : Q2; vec0.w = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[1][thr], 8); temp4[8+(thr)] = vec0; P1 = hi?expanded[ 1]:expanded[ 0]; P2 = __shfl(hi?expanded[ 3]:expanded[ 2], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = hi?expanded[17]:expanded[16]; Q2 = __shfl(hi?expanded[19]:expanded[18], (threadIdx.x+1)&7, 8); Q = !even ? Q1 : Q2; vec0.x = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[2][thr], 8); P1 = hi?expanded[ 9]:expanded[ 8]; P2 = __shfl(hi?expanded[11]:expanded[10], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = hi?expanded[25]:expanded[24]; Q2 = __shfl(hi?expanded[27]:expanded[26], (threadIdx.x+1)&7, 8); Q = !even ? Q1 : Q2; vec0.y = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[2][thr], 8); P1 = hi?expanded[ 5]:expanded[ 4]; P2 = __shfl(hi?expanded[ 7]:expanded[ 6], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = hi?expanded[21]:expanded[20]; Q2 = __shfl(hi?expanded[23]:expanded[22], (threadIdx.x+1)&7, 8); Q = !even ? Q1 : Q2; vec0.z = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[2][thr], 8); P1 = hi?expanded[13]:expanded[12]; P2 = __shfl(hi?expanded[15]:expanded[14], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = hi?expanded[29]:expanded[28]; Q2 = __shfl(hi?expanded[31]:expanded[30], (threadIdx.x+1)&7, 8); Q = !even ? Q1 : Q2; vec0.w = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[2][thr], 8); temp4[16+(thr)] = vec0; P1 = lo?expanded[ 1]:expanded[ 0]; P2 = __shfl(lo?expanded[ 3]:expanded[ 2], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = lo?expanded[17]:expanded[16]; Q2 = __shfl(lo?expanded[19]:expanded[18], (threadIdx.x+1)&7, 8); Q = !even ? 
Q1 : Q2; vec0.x = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[3][thr], 8); P1 = lo?expanded[ 9]:expanded[ 8]; P2 = __shfl(lo?expanded[11]:expanded[10], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = lo?expanded[25]:expanded[24]; Q2 = __shfl(lo?expanded[27]:expanded[26], (threadIdx.x+1)&7, 8); Q = !even ? Q1 : Q2; vec0.y = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[3][thr], 8); P1 = lo?expanded[ 5]:expanded[ 4]; P2 = __shfl(lo?expanded[ 7]:expanded[ 6], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = lo?expanded[21]:expanded[20]; Q2 = __shfl(lo?expanded[23]:expanded[22], (threadIdx.x+1)&7, 8); Q = !even ? Q1 : Q2; vec0.z = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[3][thr], 8); P1 = lo?expanded[13]:expanded[12]; P2 = __shfl(lo?expanded[15]:expanded[14], (threadIdx.x+1)&7, 8); P = !even ? P1 : P2; Q1 = lo?expanded[29]:expanded[28]; Q2 = __shfl(lo?expanded[31]:expanded[30], (threadIdx.x+1)&7, 8); Q = !even ? Q1 : Q2; vec0.w = __shfl(__byte_perm(185*P, 185*Q , 0x5410), c_perm[3][thr], 8); temp4[24+(thr)] = vec0; P1 = sel?expanded[0]:expanded[1]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[2]:expanded[3]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.x = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[4][thr], 8); P1 = sel?expanded[8]:expanded[9]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[10]:expanded[11]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.y = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[4][thr], 8); P1 = sel?expanded[4]:expanded[5]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[6]:expanded[7]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.z = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[4][thr], 8); P1 = sel?expanded[12]:expanded[13]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[14]:expanded[15]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.w = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[4][thr], 8); temp4[32+thr] = vec0; P1 = sel?expanded[1]:expanded[0]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[3]:expanded[2]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.x = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[5][thr], 8); P1 = sel?expanded[9]:expanded[8]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[11]:expanded[10]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.y = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[5][thr], 8); P1 = sel?expanded[5]:expanded[4]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[7]:expanded[6]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.z = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[5][thr], 8); P1 = sel?expanded[13]:expanded[12]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); Q2 = sel?expanded[15]:expanded[14]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.w = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[5][thr], 8); temp4[40+thr] = vec0; uint32_t t; t = __shfl(expanded[17],(threadIdx.x+4)&7,8); P1 = sel?t:expanded[16]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[19],(threadIdx.x+4)&7,8); Q2 = sel?t:expanded[18]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? 
Q1 : Q2; vec0.x = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[6][thr], 8); t = __shfl(expanded[25],(threadIdx.x+4)&7,8); P1 = sel?t:expanded[24]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[27],(threadIdx.x+4)&7,8); Q2 = sel?t:expanded[26]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.y = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[6][thr], 8); t = __shfl(expanded[21],(threadIdx.x+4)&7,8); P1 = sel?t:expanded[20]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[23],(threadIdx.x+4)&7,8); Q2 = sel?t:expanded[22]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.z = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[6][thr], 8); t = __shfl(expanded[29],(threadIdx.x+4)&7,8); P1 = sel?t:expanded[28]; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[31],(threadIdx.x+4)&7,8); Q2 = sel?t:expanded[30]; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.w = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[6][thr], 8); temp4[48+thr] = vec0; t = __shfl(expanded[16],(threadIdx.x+4)&7,8); P1 = sel?expanded[17]:t; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[18],(threadIdx.x+4)&7,8); Q2 = sel?expanded[19]:t; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.x = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[7][thr], 8); t = __shfl(expanded[24],(threadIdx.x+4)&7,8); P1 = sel?expanded[25]:t; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[26],(threadIdx.x+4)&7,8); Q2 = sel?expanded[27]:t; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.y = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[7][thr], 8); t = __shfl(expanded[20],(threadIdx.x+4)&7,8); P1 = sel?expanded[21]:t; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[22],(threadIdx.x+4)&7,8); Q2 = sel?expanded[23]:t; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.z = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[7][thr], 8); t = __shfl(expanded[28],(threadIdx.x+4)&7,8); P1 = sel?expanded[29]:t; Q1 = __shfl(P1, (threadIdx.x^1)&7, 8); t = __shfl(expanded[30],(threadIdx.x+4)&7,8); Q2 = sel?expanded[31]:t; P2 = __shfl(Q2, (threadIdx.x^1)&7, 8); P = even? P1 : P2; Q = even? Q1 : Q2; vec0.w = __shfl(__byte_perm(233*P, 233*Q , 0x5410), c_perm[7][thr], 8); temp4[56+thr] = vec0; } }
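// --- Illustrative sketch (not from the source above) -----------------------
// Host-side sanity check for the modular reduction used by the message
// expansion: because 256 is congruent to -1 modulo 257, REDUCE(x) = (x & 255) - (x >> 8)
// is congruent to x mod 257, and EXTRA_REDUCE_S folds the result into [-128, 128]
// for non-negative inputs below 2^15. This is a standalone test program, not
// part of the kernel; reduce257/extra_reduce_s merely mirror the macros above.
#include <cassert>
#include <cstdio>

static int reduce257(int x)      { return (x & 255) - (x >> 8); }   // REDUCE(x)
static int extra_reduce_s(int x) { return x <= 128 ? x : x - 257; } // EXTRA_REDUCE_S(x)

int main() {
    for (int x = 0; x < (1 << 15); ++x) {
        const int r = extra_reduce_s(reduce257(x));   // REDUCE_FULL_S(x)
        assert((x - r) % 257 == 0);                   // congruent to x modulo 257
        assert(r >= -128 && r <= 128);                // reduced representative
    }
    std::puts("REDUCE_FULL_S: all values in [0, 2^15) reduced correctly");
    return 0;
}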
the_stack
#include "CascadedCommon.h" #include "CascadedCompressionGPU.h" #include "CascadedDecompressionKernels.cuh" #include "CascadedMetadata.h" #include "CascadedMetadataOnGPU.h" #include "Check.h" #include "CudaUtils.h" #include "nvcomp_cub.cuh" #include "type_macros.h" #include "unpack.h" #include <cassert> #include <iostream> #include <list> #include <map> #include <memory> #include <mutex> #include <vector> // align all temp allocations by 512B #define CUDA_MEM_ALIGN(size) (((size) + 0x1FF) & ~0x1FF) #ifndef RLE_THREAD_BLOCK #define RLE_THREAD_BLOCK 128 #endif #ifndef RLE_ELEMS_PER_THREAD #define RLE_ELEMS_PER_THREAD 4 #endif #define RLE_ELEMS_PER_BLOCK (RLE_THREAD_BLOCK * RLE_ELEMS_PER_THREAD) namespace nvcomp { namespace highlevel { // internal representations: one kernel per scheme enum nvcompScheme_t { NVCOMP_SCHEME_BP, NVCOMP_SCHEME_RLE, NVCOMP_SCHEME_DELTA, NVCOMP_SCHEME_RLE_DELTA, // automatically fused RLE+Delta to reduce mem // traffic }; struct nvcompLayer_t; struct nvcompDataNode_t { void* ptr; nvcompType_t type; int packing; nvcompLayer_t* parentLayer; size_t length; // to enable BP as a separate layer, default -1 int pointToId; }; struct nvcompLayer_t { nvcompScheme_t scheme; size_t maxOutputSize; nvcompDataNode_t* vals; nvcompDataNode_t* runs; nvcompDataNode_t* output; // TODO: can we get rid of those int valId; int runId; int outputId; }; struct nvcompIntConfig_t { int outputId = 0; nvcompType_t outputType = NVCOMP_TYPE_INT; size_t maxOutputSize = 0; std::list<nvcompLayer_t> layers = {}; std::map<int, nvcompDataNode_t> nodes = {}; // TODO: should we make this nvcompData_t instead of int? // compute the workspace size size_t getWorkspaceBytes(); size_t getWorkspaceBytes(nvcompDataNode_t* node); // fuse kernels, etc. void optimizeLayers(); }; struct nvcompIntTask_t { // TODO: add CUDA event assigned to this task }; struct nvcompIntHandle_t { std::unique_ptr<nvcompIntConfig_t> config = nullptr; cudaStream_t stream = 0; // main decomp functions template <typename outputT> nvcompError_t decompCPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers); template <typename outputT, typename runT> nvcompError_t decompGPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers, cudaStream_t stream = NULL); // workspace memory size_t workspaceBytes = 0; void* workspaceStorage = nullptr; // workspace mem management nvcompError_t release(); nvcompError_t allocateAsync(); // new function that splits of pre-allocated memory // workspace breakdown size_t max_input_len = 0; // maximum input RLE length size_t max_output_len = 0; // maximum output RLE length void* temp_val = nullptr; // temp RLE val expansions void* temp_run = nullptr; // temp RLE run expansions void* temp_delta = nullptr; // temp Delta expansions void* temp_output = nullptr; // temp Delta expansions // cub scan memory size_t temp_scan_bytes = 0; void* temp_scan = nullptr; // block indices start and offsets size_t max_num_blocks = 0; size_t* start_ind = nullptr; size_t* start_off = nullptr; }; template <typename keyT, typename valueT> struct SharedMap { std::map<keyT, valueT> data = {}; std::mutex m = {}; // find the next available id keyT find_next() { std::lock_guard<std::mutex> guard(m); int id = 0; while (data.find(id) != data.end()) id++; return (keyT)id; } bool exists(const keyT& key) { std::lock_guard<std::mutex> guard(m); return data.find(key) != data.end(); } void insert(const keyT& key, const valueT& val) { std::lock_guard<std::mutex> guard(m); if (data.find(key) == data.end()) 
data[key] = val; } valueT& operator[](const keyT& key) { std::lock_guard<std::mutex> guard(m); return data[key]; } void erase(const keyT& key) { std::lock_guard<std::mutex> guard(m); data.erase(key); } }; // internal collections SharedMap<nvcompConfig_t, nvcompIntConfig_t> configs; SharedMap<nvcompHandle_t, nvcompIntHandle_t> handles; // TODO: can we get rid of these? std::mutex config_mutex; std::mutex handle_mutex; namespace { template <typename T> void cubDeviceScanTempSpace(size_t& temp_scan_bytes, const size_t max_input_len) { void* temp_scan = nullptr; T* temp_run = nullptr; CudaUtils::check( cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, temp_run, temp_run, max_input_len), "cub::DeviceScan::InclusiveSum failed"); } void checkCompressSize(const size_t numBytes) { const size_t maxBytes = static_cast<size_t>(std::numeric_limits<int>::max()); if (numBytes > maxBytes) { throw std::runtime_error( "Cascaded compression can only compress up to a maximum of " + std::to_string(maxBytes) + " bytes at a time (requested " + std::to_string(numBytes) + " bytes)."); } } std::unique_ptr<nvcompIntConfig_t> generateConfig(const CascadedMetadata* const metadata) { const int numRLEs = metadata->getNumRLEs(); const int numDeltas = metadata->getNumDeltas(); const bool bitPacking = metadata->useBitPacking(); int vals_id = 0; // initialize config const nvcompType_t type = metadata->getValueType(); std::unique_ptr<nvcompIntConfig_t> config(new nvcompIntConfig_t); config->outputId = vals_id; config->outputType = type; config->maxOutputSize = metadata->getUncompressedSize(); const nvcompType_t runType = selectRunsType(metadata->getNumUncompressedElements()); const size_t maxSegmentSize = metadata->getUncompressedSize(); config->nodes[0].length = metadata->getNumUncompressedElements(); // A step can be RLE+Delta, RLE, or Delta, with final outputs conditionally // having bit packing applied const int numSteps = std::max(numRLEs, numDeltas); for (int r = numSteps - 1; r >= 0; r--) { const int inputId = vals_id; if (numSteps - r - 1 < numRLEs) { const int runId = ++vals_id; const int valId = ++vals_id; // add to config nvcompConfigAddRLE_BP( config.get(), inputId, maxSegmentSize, valId, type, bitPacking, runId, runType, bitPacking); config->nodes[valId].length = metadata->getNumElementsOf(valId); config->nodes[runId].length = metadata->getNumElementsOf(runId); // store vals (apply delta if necessary) if (numRLEs - 1 - r < numDeltas) { const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config.get(), valId, maxSegmentSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config.get(), valId, maxSegmentSize, deltaId, type, 0); // no bitpacking when delta is used as an intermediate step } config->nodes[deltaId].length = metadata->getNumElementsOf(deltaId); } } else { // RLE-less step const int deltaId = ++vals_id; if (r == 0) { nvcompConfigAddDelta_BP( config.get(), inputId, maxSegmentSize, deltaId, type, bitPacking); } else { nvcompConfigAddDelta_BP( config.get(), inputId, maxSegmentSize, deltaId, type, 0); // no bitpacking when delta is used as an intermediate step } config->nodes[deltaId].length = metadata->getNumElementsOf(deltaId); } } // If there are no RLEs or Deltas, we will do a single BP step. 
if (numRLEs == 0 && numDeltas == 0) { const int inputId = vals_id; const int bpId = ++vals_id; nvcompConfigAddBP(config.get(), inputId, maxSegmentSize, bpId, type); config->nodes[bpId].length = metadata->getNumElementsOf(bpId); } return config; } template <typename T> constexpr bool isFixedWidth() { return std::is_same<T, char>::value || std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value || std::is_same<T, int16_t>::value || std::is_same<T, uint16_t>::value || std::is_same<T, int32_t>::value || std::is_same<T, uint32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, uint64_t>::value; } template <typename T> size_t writeFixedWidthData( const T* const val, void* const ptr, const size_t offset, const size_t maxSize) { assert(isFixedWidth<T>()); size_t newOffset = offset + sizeof(*val); if (ptr) { // only write if we're doing a really output if (newOffset > maxSize) { throw std::runtime_error( "Not enough room to write member, need at least " + std::to_string(newOffset) + " bytes, but given only " + std::to_string(maxSize)); } memcpy(static_cast<char*>(ptr) + offset, val, sizeof(*val)); } return newOffset; } template <typename T> size_t writeData( const T* const val, void* const ptr, const size_t offset, const size_t maxSize) { if (isFixedWidth<T>()) { return writeFixedWidthData(val, ptr, offset, maxSize); } else if (std::is_same<T, bool>::value) { const int8_t typedVal = static_cast<int8_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else if (std::is_same<T, int>::value) { // on most systems this will not be used, as int32_t is usually defined as // int const int32_t typedVal = static_cast<int32_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else if (std::is_same<T, size_t>::value) { const uint64_t typedVal = static_cast<uint64_t>(*val); return writeData(&typedVal, ptr, offset, maxSize); } else { throw std::runtime_error("Unsupported type for serialization."); } } } // namespace /************************************************************************************** * Older API definitions below. New API calls rely on them. 
**************************************************************************************/ nvcompIntConfig_t* createConfig(const CascadedMetadata* metadata) { return generateConfig(metadata).release(); } void destroyConfig(nvcompIntConfig_t* config) { delete config; } nvcompError_t nvcompConfigAddRLE_BP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType, int valPacking, int runId, nvcompType_t runType, int runPacking) { nvcompIntConfig_t& c = *config; // setup input nodes if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, valPacking, NULL, 0, 0}; } if (c.nodes.find(runId) == c.nodes.end()) { c.nodes[runId] = {NULL, runType, runPacking, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = {NVCOMP_SCHEME_RLE, maxOutputSize, NULL, NULL, NULL, valId, runId, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } nvcompError_t nvcompConfigAddDelta_BP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType, int valPacking) { nvcompIntConfig_t& c = *config; // setup the input node if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, valPacking, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = {NVCOMP_SCHEME_DELTA, maxOutputSize, NULL, NULL, NULL, valId, -1, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } nvcompError_t nvcompConfigAddBP( nvcompIntConfig_t* const config, int outputId, size_t maxOutputSize, int valId, nvcompType_t valType) { nvcompIntConfig_t& c = *config; // setup the input node if necessary if (c.nodes.find(valId) == c.nodes.end()) { c.nodes[valId] = {NULL, valType, 1, NULL, 0, 0}; } // create the output node if necessary if (c.nodes.find(outputId) == c.nodes.end()) { c.nodes[outputId] = {NULL, valType, 0, NULL, 0, 0}; } nvcompLayer_t layer = { NVCOMP_SCHEME_BP, maxOutputSize, NULL, NULL, NULL, valId, -1, outputId}; c.layers.push_back(layer); c.nodes[outputId].parentLayer = &c.layers.back(); return nvcompSuccess; } size_t nvcompIntConfig_t::getWorkspaceBytes(nvcompDataNode_t* /*node*/) { // TODO: allocate output buffers for each node except the terminal one // currently this is done inside decompGPU which will break concurrency (once // we add streams) return 0; } size_t nvcompIntConfig_t::getWorkspaceBytes() { if (nodes.find(outputId) == nodes.end()) { throw std::runtime_error( "getWorkspaceBytes(): could not find output ID amongst nodes: " + std::to_string(outputId) + " with " + std::to_string(nodes.size()) + " nodes."); } if (nodes[outputId].parentLayer == NULL) { throw std::runtime_error("getWorkspaceBytes(): the output node is not used " "by any compression layers."); } int numRLEs = 0; int numDeltas = 0; size_t max_input_len = 0; for (const nvcompLayer_t& layer : layers) { if (layer.scheme == NVCOMP_SCHEME_RLE || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numRLEs; } if (layer.scheme == NVCOMP_SCHEME_DELTA || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numDeltas; } const size_t layer_len = nodes[layer.valId].length; if (layer_len > max_input_len) { max_input_len = layer_len; } } const size_t max_output_len = maxOutputSize; size_t size = 0; 
// temp vals, runs, delta, output if (numRLEs > 0 || numDeltas > 0) { size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); if (numRLEs > 0) { size += CUDA_MEM_ALIGN( max_input_len * sizeOfnvcompType(selectRunsType(maxOutputSize))); } size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); size += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); } size_t temp_scan_bytes_run = 0; size_t temp_scan_bytes_delta = 0; NVCOMP_TYPE_ONE_SWITCH( selectRunsType(max_output_len), cubDeviceScanTempSpace, temp_scan_bytes_run, max_input_len); NVCOMP_TYPE_ONE_SWITCH( outputType, cubDeviceScanTempSpace, temp_scan_bytes_delta, max_input_len); size_t temp_scan_bytes = std::max(temp_scan_bytes_run, temp_scan_bytes_delta); size += CUDA_MEM_ALIGN(temp_scan_bytes); size_t max_num_blocks = (max_output_len + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; size += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); size += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); return size; } nvcompError_t nvcompIntHandle_t::release() { return nvcompSuccess; } // recursively assign memory for all nodes in our DAG // ** Assumes worspaceStorage is already allocated with sufficient space ** nvcompError_t nvcompIntHandle_t::allocateAsync() { nvcompIntConfig_t& c = *config; nvcompType_t outputType = c.outputType; // assign member variables for size max_output_len = c.maxOutputSize; max_input_len = 0; int numRLEs = 0; int numDeltas = 0; for (const nvcompLayer_t& layer : c.layers) { if (layer.scheme == NVCOMP_SCHEME_RLE || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numRLEs; } if (layer.scheme == NVCOMP_SCHEME_DELTA || layer.scheme == NVCOMP_SCHEME_RLE_DELTA) { ++numDeltas; } const size_t layer_len = c.nodes[layer.valId].length; if (layer_len > max_input_len) { max_input_len = layer_len; } } unsigned char* ptr = (unsigned char*)workspaceStorage; // temporary buffers that can hold RLE expansions and other data, but we will // re-use locations if (numRLEs > 0 || numDeltas > 0) { temp_val = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); if (numRLEs > 0) { temp_run = ptr; ptr += CUDA_MEM_ALIGN( max_input_len * sizeOfnvcompType(selectRunsType(max_output_len))); } temp_delta = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); // one additional buffer for delta expansion // TODO: can we get rid of this one? 
temp_output = ptr; ptr += CUDA_MEM_ALIGN(max_input_len * sizeOfnvcompType(outputType)); } // allocate temp storage for cub scan using the largest size_t // this temp storage will be reused by delta and runs scans of different types temp_scan = ptr; size_t temp_scan_bytes_run = 0; size_t temp_scan_bytes_delta = 0; NVCOMP_TYPE_ONE_SWITCH( selectRunsType(max_output_len), cubDeviceScanTempSpace, temp_scan_bytes_run, max_input_len); NVCOMP_TYPE_ONE_SWITCH( outputType, cubDeviceScanTempSpace, temp_scan_bytes_delta, max_input_len); temp_scan_bytes = std::max(temp_scan_bytes_run, temp_scan_bytes_delta); ptr += CUDA_MEM_ALIGN(temp_scan_bytes); // block indices/offsets max_num_blocks = (max_output_len + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; start_ind = (size_t*)ptr; ptr += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); start_off = (size_t*)ptr; ptr += CUDA_MEM_ALIGN((max_num_blocks + 1) * sizeof(size_t)); return nvcompSuccess; } // here we do kernel fusion void nvcompIntConfig_t::optimizeLayers() { for (auto it = layers.begin(); it != layers.end();) { if (it->scheme == NVCOMP_SCHEME_DELTA) { int valId = it->valId; int outputId = it->outputId; if (nodes.find(valId) != nodes.end() && nodes[valId].parentLayer != NULL && nodes[valId].parentLayer->scheme == NVCOMP_SCHEME_RLE) { nodes[outputId].parentLayer = nodes[valId].parentLayer; nodes[outputId].parentLayer->scheme = NVCOMP_SCHEME_RLE_DELTA; nodes[outputId].parentLayer->outputId = outputId; it = layers.erase(it); continue; } } it++; } } /* These functions may not be needed and removed to simplify codebase */ nvcompError_t nvcompSetWorkspace( nvcompHandle_t /*handle*/, void* /*workspaceStorage*/, size_t /*workspaceBytes*/) { std::cerr << "ERROR: nvcompSetWorkspace is not implemented yet!" << std::endl; return nvcompErrorNotSupported; } nvcompError_t nvcompGetWorkspaceSize(nvcompHandle_t handle, size_t* workspaceBytes) { *workspaceBytes = handles[handle].workspaceBytes; return nvcompSuccess; } nvcompError_t nvcompSetStream(nvcompHandle_t handle, cudaStream_t streamId) { handles[handle].stream = streamId; return nvcompSuccess; } nvcompError_t nvcompGetStream(nvcompHandle_t handle, cudaStream_t* streamId) { *streamId = handles[handle].stream; return nvcompSuccess; } // if the header is not packed this will shallow copy the pointer // otherwise unpack into the output buffer template <typename inputT, typename outputT> void unpackCpu( outputT** output, nvcompDataNode_t* node, const void* hdr, const void* data) { const CascadedMetadata::Header header = *static_cast<const CascadedMetadata::Header*>(hdr); if (node->packing) { for (size_t i = 0; i < header.length; ++i) { const inputT minValue = *CascadedMetadata::getMinValueLocation<inputT>(&header); (*output)[i] = unpackBytes(data, header.numBits, minValue, i); } } else { if (typeid(inputT) == typeid(outputT)) { *output = (outputT*)data; } else { for (size_t i = 0; i < header.length; i++) (*output)[i] = (outputT)((inputT*)data)[i]; } } } template <typename outputT> void unpackCpu( outputT** output, nvcompDataNode_t* node, const void* hdr, const void* data) { NVCOMP_TYPE_TWO_SWITCH_FIRST_ONLY( node->type, outputT, unpackCpu, output, node, hdr, data); } // if the header is not packed this will shallow copy the pointer if it's // accessible from the GPU otherwise copy or unpack into the output buffer template <typename inputT, typename outputT> void unpackGpu( outputT* d_output, nvcompDataNode_t* node, const void* data, const void* h_hdr, cudaStream_t stream) { // prepare input data const void* 
const d_input = CudaUtils::device_pointer(data); // Get length of run from the host-side header size_t length = static_cast<const CascadedMetadata::Header*>(h_hdr)->length; CascadedMetadata::Header header = *static_cast<const CascadedMetadata::Header*>(h_hdr); const unsigned char numBits = header.numBits; const inputT minValue = *CascadedMetadata::getMinValueLocation<inputT>(&header); const dim3 block(512); const dim3 grid(roundUpDiv(length, block.x)); if (node->packing) { unpackBytesKernel<<<grid, block, 0, stream>>>( d_input, d_output, numBits, minValue, length); CudaUtils::check_last_error("unpacKBytesKernel failed to launch"); } else { convertKernel<<<grid, block, 0, stream>>>( static_cast<const inputT*>(d_input), d_output, length); CudaUtils::check_last_error("convertKernel failed to launch"); } } template <typename outputT> void unpackGpu( outputT* d_output, nvcompDataNode_t* node, const void* data, const void* h_hdr, cudaStream_t stream) { NVCOMP_TYPE_TWO_SWITCH_FIRST_ONLY( node->type, outputT, unpackGpu, d_output, node, data, h_hdr, stream); } template <typename outputT> nvcompError_t nvcompIntHandle_t::decompCPU( nvcompDataNode_t* node, const void** inputHdrs, const void** inputData) { size_t maxOutputSize = config->maxOutputSize; std::vector<outputT> unpacked_vals; std::vector<size_t> unpacked_runs; outputT* vals_data = NULL; size_t* runs_data = NULL; size_t vals_len; nvcompLayer_t* layer = node->parentLayer; // add BP only layer if (layer->scheme == NVCOMP_SCHEME_BP) { unpacked_vals.resize(maxOutputSize); vals_data = &unpacked_vals[0]; unpackCpu( &vals_data, layer->vals, inputHdrs[layer->valId], inputData[layer->valId]); vals_len = static_cast<const CascadedMetadata::Header*>(inputHdrs[layer->valId]) ->length; node->length = vals_len; // lazy allocation // TODO: move to allocate() if (node->ptr == NULL) node->ptr = new outputT[vals_len]; // copy and convert type if necessary for (int i = 0; i < vals_len; i++) { ((outputT*)(node->ptr))[i] = vals_data[i]; } return nvcompSuccess; } // compute vals if (layer->vals->parentLayer != NULL) { decompCPU<outputT>(layer->vals, inputHdrs, inputData); vals_data = (outputT*)layer->vals->ptr; vals_len = layer->vals->length; } else { unpacked_vals.resize(maxOutputSize); vals_data = &unpacked_vals[0]; unpackCpu( &vals_data, layer->vals, inputHdrs[layer->valId], inputData[layer->valId]); vals_len = static_cast<const CascadedMetadata::Header*>(inputHdrs[layer->valId]) ->length; } // compute runs if (layer->runs != NULL) { if (layer->runs->parentLayer != NULL) { decompCPU<size_t>(layer->runs, inputHdrs, inputData); runs_data = (size_t*)layer->runs->ptr; } else { unpacked_runs.resize(maxOutputSize); runs_data = &unpacked_runs[0]; unpackCpu( &runs_data, layer->runs, inputHdrs[layer->runId], inputData[layer->runId]); } } // decompress (this is using additional memory) std::vector<outputT> next; next.clear(); switch (layer->scheme) { case NVCOMP_SCHEME_RLE: { for (int i = 0; i < vals_len; i++) next.insert(next.end(), runs_data[i], vals_data[i]); break; } case NVCOMP_SCHEME_RLE_DELTA: { for (int i = 0; i < vals_len; i++) next.insert(next.end(), runs_data[i], vals_data[i]); for (int i = 1; i < next.size(); i++) next[i] += next[i - 1]; break; } case NVCOMP_SCHEME_DELTA: { next.resize(vals_len); next[0] = vals_data[0]; for (int i = 1; i < vals_len; i++) next[i] = next[i - 1] + vals_data[i]; break; } default: return nvcompErrorNotSupported; } node->length = next.size(); // lazy allocation // TODO: move to allocate() if (node->ptr == NULL) node->ptr = new 
outputT[next.size()]; // copy and convert type if necessary for (int i = 0; i < next.size(); i++) ((outputT*)(node->ptr))[i] = next[i]; return nvcompSuccess; } // Perform Cascaded decompression on the GPU. // Assumes all workspace is pre-allocated and assigned, inputHdrs and inputData // are GPU-accessible, and h_headers is CPU-accessible template <typename outputT, typename runT> nvcompError_t nvcompIntHandle_t::decompGPU( nvcompDataNode_t* node, const void** inputData, const void** h_headers, cudaStream_t stream) { // get typed copies of pointers to avoid casting outputT* const localOutput = static_cast<outputT*>(temp_output); outputT* const localDelta = static_cast<outputT*>(temp_delta); runT* const localRun = static_cast<runT*>(temp_run); outputT* out_ptr = CudaUtils::device_pointer(static_cast<outputT*>(node->ptr)); nvcompLayer_t* layer = node->parentLayer; if (layer->scheme == NVCOMP_SCHEME_BP) { // We assume this is the only layer, and we just do it and exit layer->vals->ptr = out_ptr; unpackGpu( (outputT*)layer->vals->ptr, layer->vals, inputData[layer->valId], h_headers[layer->valId], stream); layer->vals->length = static_cast<const CascadedMetadata::Header*>(h_headers[layer->valId]) ->length; assert(layer->vals->length <= max_input_len); return nvcompSuccess; } // prepare inputs std::swap(temp_output, temp_val); if (layer->vals->parentLayer != NULL) { layer->vals->ptr = localOutput; // when recursing, swap decompGPU<outputT, runT>(layer->vals, inputData, h_headers, stream); } else { // unpack RLE values layer->vals->ptr = localOutput; unpackGpu( (outputT*)layer->vals->ptr, layer->vals, inputData[layer->valId], h_headers[layer->valId], stream); layer->vals->length = static_cast<const CascadedMetadata::Header*>(h_headers[layer->valId]) ->length; assert(layer->vals->length <= max_input_len); } if (layer->runs != nullptr) { if (layer->runs->parentLayer != nullptr) { throw std::runtime_error("decompGPU(): Runs cannot have parent layers."); } else { // unpack RLE runs layer->runs->ptr = localRun; unpackGpu( (runT*)layer->runs->ptr, layer->runs, inputData[layer->runId], h_headers[layer->runId], stream); layer->runs->length = static_cast<const CascadedMetadata::Header*>( h_headers[layer->runId]) ->length; } } outputT* d_vals = (outputT*)layer->vals->ptr; const size_t input_size = layer->vals->length; assert(input_size <= max_input_len); if (layer->scheme == NVCOMP_SCHEME_DELTA) { assert(out_ptr != d_vals); CudaUtils::check( cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, d_vals, out_ptr, input_size, stream), "cub::DeviceScan::InclusiveSum failed"); } else { // must be RLE of some form runT* d_runs = (runT*)layer->runs->ptr; assert(layer->runs->length == input_size); if (layer->scheme == NVCOMP_SCHEME_RLE_DELTA) { const dim3 block(512); const dim3 grid(roundUpDiv(input_size, block.x)); vecMultKernel<<<grid, block, 0, stream>>>( d_vals, d_runs, localDelta, input_size); CudaUtils::check_last_error("vecMultKernel failed to launch"); // inclusive scan to compute Delta sums CudaUtils::check( cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, localDelta, localDelta, input_size, stream), "cub::DeviceScan::InclusiveSum"); } // inclusive scan to compute RLE offsets // TODO: could be merged with the unpack kernel? 
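// (Added comment) After the in-place InclusiveSum below, d_runs[i] holds the cumulative
// (end) offset of run i in the expanded output; searchBlockBoundaries then uses these
// offsets to precompute, for each block of RLE_ELEMS_PER_BLOCK outputs, where in the
// run-length stream that block starts (start_ind / start_off).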
CudaUtils::check( cub::DeviceScan::InclusiveSum( temp_scan, temp_scan_bytes, d_runs, d_runs, input_size, stream), "cub::DeviceScan::InclusiveSum"); const size_t output_length = node->length; // precompute start/end boundaries for each CUDA block size_t output_grid = (output_length + RLE_ELEMS_PER_BLOCK - 1) / RLE_ELEMS_PER_BLOCK; size_t output_grid_block = (output_grid + RLE_THREAD_BLOCK - 1) / RLE_THREAD_BLOCK; searchBlockBoundaries<runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD> <<<output_grid_block, RLE_THREAD_BLOCK, 0, stream>>>( start_ind, start_off, output_grid, input_size, d_runs); CudaUtils::check_last_error("searchBlockBoundariesKernel failed to launch"); // expand RLE and apply Delta: buf[r] -> buf[r+1] // TODO: implement macro to look nicer? switch (layer->scheme) { case NVCOMP_SCHEME_RLE_DELTA: expandRLEDelta< outputT, outputT, runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD, true><<<output_grid, RLE_THREAD_BLOCK, 0, stream>>>( (outputT*)out_ptr, output_length, d_vals, d_runs, localDelta, start_ind, start_off); CudaUtils::check_last_error("expandRLEDelta failed to launch"); break; case NVCOMP_SCHEME_RLE: expandRLEDelta< outputT, outputT, runT, RLE_THREAD_BLOCK, RLE_ELEMS_PER_THREAD, false><<<output_grid, RLE_THREAD_BLOCK, 0, stream>>>( (outputT*)out_ptr, output_length, d_vals, d_runs, localDelta, start_ind, start_off); CudaUtils::check_last_error("expandRLEDelta failed to launch"); break; default: throw std::runtime_error( "Invalid rle scheme: " + std::to_string(layer->scheme)); } } return nvcompSuccess; } nvcompError_t nvcompSetNodeLength(nvcompHandle_t handle, int nodeId, size_t output_length) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; c.nodes[nodeId].length = output_length; return nvcompSuccess; } // Main function that sets up Cascaded decompression from the old API. // the new cascaded decompression API call is just a wrapper around this (though // heavily modified to be asynchronous). template <typename outputType, typename runType> nvcompError_t nvcompDecompressLaunch( nvcompHandle_t handle, void* outputData, const size_t outputSize, const void** inputData, const void** h_headers) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; // TODO: assign all the buffers nvcompDataNode_t* terminal_node = &c.nodes[c.outputId]; terminal_node->ptr = outputData; nvcompError_t ret = h.decompGPU<outputType, runType>( terminal_node, inputData, h_headers, h.stream); const size_t neededBytes = terminal_node->length * sizeof(outputType); if (outputSize < neededBytes) { std::cerr << "Insufficient space to write decompressed date: given " << outputSize << " bytes but need " << neededBytes << " bytes." 
<< std::endl; return nvcompErrorInvalidValue; } // this is to enable the correct result for multi-chunk execuation for (auto it = c.nodes.begin(); it != c.nodes.end(); it++) { it->second.length = 0; } return ret; } nvcompError_t nvcompDecompressLaunch( nvcompHandle_t handle, const size_t numUncompressedElements, void* const outputData, const size_t outputSize, const void** const inputData, const void** const h_headers) { const nvcompType_t outputType = handles[handle].config->outputType; const nvcompType_t runType = selectRunsType(numUncompressedElements); NVCOMP_TYPE_TWO_SWITCH_RETURN( outputType, runType, nvcompDecompressLaunch, handle, outputData, outputSize, inputData, h_headers); } nvcompError_t nvcompDestroyHandle(nvcompHandle_t handle) { nvcompIntHandle_t& h = handles[handle]; nvcompIntConfig_t& c = *h.config; // free temp memory h.release(); // clear all local nodes attached to this config c.nodes.clear(); // remove the handle from the list handles.erase(handle); return nvcompSuccess; } // Modified version of handle creation function from previous API to now be // asynchronous Assumes workspaceStorage is already allocated. nvcompError_t nvcompCreateHandleAsync( nvcompHandle_t* handle, std::unique_ptr<nvcompIntConfig_t> config, void* workspaceStorage, const size_t workspaceBytes, cudaStream_t stream) { std::lock_guard<std::mutex> guard(handle_mutex); nvcompIntConfig_t& c = *config; // first - optimize the plan c.optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto it = c.layers.begin(); it != c.layers.end(); it++) { it->vals = &c.nodes[it->valId]; it->output = &c.nodes[it->outputId]; if (it->runId >= 0) it->runs = &c.nodes[it->runId]; } if (workspaceBytes < c.getWorkspaceBytes()) { std::cerr << "Insufficient workspace size: got " << workspaceBytes << " but need " << c.getWorkspaceBytes() << std::endl; return nvcompErrorInvalidValue; } // find the next available id nvcompHandle_t id = handles.find_next(); *handle = id; nvcompIntHandle_t& h = handles[id]; h.config = std::move(config); h.stream = stream; h.workspaceBytes = workspaceBytes; h.workspaceStorage = workspaceStorage; h.allocateAsync(); return nvcompSuccess; } } // namespace highlevel } // namespace nvcomp using namespace nvcomp; using namespace nvcomp::highlevel; void nvcompCascadedDestroyMetadata(void* const metadata_ptr) { CascadedMetadata* metadata = static_cast<CascadedMetadata*>(metadata_ptr); ::operator delete(metadata); } nvcompError_t nvcompCascadedDecompressConfigure( const void* compressed_ptr, size_t compressed_bytes, void** metadata_ptr, size_t* metadata_bytes, size_t* temp_bytes, size_t* uncompressed_bytes, cudaStream_t stream) { try { CHECK_NOT_NULL(compressed_ptr); CHECK_NOT_NULL(metadata_ptr); CHECK_NOT_NULL(temp_bytes); CHECK_NOT_NULL(uncompressed_bytes); CascadedMetadata* metadata; CascadedMetadataOnGPU gpuMetadata((void*)compressed_ptr, compressed_bytes); metadata = new CascadedMetadata(gpuMetadata.copyToHost(stream)); *temp_bytes = metadata->getTempBytes(); *uncompressed_bytes = metadata->getUncompressedSize(); *metadata_ptr = (void*)metadata; if(metadata_bytes) { *metadata_bytes = sizeof(CascadedMetadata); } } catch (const std::exception& e) { return Check::exception_to_error( e, "nvcompCascadedDecompressConfigure()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedDecompressAsync( const void* const in_ptr, const size_t in_bytes, const void* const metadata_ptr, size_t metadata_bytes, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, const size_t 
out_bytes, cudaStream_t stream) { nvcompHandle_t handle = -1; try { CHECK_NOT_NULL(metadata_ptr); const CascadedMetadata* const metadata = static_cast<const CascadedMetadata*>(metadata_ptr); if (in_bytes < metadata->getCompressedSize()) { throw NVCompException( nvcompErrorInvalidValue, "in_bytes is smaller than compressed data size: " + std::to_string(in_bytes) + " < " + std::to_string(metadata->getCompressedSize())); } std::unique_ptr<nvcompIntConfig_t> c = generateConfig(metadata); // first - optimize the plan c->optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto& layer : c->layers) { layer.vals = &c->nodes[layer.valId]; layer.output = &c->nodes[layer.outputId]; if (layer.runId >= 0) { layer.runs = &c->nodes[layer.runId]; } } CHECK_API_CALL(nvcompCreateHandleAsync( &handle, std::move(c), temp_ptr, temp_bytes, stream)); assert(handle >= 0); // Pointers to different portions of compressed data std::vector<void*> inputData(metadata->getNumInputs(), nullptr); std::vector<CascadedMetadata::Header> inputHdrs; std::vector<CascadedMetadata::Header*> cpuHdrs; for (size_t i = 0; i < metadata->getNumInputs(); i++) { inputHdrs.emplace_back(metadata->getHeader(i)); inputData[i] = &((char*)in_ptr)[metadata->getDataOffset(i)]; } for (CascadedMetadata::Header& hdr : inputHdrs) { cpuHdrs.emplace_back(&hdr); } nvcompDecompressLaunch( handle, metadata->getNumUncompressedElements(), out_ptr, out_bytes, (const void**)inputData.data(), (const void**)cpuHdrs.data()); nvcompDestroyHandle(handle); } catch (const std::exception& e) { if (handle >= 0) { nvcompDestroyHandle(handle); } return Check::exception_to_error(e, "nvcompCascadedDecompressAsync()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedCompressConfigure( const nvcompCascadedFormatOpts* format_opts, nvcompType_t type, size_t uncompressed_bytes, size_t* metadata_bytes, size_t* temp_bytes, size_t* compressed_bytes) { try { CHECK_NOT_NULL(temp_bytes); CHECK_NOT_NULL(compressed_bytes); checkCompressSize(uncompressed_bytes); nvcompCascadedFormatOpts temp_opts; // if no format opts given, assume worst-case temp and output sizes if(format_opts == NULL) { temp_opts.num_RLEs = 2; temp_opts.num_deltas = 2; temp_opts.use_bp = 1; } else { temp_opts.num_RLEs = format_opts->num_RLEs; temp_opts.num_deltas = format_opts->num_deltas; temp_opts.use_bp = format_opts->use_bp; } if(metadata_bytes) { *metadata_bytes = sizeof(CascadedMetadata); } nvcompCascadedCompressionGPU::computeWorkspaceSize( uncompressed_bytes, type, &temp_opts, temp_bytes); nvcompCascadedCompressionGPU::generateOutputUpperBound( uncompressed_bytes, type, &temp_opts, compressed_bytes); } catch (const std::exception& e) { return Check::exception_to_error(e, "nvcompCascadedCompressConfigure()"); } return nvcompSuccess; } nvcompError_t nvcompCascadedCompressAsync( const nvcompCascadedFormatOpts* const format_opts, const nvcompType_t in_type, const void* const in_ptr, const size_t in_bytes, void* const temp_ptr, const size_t temp_bytes, void* const out_ptr, size_t* const out_bytes, cudaStream_t stream) { try { checkCompressSize(in_bytes); CHECK_NOT_NULL(out_bytes); nvcompCascadedFormatOpts final_opts; if(format_opts == NULL) { // need to run auto-selector of NULL nvcompCascadedSelectorOpts selector_opts; selector_opts.sample_size = 1024; selector_opts.num_samples = 100; selector_opts.seed = 1; size_t type_bytes = sizeOfnvcompType(in_type); // Adjust sample size if input is too small if (in_bytes < (selector_opts.sample_size * selector_opts.num_samples * 
type_bytes)) { selector_opts.num_samples = in_bytes / (selector_opts.sample_size*type_bytes); // If too small for even 1 sample of 1024, decrease sample size if (in_bytes < 1024*type_bytes) { selector_opts.num_samples = 1; selector_opts.sample_size = in_bytes / type_bytes; } } double est_ratio; nvcompError_t err = nvcompCascadedSelectorRun( &selector_opts, in_type, in_ptr, in_bytes, temp_ptr, temp_bytes, &final_opts, &est_ratio, stream); } else { final_opts.num_RLEs = format_opts->num_RLEs; final_opts.num_deltas = format_opts->num_deltas; final_opts.use_bp = format_opts->use_bp; } nvcompCascadedCompressionGPU::compressAsync( in_ptr, in_bytes, in_type, &final_opts, temp_ptr, temp_bytes, out_ptr, out_bytes, stream); } catch (const std::exception& e) { return Check::exception_to_error(e, "nvcompCascadedCompressAsync()"); } return nvcompSuccess; } size_t CascadedMetadata::getTempBytes() const { std::unique_ptr<nvcompIntConfig_t> c = generateConfig(this); // first - optimize the plan c->optimizeLayers(); // assign pointers - at this point the nodes map is set for (auto& layer : c->layers) { layer.vals = &c->nodes[layer.valId]; layer.output = &c->nodes[layer.outputId]; if (layer.runId >= 0) { layer.runs = &c->nodes[layer.runId]; } } // Return the required temp and output sizes return c->getWorkspaceBytes(); }
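/*
 * Usage sketch (added for illustration): how a caller would typically drive the cascaded
 * decompression entry points defined above. Only nvcompCascadedDecompressConfigure,
 * nvcompCascadedDecompressAsync and nvcompCascadedDestroyMetadata come from the code above;
 * the function name, buffer names and the bare-bones error handling here are assumptions,
 * not part of the library.
 */
#include <cuda_runtime.h>
#include <cstdio>

static void example_cascaded_decompress(
    const void* d_compressed, size_t compressed_bytes, cudaStream_t stream)
{
  void* metadata_ptr = nullptr;
  size_t metadata_bytes = 0;
  size_t temp_bytes = 0;
  size_t uncompressed_bytes = 0;

  // 1. Parse the on-GPU metadata and learn how much temp/output space is needed.
  if (nvcompCascadedDecompressConfigure(
          d_compressed, compressed_bytes, &metadata_ptr, &metadata_bytes,
          &temp_bytes, &uncompressed_bytes, stream) != nvcompSuccess) {
    std::fprintf(stderr, "nvcompCascadedDecompressConfigure failed\n");
    return;
  }

  // 2. Allocate the device workspace and the output buffer.
  void* d_temp = nullptr;
  void* d_out = nullptr;
  cudaMalloc(&d_temp, temp_bytes);
  cudaMalloc(&d_out, uncompressed_bytes);

  // 3. Run decompression on the given stream.
  nvcompCascadedDecompressAsync(
      d_compressed, compressed_bytes, metadata_ptr, metadata_bytes,
      d_temp, temp_bytes, d_out, uncompressed_bytes, stream);

  // 4. Release resources once the stream is done with them.
  cudaStreamSynchronize(stream);
  nvcompCascadedDestroyMetadata(metadata_ptr);
  cudaFree(d_temp);
  cudaFree(d_out);
}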
#pragma once #include <set> #include <map> #include <vector> #include <algorithm> //#define KNN_TEST_DEBUG #ifdef KNN_TEST_DEBUG #define debug(a...) printf(a) #else #define debug(a...) #endif namespace gunrock { namespace app { namespace knn { /** * @brief Speciflying parameters for KNN Problem * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_test(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(parameters.Use<uint32_t>( "omp-threads", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, 0, "Number of threads for parallel omp knn implementation; 0 for " "default.", __FILE__, __LINE__)); return retval; } template<typename SizeT, typename ValueT> std::pair<ValueT, SizeT> flip_pair(const std::pair<SizeT, ValueT> &p){ return std::pair<ValueT, SizeT>(p.second, p.first); } template<typename SizeT, typename ValueT, typename C> std::multimap<ValueT, SizeT, C> flip_map(const std::map<SizeT, ValueT, C>& map){ std::multimap<ValueT, SizeT, C> result; std::transform(map.begin(), map.end(), std::inserter(result, result.begin()), flip_pair<SizeT, ValueT>); return result; } /****************************************************************************** * KNN Testing Routines *****************************************************************************/ /** * @brief Simple CPU-based reference knn ranking implementations * @tparam GraphT Type of the graph * @tparam ValueT Type of the values * @param[in] graph Input graph ... * @param[in] quiet Whether to print out anything to stdout */ template < typename VertexT, typename SizeT, typename ValueT> double CPU_Reference(util::Parameters &parameters, util::Array1D<SizeT, ValueT> &points, // points SizeT n, // number of points SizeT dim, // number of dimension SizeT k, // number of nearest neighbor SizeT *knns, // knns bool quiet) { cudaError_t retval = cudaSuccess; util::CpuTimer cpu_timer; cpu_timer.Start(); GUARD_CU(points.Move(util::HOST, util::DEVICE, n*dim)); bool transpose = parameters.Get<bool>("transpose"); util::Array1D<SizeT, ValueT, util::UNIFIED> data; GUARD_CU(data .Allocate(n*dim, util::DEVICE)); GUARD_CU(data.ForAll( [points] __host__ __device__ (ValueT *d, const SizeT &pos){ d[pos] = points[pos]; }, n*dim, util::DEVICE)); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); int num_devices; // = 3; cudaGetDeviceCount(&num_devices); /* *************************************** * [TODO] Consider boundary conditions* *************************************** */ int MAX_DATA = 2; int CHUNK = MAX_DATA*num_devices; cudaError_t retvals[CHUNK]; cudaStream_t stream[CHUNK]; cudaEvent_t event[CHUNK]; util::Array1D<SizeT, util::Array1D<SizeT, ValueT>> distance; util::Array1D<SizeT, util::Array1D<SizeT, SizeT>> keys; GUARD_CU(distance .Allocate(CHUNK, util::HOST)); GUARD_CU(keys .Allocate(CHUNK, util::HOST)); // CUB Temporary arrays util::Array1D<SizeT, util::Array1D<SizeT, ValueT>> cub_distance; util::Array1D<SizeT, util::Array1D<SizeT, SizeT>> cub_keys; GUARD_CU(cub_distance .Allocate(CHUNK, util::HOST)); GUARD_CU(cub_keys .Allocate(CHUNK, util::HOST)); util::Array1D<SizeT, util::Array1D<SizeT, SizeT>> knns_d; GUARD_CU(knns_d .Allocate(n, util::HOST)); util::PrintMsg("Host Allocation done."); for (int p = 0; p < ((n+(CHUNK-1))/CHUNK); p++) { for (int dev = 0; dev < num_devices; dev++) { GUARD_CU2(cudaSetDevice(dev), "cudaSetDevice failed."); for(int d = 0; d < MAX_DATA; d++) { auto jump = 
(p*CHUNK); auto index = jump + ((dev*MAX_DATA) + d); if (index >= n) break; GUARD_CU(knns_d[index] .Allocate(k, util::HOST | util::DEVICE)); } GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); } } util::PrintMsg("KNNs Allocation done."); for(int dev = 0; dev < num_devices; dev++) { GUARD_CU2(cudaSetDevice(dev), "cudaSetDevice failed."); for(int peer = 0; peer < num_devices; peer++) { int peer_access_avail = 0; GUARD_CU2(cudaDeviceCanAccessPeer(&peer_access_avail, dev, peer), "cudaDeviceCanAccessPeer failed"); if (peer_access_avail) { GUARD_CU2(cudaDeviceEnablePeerAccess(peer, 0), "cudaDeviceEnablePeerAccess failed"); } } for(int d = 0; d < MAX_DATA; d++) { auto row = (dev*MAX_DATA) + d; if (row >= CHUNK) break; GUARD_CU2(cudaStreamCreateWithFlags(&stream[row], cudaStreamNonBlocking), "cudaStreamCreateWithFlags failed."); GUARD_CU2(cudaEventCreate(&event[row]), "cudaEventCreate failed."); GUARD_CU2(cudaEventRecord(event[row], stream[row]), "cudaEventRecord failed."); GUARD_CU(distance[row] .Allocate(n, util::DEVICE)); GUARD_CU(keys[row] .Allocate(n, util::DEVICE)); GUARD_CU(cub_distance[row] .Allocate(n, util::DEVICE)); GUARD_CU(cub_keys[row] .Allocate(n, util::DEVICE)); } GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); } util::PrintMsg("Distance/Keys and Device management done."); // Find K nearest neighbors for each point in the dataset // Can use multi-gpu to speed up the computation for (SizeT m = 0; m < ((n+(CHUNK-1))/(CHUNK)); m++) { //#pragma omp parallel for for (int dev = 0; dev < num_devices; dev++) { util::GRError(cudaSetDevice(dev), "cudaSetDevice failed."); // #pragma omp parallel for for(int x = 0; x < MAX_DATA; x++) { auto row = (dev*MAX_DATA) + x; auto v = (m*CHUNK) + row; if (v < n && row < CHUNK){ auto &error = retvals[row]; auto &ith_distances = distance[row]; auto &ith_keys = keys[row]; // Calculate N distances for each point auto distance_op = [n, dim, data, ith_keys, ith_distances, row, v, transpose] __host__ __device__ (const SizeT &i) { ValueT dist = 0; if (i == v) { dist = util::PreDefinedValues<ValueT>::MaxValue; } else { dist = euclidean_distance(dim, n, data.GetPointer(util::DEVICE), v, i, transpose); } ith_distances[i] = dist; ith_keys[i] = i; }; error = oprtr::For(distance_op, n, util::DEVICE, stream[row]); error = util::GRError(cudaStreamSynchronize(stream[row]), "cudaStreamSynchronize failed", __FILE__, __LINE__); util::CUBRadixSort(true, n, ith_distances.GetPointer(util::DEVICE), ith_keys.GetPointer(util::DEVICE), cub_distance[row].GetPointer(util::DEVICE), cub_keys[row].GetPointer(util::DEVICE), (void*)NULL, (size_t)0, stream[row]); error = util::GRError(cudaStreamSynchronize(stream[row]), "cudaStreamSynchronize failed", __FILE__, __LINE__); // Choose k nearest neighbors for each node auto &ith_knns = knns_d[v]; auto knns_op = [m, k, ith_knns, ith_keys, row, v] __host__ __device__ (const SizeT &i) { ith_knns[i] = ith_keys[i]; }; error = oprtr::For(knns_op, k, util::DEVICE, stream[row]); error = util::GRError(cudaStreamSynchronize(stream[row]), "cudaStreamSynchronize failed", __FILE__, __LINE__); } } } } util::PrintMsg("Main algroithm loop done."); for (int dev = 0; dev < num_devices; dev++) { util::GRError(cudaSetDevice(dev), "cudaSetDevice failed."); retvals[dev] = util::GRError(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed", __FILE__, __LINE__); } util::PrintMsg("Synchronized all GPUs."); for (int p = 0; p < ((n+(CHUNK-1))/(CHUNK)); p++) { for (int dev = 0; dev < num_devices; dev++) { 
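// (Added comment) Copy each point's k nearest-neighbor indices back to the host,
// walking the same (chunk, device, slot) layout used during allocation.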
GUARD_CU2(cudaSetDevice(dev), "cudaSetDevice failed."); for(int d = 0; d < MAX_DATA; d++) { auto jump = (p*CHUNK); auto index = jump + ((dev*MAX_DATA) + d); if (index >= n) break; GUARD_CU(knns_d[index] .Move(util::DEVICE, util::HOST)); } } } for (int dev = 0; dev < num_devices; dev++) { util::GRError(cudaSetDevice(dev), "cudaSetDevice failed."); retvals[dev] = util::GRError(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed", __FILE__, __LINE__); } util::PrintMsg("Moved KNNs from DEVICE to HOST."); // #pragma omp parallel for for (int t = 0; t < n; t++) { for (int l = 0; l < k; l++) { auto h_knns = knns_d[t].GetPointer(util::HOST); knns[(t*k) + l] = h_knns[l]; } } util::PrintMsg("Copy KNNs device result to output KNNs array."); // Clean-up for (int dev = 0; dev < num_devices; dev++) { GUARD_CU2(cudaSetDevice(dev), "cudaSetDevice failed."); for(int x = 0; x < MAX_DATA; x++) { auto row = (dev*MAX_DATA) + x; if (row >= CHUNK) break; GUARD_CU(keys[row].Release(util::DEVICE)); GUARD_CU(distance[row].Release(util::DEVICE)); GUARD_CU(cub_keys[row].Release(util::DEVICE)); GUARD_CU(cub_distance[row].Release(util::DEVICE)); GUARD_CU2(cudaStreamDestroy(stream[row]), "cudaStreamDestroy failed."); } util::PrintMsg("Clean-up GPU Arrays:: keys, distance done."); } GUARD_CU(keys.Release(util::HOST)); GUARD_CU(distance.Release(util::HOST)); GUARD_CU(cub_keys.Release(util::HOST)); GUARD_CU(cub_distance.Release(util::HOST)); util::PrintMsg("Clean-up CPU Arrays:: keys, distance done."); for (int p = 0; p < ((n+(CHUNK-1))/(CHUNK)); p++) { for (int dev = 0; dev < num_devices; dev++) { GUARD_CU2(cudaSetDevice(dev), "cudaSetDevice failed."); for(int d = 0; d < MAX_DATA; d++) { auto jump = (p*CHUNK); auto index = jump + ((dev*MAX_DATA) + d); if (index >= n) break; GUARD_CU(knns_d[index] .Release(util::DEVICE | util::HOST)); } } } util::PrintMsg("Clean-up CPU Arrays:: knns_d[*] done."); GUARD_CU(knns_d .Release(util::HOST)); util::PrintMsg("Clean-up done."); cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); return elapsed; } /** * @brief Validation of KNN results * @tparam GraphT Type of the graph * @tparam ValueT Type of the values * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[in] h_knns KNN computed on GPU * @param[in] ref_knns KNN computed on CPU * @param[in] points List of points * @param[in] verbose Whether to output detail comparsions * \return GraphT::SizeT Number of errors */ template <typename GraphT, typename SizeT, typename ValueT> typename GraphT::SizeT Validate_Results(util::Parameters &parameters, GraphT &graph, SizeT *h_knns, SizeT *ref_knns, util::Array1D<SizeT, ValueT> points, bool verbose = true) { typedef typename GraphT::CsrT CsrT; SizeT num_errors = 0; bool quiet = parameters.Get<bool>("quiet"); bool quick = parameters.Get<bool>("quick"); SizeT k = parameters.Get<SizeT>("k"); SizeT num_points = parameters.Get<SizeT>("n"); SizeT dim = parameters.Get<SizeT>("dim"); bool transpose = parameters.Get<bool>("transpose"); if (quick) return num_errors; for (SizeT v = 0; v < num_points; ++v) { for (SizeT i = 0; i < k; ++i) { SizeT w1 = h_knns[v * k + i]; SizeT w2 = ref_knns[v * k + i]; if (w1 != w2) { ValueT dist1 = euclidean_distance(dim, num_points, points.GetPointer(util::HOST), v, w1, transpose); ValueT dist2 = euclidean_distance(dim, num_points, points.GetPointer(util::HOST), v, w2, transpose); if (dist1 != dist2){ util::PrintMsg( "point::nearest-neighbor, gpu_knn(" + std::to_string(v) + ")=" + std::to_string(h_knns[v * k + i]) + " != cpu_knn(" + 
std::to_string(v) + ")=" + std::to_string(ref_knns[v * k + i]), !quiet); ++num_errors; } } } } if (num_errors > 0) { util::PrintMsg(std::to_string(num_errors) + " errors occurred in KNN.", !quiet); } else { util::PrintMsg("PASSED KNN", !quiet); } return num_errors; } } // namespace knn } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
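/*
 * Reference sketch (added for illustration): the single-threaded brute-force KNN that the
 * multi-GPU CPU_Reference above distributes across devices. For every query point we compute
 * squared Euclidean distances to all other points (distance to self set to infinity, as
 * above), sort candidate indices by distance, and keep the k closest. The names
 * knn_bruteforce / squared_distance and the row-major point layout are illustrative
 * assumptions, not gunrock API.
 */
#include <vector>
#include <numeric>
#include <algorithm>
#include <limits>
#include <cstddef>

static double squared_distance(const std::vector<double>& pts,
                               std::size_t dim, std::size_t a, std::size_t b)
{
  double d = 0.0;
  for (std::size_t j = 0; j < dim; ++j) {
    const double diff = pts[a * dim + j] - pts[b * dim + j];  // row-major: point a, coordinate j
    d += diff * diff;
  }
  return d;
}

// knns must hold n*k entries; the i-th neighbor of point v is written to knns[v*k + i].
static void knn_bruteforce(const std::vector<double>& pts,
                           std::size_t n, std::size_t dim, std::size_t k,
                           std::size_t* knns)
{
  std::vector<double> dist(n);
  std::vector<std::size_t> order(n);
  for (std::size_t v = 0; v < n; ++v) {
    for (std::size_t i = 0; i < n; ++i)
      dist[i] = (i == v) ? std::numeric_limits<double>::infinity()
                         : squared_distance(pts, dim, v, i);
    std::iota(order.begin(), order.end(), 0);
    std::sort(order.begin(), order.end(),
              [&](std::size_t a, std::size_t b) { return dist[a] < dist[b]; });
    for (std::size_t i = 0; i < k && i < n; ++i)
      knns[v * k + i] = order[i];
  }
}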
#include <algorithm> // Pseudo-random number generator namespace amgx { static __host__ __device__ __forceinline__ unsigned int hash_function(unsigned int a, unsigned int seed, unsigned int rows = 0) { a ^= seed; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return a; } // --------------------------- // Methods // --------------------------- //This recurring template avoids recursions and computes the greedy strategy visiting deeper and deeper neighborhoods. template<int LEVEL, int DEPTH> struct neighbor_checker { static void check(const int *A_row_offsets, const int *A_col_indices, int *color, const int row_start, const int row_end, bool *used_colors, const int start_color, int *queue, int &tail, const bool bfs, const bool use_ido_for_bfs) { for (int row_it = row_start; row_it < row_end; row_it++) { int c = A_col_indices[row_it]; int col = color[c]; if (col - start_color >= 0) //if color is amongst the tracked ones { used_colors[col - start_color] = 1; //use the color } if (!use_ido_for_bfs) //if using normal BFS, enqueue discovered nodes { if (col == 0 && bfs) //uncolored node discovered and using bfs (this is called also by pgreedy_equivalent) { color[c] = -1; //mark: visited state: each node is enqueued only once queue[tail] = c; //enqueue tail++; } } //Compile time recursion using templates. neighbor_checker < LEVEL - 1, DEPTH + 1 >::check(A_row_offsets, A_col_indices, color, A_row_offsets[c], A_row_offsets[c + 1], used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } } }; //Recurring template final step. template<int DEPTH> struct neighbor_checker<0, DEPTH> { static void check(const int *A_row_offsets, const int *A_col_indices, int *color, const int row_start, const int row_end, bool *used_colors, const int start_color, int *queue, int &tail, const bool bfs, const bool use_ido_for_bfs) { //end compile time recursion } }; /* * This algorithm runs the Greedy Algorithm using, as coloring order, the order that comes from Breadth-First-Search visit. * * It's strictly linear time in the number of edges, if the graph is connected. * It's has been measured to be much faster than the IDO ordering (next algorithm), while keeping its ability to color bipartite graphs optimally. * * */ template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Serial_Greedy_BFS_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::run_cpu_serial_bfs_greedy( Matrix_d &A, int *color, int *queue, int *A_row_offsets, int *A_col_indices ) { bool use_ido_for_bfs = false; bool bfs = true; int num_rows = A.get_num_rows(); int num_nonzero = A.get_num_nz(); //initialize to 0 for (int r = 0; r < num_rows; r++) { color[r] = 0; } int max_color_used = 0; int head = 0; int tail = 0; //add the central node to the BFS queue queue[tail++] = num_rows / 2; color[num_rows / 2] = -1; //mark visited const int MAXCOLORS = 128; //if need more colors, it changes start_color += MAXCOLORS int start_color = 0; int num_uncolored = num_rows; int all_colored_before = 0; while (num_uncolored > 0) //until all colored { if (head == tail) //nothing to do and not all colored: it means that the graph is disconnected, restart from an uncolored node { //TODO find something more efficient. 
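// (Added comment) Disconnected graph: scan forward from the last fully-colored prefix,
// reseed the BFS queue with the first uncolored vertex found, and resume coloring.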
for (int i = all_colored_before; i < num_rows; i++) { if (color[i] == 0) { all_colored_before = i; color[i] = -1; //mark as visited queue[tail++] = i; break; } } if (head == tail) { break; } //continue; } int r = queue[head]; //peek BFS queue ++head; //Task cancellation was here, see P4 int row_start = A_row_offsets[r ]; int row_end = A_row_offsets[r + 1]; //used colors buffer bool used_colors[MAXCOLORS]; for (int i = 0; i < MAXCOLORS; i++) { used_colors[i] = 0; } if (this->m_coloring_level == 1) { //duplicate code for clarity: a "neighbor_checker<1,0>::check(&A_row_offsets[0],&A_col_indices[0],&color[0],row_start,row_end,used_colors,start_color, queue, tail, bfs, use_ido_for_bfs);" would be enough for (int row_it = row_start; row_it < row_end; row_it++) { int c = A_col_indices[row_it]; int col = color[c]; if (col - start_color >= 0) //if color is amongst the tracked ones { used_colors[col - start_color] = 1; //use the color } if (col == 0) //uncolored node discovered { color[c] = -1; //mark: visited state: each node is enqueued only once queue[tail] = c; //enqueue column tail++; } } } else if (this->m_coloring_level == 2) { neighbor_checker<2, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } else if (this->m_coloring_level == 3) { neighbor_checker<3, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } else if (this->m_coloring_level == 4) { neighbor_checker<4, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } else if (this->m_coloring_level == 5) { neighbor_checker<5, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } //unsigned long long int available_colors = ~used_colors; //int my_color = 64 - utils::bfind( ~used_colors ); //find the first available color int my_color = MAXCOLORS; for (int i = 1; i < MAXCOLORS; i++) { if (used_colors[i] == 0) { my_color = i + start_color; break; } } if (my_color - start_color >= MAXCOLORS - 1) { start_color += MAXCOLORS; //no available color } --num_uncolored; color[r] = my_color; if (my_color > max_color_used) { max_color_used = my_color; //track the max color used, for counting colors } } this->m_num_colors = max_color_used + 1; } /* * This algorithm runs the Greedy Algorithm using, as coloring order, the Incidence-Degree-Ordering: * the next vertex to be colored is the one with the MOST colored neighbors. 
* */ template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Serial_Greedy_BFS_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::run_cpu_serial_IDO_ordering_greedy( Matrix_d &A, int *color, int *queue, int *A_row_offsets, int *A_col_indices ) { bool use_ido_for_bfs = false; bool bfs = true; int num_rows = A.get_num_rows(); int num_nonzero = A.get_num_nz(); for (int r = 0; r < num_rows; r++) { color[r] = 0; } int max_color_used = 0; int head = 0; int tail = 0; queue[tail++] = num_rows / 2; color[num_rows / 2] = -1; //mark visited const int MAXCOLORS = 128; //if need more colors, change start_color += MAXCOLORS int start_color = 0; typedef std::pair<int, int> ido_entry; std::set<ido_entry> ido_priority_queue; std::vector<int> ido_vertex_colored_count; tail = num_rows; ido_vertex_colored_count.resize(num_rows); ido_priority_queue.insert(ido_entry(0, num_rows / 2)); int num_uncolored = num_rows; int all_colored_before = 0; //all vertices all colored, before this while (num_uncolored > 0) { if (head == tail) //nothing to do { //TODO find something more efficient. This happens only if the graph is disconnected. for (int i = all_colored_before; i < num_rows; i++) { if (color[i] == 0) { all_colored_before = i; color[i] = -1; //mark as visited queue[tail++] = i; break; } } if (head == tail) { break; } //continue; } int r = 0; if (ido_priority_queue.empty()) { break; } //ido takes the vertex with the most colored neighbors std::pair<int, int> to_process = *ido_priority_queue.rbegin() ; //peek vertex from queue ido_priority_queue.erase(to_process); r = ( to_process ).second; ++head; //Task cancellation was here, see P4 bool used_colors[MAXCOLORS]; for (int i = 0; i < MAXCOLORS; i++) { used_colors[i] = 0; } int row_start = A_row_offsets[r ]; int row_end = A_row_offsets[r + 1]; if (this->m_coloring_level == 1) { neighbor_checker<1, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } else if (this->m_coloring_level == 2) { neighbor_checker<2, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } else if (this->m_coloring_level == 3) { neighbor_checker<3, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } else if (this->m_coloring_level == 4) { neighbor_checker<4, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } else if (this->m_coloring_level == 5) { neighbor_checker<5, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, use_ido_for_bfs); } //unsigned long long int available_colors = ~used_colors; //int my_color = 64 - utils::bfind( ~used_colors ); int my_color = MAXCOLORS; for (int i = 1; i < MAXCOLORS; i++) { if (used_colors[i] == 0) { my_color = i + start_color; break; } } if (my_color - start_color >= MAXCOLORS - 1) { start_color += MAXCOLORS; } //if(color[r] <= 0) --num_uncolored; color[r] = my_color; if (my_color > max_color_used) { max_color_used = my_color; } //Priority queue update: update my neighbors' colored neigbors count for (int row_it = row_start; row_it < row_end; row_it++) { int col_id = A_col_indices[row_it]; int count = ido_vertex_colored_count[col_id]; std::pair<int, int> elem(count, col_id); if 
(ido_priority_queue.count(elem)) { ido_priority_queue.erase(elem); } elem.first = count + 1; if (color[col_id] == 0) { ido_priority_queue.insert(elem); ido_vertex_colored_count[col_id] = count + 1; } } } this->m_num_colors = max_color_used + 1; } /* * This algorithm runs the Greedy Algorithm using, as coloring order, the same ordering as in PARALLEL_GREEDY. * This is to have a CPU algorithm performing as similar as possible as PARALLEL_GREEDY. * This can be easily parallelized. * Currently, it works for any-ring but it's not equivalent to PARALLEL_GREEDY (the order of visit is 1-ring). * */ template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Serial_Greedy_BFS_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::run_cpu_parallel_greedy_equivalent( Matrix_d &A, int *color, int *queue, int *A_row_offsets, int *A_col_indices ) { bool bfs = false; int num_rows = A.get_num_rows(); int num_uncol = num_rows; for (int r = 0; r < num_rows; r++) { color[r] = 0; queue[r] = r; } int max_color_used = 0; const int MAXCOLORS = 128; //if need more colors, change start_color += MAXCOLORS int start_color = 0; while (num_uncol > 0) { int tail = num_uncol; for (int it = 0; it < num_rows; it++) { int r = it;//this->queue[it]; //if(skip[r]) continue; if (color[r] > 0) { continue; } bool is_r_max = true; int hash_r = hash_function(r, 0); int row_start = A_row_offsets[r ]; int row_end = A_row_offsets[r + 1]; for (int row_it = row_start; row_it < row_end; row_it++) { int c = A_col_indices[row_it]; int col = color[c]; int hash_c = hash_function(c, 0); if (col == 0 && (hash_c > hash_r || (hash_c == hash_r && c > r))) { is_r_max = false; break; } } if (is_r_max == false) { continue; } bool used_colors[MAXCOLORS]; for (int i = 0; i < MAXCOLORS; i++) { used_colors[i] = 0; } if (this->m_coloring_level == 1) { neighbor_checker<1, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, 0); } else if (this->m_coloring_level == 2) { neighbor_checker<2, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, 0); } else if (this->m_coloring_level == 3) { neighbor_checker<3, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, 0); } else if (this->m_coloring_level == 4) { neighbor_checker<4, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, 0); } else if (this->m_coloring_level == 5) { neighbor_checker<5, 0>::check(&A_row_offsets[0], &A_col_indices[0], &color[0], row_start, row_end, used_colors, start_color, queue, tail, bfs, 0); } int my_color = MAXCOLORS; #pragma unroll 8 for (int i = 1; i < MAXCOLORS; i++) { if (used_colors[i] == 0) { my_color = i + start_color; break; } } if (my_color - start_color >= MAXCOLORS - 1) { start_color += MAXCOLORS; } color[ r ] = my_color; --tail; if (my_color > max_color_used) { max_color_used = my_color; } } num_uncol = tail; } this->m_num_colors = max_color_used + 1; } //helper structure for std::sort struct index_color { int index; int color; bool operator<(const index_color &b) const { return color < b.color; } }; template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Serial_Greedy_BFS_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::run_createColorArrays_on_cpu(int num_rows, int *color, int *sorted_rows_by_color) { std::vector<index_color> 
index_colors(num_rows); std::vector<int> offsets_rows_per_color(this->m_num_colors + 1); for (int i = 0; i < num_rows; i++) { index_colors[i].index = i; index_colors[i].color = color[i]; } { std::stable_sort(index_colors.begin(), index_colors.end()); } int prev = -1; for (int i = 0; i < num_rows; i++) { sorted_rows_by_color[i] = index_colors[i].index; int col = index_colors[i].color; if (col != prev) { offsets_rows_per_color[col] = i; } prev = col; } offsets_rows_per_color[this->m_num_colors] = num_rows; this->m_offsets_rows_per_color.resize(this->m_num_colors + 1); for (int i = 0; i < this->m_num_colors + 1; i++) { this->m_offsets_rows_per_color[i] = offsets_rows_per_color[i]; } } /* * Memcopies to a pinned buffer: to avoid expensive allocations of arbitrary size pinned memory. Very efficient if the allocated buffer fits in the pinned pool. * */ void copy_using_buffer_d2h(void *dst, void *src, size_t size) { static cudaEvent_t event = 0; cudaStream_t stream = thrust::global_thread_handle::get_stream(); cudaEventCreateWithFlags(&event, cudaEventDisableTiming); //TODO it never gets destroyed, allocate somewhere safer void *buffer = 0; size_t buffer_size = std::min((size_t)(1024 * 1024 * 1), size); thrust::global_thread_handle::cudaMallocHost((void **)&buffer, buffer_size); size_t offset = 0; while (offset < size) { size_t end = offset + buffer_size; if (end > size) { end = size; } cudaMemcpyAsync(buffer, ((unsigned char *)src) + offset, end - offset, cudaMemcpyDeviceToHost, stream); cudaEventRecord(event, stream); cudaEventSynchronize(event); memcpy(((unsigned char *)dst) + offset, buffer, end - offset); offset = end; } thrust::global_thread_handle::cudaFreeHost(buffer); } void copy_using_buffer_h2d(void *dst, void *src, size_t size) { static cudaEvent_t event = 0; if (event == 0) { cudaEventCreateWithFlags(&event, cudaEventDisableTiming); //TODO it never gets destroyed, allocate somewhere safer } cudaStream_t stream = thrust::global_thread_handle::get_stream(); void *buffer = 0; size_t buffer_size = std::min((size_t)(1024 * 1024 * 1), size); thrust::global_thread_handle::cudaMallocHost((void **)&buffer, buffer_size); size_t offset = 0; while (offset < size) { size_t end = offset + buffer_size; if (end > size) { end = size; } memcpy(buffer, ((unsigned char *)src) + offset, end - offset); cudaMemcpyAsync( ((unsigned char *)dst) + offset, buffer, end - offset, cudaMemcpyHostToDevice, stream); cudaEventRecord(event, stream); cudaEventSynchronize(event); offset = end; } thrust::global_thread_handle::cudaFreeHost(buffer); } // Block version template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Serial_Greedy_BFS_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::colorMatrix( Matrix_d &A ) { typedef typename Matrix<TemplateConfig<AMGX_host, V, M, I> >::IVector IVector_h; ViewType oldView = A.currentView(); if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); } else { A.setViewExterior(); } //initialize auxillary data int num_rows = A.get_num_rows(); int *color = new int[num_rows]; int *A_row_offsets = new int[A.row_offsets.size()]; int *A_col_indices = new int[A.col_indices.size()]; int *sorted_rows_by_color = new int[num_rows]; int *queue = new int[num_rows * 2]; //Perforn D2H copies copy_using_buffer_d2h(A_row_offsets, thrust::raw_pointer_cast(A.row_offsets.data()), A.row_offsets.size()*sizeof(int)); copy_using_buffer_d2h(A_col_indices, thrust::raw_pointer_cast(A.col_indices.data()), A.col_indices.size()*sizeof(int)); //Dispatching various cases int amg_level 
= A.template getParameter<int>("level"); float sparsity = ((float)A.get_num_nz()) / float(A.get_num_rows() * A.get_num_rows()); //Config argument. bool use_ido_for_bfs = this->m_coloring_custom_arg == "IDO"; //Incidence-degree-ordering, use to benchmark it against BFS ordering bool dummy_coloring = this->m_coloring_custom_arg == "DUMMY_COLORING"; //Dont' color first 3 levels: emulates JACOBI there bool use_bfs = this->m_coloring_custom_arg != "PARALLEL_GREEDY_EQUIVALENT"; //Run a CPU algorithm which emulated PARALLEL_GREEDY and gives same results bool cpu_color_arrays = true; //Small and not sparse matrix: use either parallel greedy equivalent (if NOSERIALFIRSTLEVEL specified) //or a "serial coloring of the rows" if (sparsity > 0.1 && num_rows < 32) { if (this->m_coloring_custom_arg == "NOSERIALFIRSTLEVEL") // || num_rows > 32) { run_cpu_parallel_greedy_equivalent(A, color, queue, A_row_offsets, A_col_indices); } else { for (int i = 0; i < num_rows; i++) { color[i] = i + 1; } this->m_num_colors = num_rows + 1; } /* //BFS 'natural' visiting order as color. for(int i=0; i<num_rows; i++) color[i] = 0; int head = 0; int tail = 0; queue[tail++] = 0; int index = 0; while(tail > head) { int r = queue[head]; color[r] = head; ++head; int row_start = A_row_offsets[r]; int row_end = A_row_offsets[r+1]; for(int row_it=row_start; row_it<row_end; row_it++) { int c = A_col_indices[row_it]; if(color[c] == 0) { queue[tail++] = c; } } } return;*/ } //if dummy coloring is specified: color 3 first levels (1,2,3) with color 1 (emulates a jacobi smoother) else if (dummy_coloring && amg_level < 4) { for (int i = 0; i < num_rows; i++) { color[i] = 1; } this->m_num_colors = 2; } //run the algorithm that emulates PARALLEL_GREEDY on host else if (use_bfs == 0) { run_cpu_parallel_greedy_equivalent(A, color, queue, A_row_offsets, A_col_indices); } //Use incidence degree ordering as coloring order else if (use_ido_for_bfs) { run_cpu_serial_IDO_ordering_greedy(A, color, &queue[0], A_row_offsets, A_col_indices ); } //default behaviour else { run_cpu_serial_bfs_greedy(A, color, &queue[0], A_row_offsets, A_col_indices ); } this->m_row_colors.resize(A.get_num_rows()); if (cpu_color_arrays) { this->m_sorted_rows_by_color.resize(A.get_num_rows()); this->run_createColorArrays_on_cpu(A.get_num_rows(), color, sorted_rows_by_color); copy_using_buffer_h2d(thrust::raw_pointer_cast(this->m_sorted_rows_by_color.data()), sorted_rows_by_color, A.get_num_rows()*sizeof(int)); } //copies color -> m_row_colors, using a pinned memory buffer copy_using_buffer_h2d(thrust::raw_pointer_cast(this->m_row_colors.data()), color, A.get_num_rows()*sizeof(int)); delete color; delete A_row_offsets; delete A_col_indices; delete sorted_rows_by_color; delete queue; A.setView(oldView); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Serial_Greedy_BFS_MatrixColoring<TemplateConfig<AMGX_device, V, M, I> >::createColorArrays(Matrix<TConfig_d> &A) { if (this->ready_for_coloring_arrays == false) { return; } MatrixColoring<TConfig_d>::createColorArrays(A); } template< class T_Config > Serial_Greedy_BFS_MatrixColoring_Base<T_Config>::Serial_Greedy_BFS_MatrixColoring_Base( AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope) { fallback_config = cfg; fallback_config_scope = cfg_scope; fallback_config.setParameter("matrix_coloring_scheme", std::string("PARALLEL_GREEDY"), fallback_config_scope); //TODO let fallback method choosable with param ready_for_coloring_arrays = true; //if( 
this->m_coloring_level != 1 && this->m_coloring_level != 2 && this->m_coloring_level != 3) if ( this->m_coloring_level > 5 || this->m_coloring_level < 0) { FatalError( "Not implemented for coloring_level != 1", AMGX_ERR_NOT_SUPPORTED_TARGET ); } m_coloring_custom_arg = cfg.AMG_Config::getParameter<std::string>( "coloring_custom_arg", cfg_scope ); m_coloring_try_remove_last_color_ = cfg.AMG_Config::getParameter<int>( "coloring_try_remove_last_colors", cfg_scope ); } #define AMGX_CASE_LINE(CASE) template class Serial_Greedy_BFS_MatrixColoring_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Serial_Greedy_BFS_MatrixColoring<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // end namespace amgx
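/*
 * Illustration (not AMGX code): a stripped-down, 1-ring version of the greedy BFS coloring
 * that Serial_Greedy_BFS_MatrixColoring::run_cpu_serial_bfs_greedy implements above.
 * Vertices are visited in BFS order over the CSR graph; each vertex takes the smallest color
 * not used by an already-colored neighbor, and the search is reseeded from the next uncolored
 * vertex when a connected component is exhausted. The function name greedy_bfs_color is an
 * assumption, and the fixed MAXCOLORS cap stands in for the start_color shifting that the
 * real implementation uses when it runs out of colors.
 */
#include <vector>
#include <queue>

static int greedy_bfs_color(const std::vector<int>& row_offsets,
                            const std::vector<int>& col_indices,
                            std::vector<int>& color)
{
  const int n = static_cast<int>(row_offsets.size()) - 1;
  const int MAXCOLORS = 128;               // same cap as the code above
  color.assign(n, 0);                      // 0 = uncolored
  int max_color = 0;

  std::queue<int> q;
  for (int seed = 0; seed < n; ++seed) {   // restart once per connected component
    if (color[seed] != 0) continue;
    color[seed] = -1;                      // -1 = discovered but not yet colored
    q.push(seed);
    while (!q.empty()) {
      const int r = q.front();
      q.pop();
      std::vector<char> used(MAXCOLORS, 0);
      for (int it = row_offsets[r]; it < row_offsets[r + 1]; ++it) {
        const int c = col_indices[it];
        if (color[c] > 0 && color[c] < MAXCOLORS) used[color[c]] = 1;  // neighbor color taken
        if (color[c] == 0) { color[c] = -1; q.push(c); }               // enqueue new neighbor
      }
      int my_color = 1;                    // smallest color unused by any neighbor
      while (my_color < MAXCOLORS && used[my_color]) ++my_color;
      color[r] = my_color;
      if (my_color > max_color) max_color = my_color;
    }
  }
  return max_color + 1;                    // color count, matching m_num_colors = max + 1 above
}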
* \file * cub::BlockScanTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan. */ #pragma once #include <iterator> #include "scan_tiles_types.cuh" #include "../../block/block_load.cuh" #include "../../block/block_store.cuh" #include "../../block/block_scan.cuh" #include "../../grid/grid_queue.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Tuning policy types ******************************************************************************/ /** * Tuning policy for BlockScanTiles */ template < int _BLOCK_THREADS, int _ITEMS_PER_THREAD, BlockLoadAlgorithm _LOAD_ALGORITHM, bool _LOAD_WARP_TIME_SLICING, PtxLoadModifier _LOAD_MODIFIER, BlockStoreAlgorithm _STORE_ALGORITHM, bool _STORE_WARP_TIME_SLICING, BlockScanAlgorithm _SCAN_ALGORITHM> struct BlockScanTilesPolicy { enum { BLOCK_THREADS = _BLOCK_THREADS, ITEMS_PER_THREAD = _ITEMS_PER_THREAD, LOAD_WARP_TIME_SLICING = _LOAD_WARP_TIME_SLICING, STORE_WARP_TIME_SLICING = _STORE_WARP_TIME_SLICING, }; static const BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; static const PtxLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; static const BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; }; /****************************************************************************** * Thread block abstractions ******************************************************************************/ /** * \brief BlockScanTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide prefix scan. * * Implements a single-pass "domino" strategy with adaptive prefix lookback. 
*/ template < typename BlockScanTilesPolicy, ///< Tuning policy typename InputIteratorRA, ///< Input iterator type typename OutputIteratorRA, ///< Output iterator type typename ScanOp, ///< Scan functor type typename Identity, ///< Identity element type (cub::NullType for inclusive scan) typename SizeT> ///< Offset integer type struct BlockScanTiles { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- // Data type of input iterator typedef typename std::iterator_traits<InputIteratorRA>::value_type T; // Constants enum { INCLUSIVE = Equals<Identity, NullType>::VALUE, // Inclusive scan if no identity type is provided BLOCK_THREADS = BlockScanTilesPolicy::BLOCK_THREADS, ITEMS_PER_THREAD = BlockScanTilesPolicy::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; // Block load type typedef BlockLoad< InputIteratorRA, BlockScanTilesPolicy::BLOCK_THREADS, BlockScanTilesPolicy::ITEMS_PER_THREAD, BlockScanTilesPolicy::LOAD_ALGORITHM, BlockScanTilesPolicy::LOAD_MODIFIER, BlockScanTilesPolicy::LOAD_WARP_TIME_SLICING> BlockLoadT; // Block store type typedef BlockStore< OutputIteratorRA, BlockScanTilesPolicy::BLOCK_THREADS, BlockScanTilesPolicy::ITEMS_PER_THREAD, BlockScanTilesPolicy::STORE_ALGORITHM, STORE_DEFAULT, BlockScanTilesPolicy::STORE_WARP_TIME_SLICING> BlockStoreT; // Tile status descriptor type typedef ScanTileDescriptor<T> ScanTileDescriptorT; // Block scan type typedef BlockScan< T, BlockScanTilesPolicy::BLOCK_THREADS, BlockScanTilesPolicy::SCAN_ALGORITHM> BlockScanT; // Callback type for obtaining inter-tile prefix during block scan typedef DeviceScanBlockPrefixOp<T, ScanOp> InterblockPrefixOp; // Shared memory type for this threadblock struct _TempStorage { union { typename BlockLoadT::TempStorage load; // Smem needed for tile loading typename BlockStoreT::TempStorage store; // Smem needed for tile storing struct { typename InterblockPrefixOp::TempStorage prefix; // Smem needed for cooperative prefix callback typename BlockScanT::TempStorage scan; // Smem needed for tile scanning }; }; SizeT tile_idx; // Shared tile index }; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- _TempStorage &temp_storage; ///< Reference to temp_storage InputIteratorRA d_in; ///< Input data OutputIteratorRA d_out; ///< Output data ScanOp scan_op; ///< Binary scan operator Identity identity; ///< Identity element //--------------------------------------------------------------------- // Block scan utility methods (first tile) //--------------------------------------------------------------------- /** * Exclusive scan specialization */ template <typename _ScanOp, typename _Identity> __device__ __forceinline__ void ScanBlock(T (&items)[ITEMS_PER_THREAD], _ScanOp scan_op, _Identity identity, T& block_aggregate) { BlockScanT(temp_storage.scan).ExclusiveScan(items, items, identity, scan_op, block_aggregate); } /** * Exclusive sum specialization */ template <typename _Identity> __device__ __forceinline__ void ScanBlock(T (&items)[ITEMS_PER_THREAD], Sum scan_op, _Identity identity, T& block_aggregate) { BlockScanT(temp_storage.scan).ExclusiveSum(items, items, block_aggregate); } /** * Inclusive scan specialization */ template <typename _ScanOp> __device__ __forceinline__ void 
ScanBlock(T (&items)[ITEMS_PER_THREAD], _ScanOp scan_op, NullType identity, T& block_aggregate) { BlockScanT(temp_storage.scan).InclusiveScan(items, items, scan_op, block_aggregate); } /** * Inclusive sum specialization */ __device__ __forceinline__ void ScanBlock(T (&items)[ITEMS_PER_THREAD], Sum scan_op, NullType identity, T& block_aggregate) { BlockScanT(temp_storage.scan).InclusiveSum(items, items, block_aggregate); } //--------------------------------------------------------------------- // Block scan utility methods (subsequent tiles) //--------------------------------------------------------------------- /** * Exclusive scan specialization (with prefix from predecessors) */ template <typename _ScanOp, typename _Identity, typename PrefixCallback> __device__ __forceinline__ void ScanBlock(T (&items)[ITEMS_PER_THREAD], _ScanOp scan_op, _Identity identity, T& block_aggregate, PrefixCallback &prefix_op) { BlockScanT(temp_storage.scan).ExclusiveScan(items, items, identity, scan_op, block_aggregate, prefix_op); } /** * Exclusive sum specialization (with prefix from predecessors) */ template <typename _Identity, typename PrefixCallback> __device__ __forceinline__ void ScanBlock(T (&items)[ITEMS_PER_THREAD], Sum scan_op, _Identity identity, T& block_aggregate, PrefixCallback &prefix_op) { BlockScanT(temp_storage.scan).ExclusiveSum(items, items, block_aggregate, prefix_op); } /** * Inclusive scan specialization (with prefix from predecessors) */ template <typename _ScanOp, typename PrefixCallback> __device__ __forceinline__ void ScanBlock(T (&items)[ITEMS_PER_THREAD], _ScanOp scan_op, NullType identity, T& block_aggregate, PrefixCallback &prefix_op) { BlockScanT(temp_storage.scan).InclusiveScan(items, items, scan_op, block_aggregate, prefix_op); } /** * Inclusive sum specialization (with prefix from predecessors) */ template <typename PrefixCallback> __device__ __forceinline__ void ScanBlock(T (&items)[ITEMS_PER_THREAD], Sum scan_op, NullType identity, T& block_aggregate, PrefixCallback &prefix_op) { BlockScanT(temp_storage.scan).InclusiveSum(items, items, block_aggregate, prefix_op); } //--------------------------------------------------------------------- // Constructor //--------------------------------------------------------------------- // Constructor __device__ __forceinline__ BlockScanTiles( TempStorage &temp_storage, ///< Reference to temp_storage InputIteratorRA d_in, ///< Input data OutputIteratorRA d_out, ///< Output data ScanOp scan_op, ///< Binary scan operator Identity identity) ///< Identity element : temp_storage(temp_storage.Alias()), d_in(d_in), d_out(d_out), scan_op(scan_op), identity(identity) {} //--------------------------------------------------------------------- // Domino scan //--------------------------------------------------------------------- /** * Process a tile of input (domino scan) */ template <bool FULL_TILE> __device__ __forceinline__ void ConsumeTile( SizeT num_items, ///< Total number of input items int tile_idx, ///< Tile index SizeT block_offset, ///< Tile offset ScanTileDescriptorT *d_tile_status) ///< Global list of tile status { // Load items T items[ITEMS_PER_THREAD]; if (FULL_TILE) BlockLoadT(temp_storage.load).Load(d_in + block_offset, items); else BlockLoadT(temp_storage.load).Load(d_in + block_offset, items, num_items - block_offset); __syncthreads(); T block_aggregate; if (tile_idx == 0) { ScanBlock(items, scan_op, identity, block_aggregate); // Update tile status if there are successor tiles if (FULL_TILE && (threadIdx.x == 0)) 
ScanTileDescriptorT::SetPrefix(d_tile_status, block_aggregate); } else { InterblockPrefixOp prefix_op(d_tile_status, temp_storage.prefix, scan_op, tile_idx); ScanBlock(items, scan_op, identity, block_aggregate, prefix_op); } __syncthreads(); // Store items if (FULL_TILE) BlockStoreT(temp_storage.store).Store(d_out + block_offset, items); else BlockStoreT(temp_storage.store).Store(d_out + block_offset, items, num_items - block_offset); } /** * Dequeue and scan tiles of items as part of a domino scan */ __device__ __forceinline__ void ConsumeTiles( int num_items, ///< Total number of input items GridQueue<int> queue, ///< Queue descriptor for assigning tiles of work to thread blocks ScanTileDescriptorT *d_tile_status) ///< Global list of tile status { #if CUB_PTX_ARCH < 200 // No concurrent kernels allowed and blocks are launched in increasing order, so just assign one tile per block (up to 65K blocks) int tile_idx = blockIdx.x; SizeT block_offset = SizeT(TILE_ITEMS) * tile_idx; if (block_offset + TILE_ITEMS <= num_items) ConsumeTile<true>(num_items, tile_idx, block_offset, d_tile_status); else if (block_offset < num_items) ConsumeTile<false>(num_items, tile_idx, block_offset, d_tile_status); #else // Get first tile if (threadIdx.x == 0) temp_storage.tile_idx = queue.Drain(1); __syncthreads(); int tile_idx = temp_storage.tile_idx; SizeT block_offset = SizeT(TILE_ITEMS) * tile_idx; while (block_offset + TILE_ITEMS <= num_items) { // Consume full tile ConsumeTile<true>(num_items, tile_idx, block_offset, d_tile_status); // Get next tile if (threadIdx.x == 0) temp_storage.tile_idx = queue.Drain(1); __syncthreads(); tile_idx = temp_storage.tile_idx; block_offset = SizeT(TILE_ITEMS) * tile_idx; } // Consume a partially-full tile if (block_offset < num_items) { ConsumeTile<false>(num_items, tile_idx, block_offset, d_tile_status); } #endif } //--------------------------------------------------------------------- // Even-share scan //--------------------------------------------------------------------- /** * Process a tile of input */ template < bool FULL_TILE, bool FIRST_TILE> __device__ __forceinline__ void ConsumeTile( SizeT block_offset, ///< Tile offset RunningBlockPrefixOp<T> &prefix_op, ///< Running prefix operator int valid_items = TILE_ITEMS) ///< Number of valid items in the tile { // Load items T items[ITEMS_PER_THREAD]; if (FULL_TILE) BlockLoadT(temp_storage.load).Load(d_in + block_offset, items); else BlockLoadT(temp_storage.load).Load(d_in + block_offset, items, valid_items); __syncthreads(); // Block scan T block_aggregate; if (FIRST_TILE) { ScanBlock(items, scan_op, identity, block_aggregate); prefix_op.running_total = block_aggregate; } else { ScanBlock(items, scan_op, identity, block_aggregate, prefix_op); } __syncthreads(); // Store items if (FULL_TILE) BlockStoreT(temp_storage.store).Store(d_out + block_offset, items); else BlockStoreT(temp_storage.store).Store(d_out + block_offset, items, valid_items); } /** * Scan a consecutive share of input tiles */ __device__ __forceinline__ void ConsumeTiles( SizeT block_offset, ///< [in] Threadblock begin offset (inclusive) SizeT block_oob) ///< [in] Threadblock end offset (exclusive) { RunningBlockPrefixOp<T> prefix_op; if (block_offset + TILE_ITEMS <= block_oob) { // Consume first tile of input (full) ConsumeTile<true, true>(block_offset, prefix_op); block_offset += TILE_ITEMS; // Consume subsequent full tiles of input while (block_offset + TILE_ITEMS <= block_oob) { ConsumeTile<true, false>(block_offset, prefix_op); block_offset += 
TILE_ITEMS; } // Consume a partially-full tile if (block_offset < block_oob) { int valid_items = block_oob - block_offset; ConsumeTile<false, false>(block_offset, prefix_op, valid_items); } } else { // Consume the first tile of input (partially-full) int valid_items = block_oob - block_offset; ConsumeTile<false, true>(block_offset, prefix_op, valid_items); } } /** * Scan a consecutive share of input tiles, seeded with the specified prefix value */ __device__ __forceinline__ void ConsumeTiles( SizeT block_offset, ///< [in] Threadblock begin offset (inclusive) SizeT block_oob, ///< [in] Threadblock end offset (exclusive) T prefix) ///< [in] The prefix to apply to the scan segment { RunningBlockPrefixOp<T> prefix_op; prefix_op.running_total = prefix; // Consume full tiles of input while (block_offset + TILE_ITEMS <= block_oob) { ConsumeTile<true, false>(block_offset, prefix_op); block_offset += TILE_ITEMS; } // Consume a partially-full tile if (block_offset < block_oob) { int valid_items = block_oob - block_offset; ConsumeTile<false, false>(block_offset, prefix_op, valid_items); } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
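// A minimal host-side sketch of the even-share scan loop above: ConsumeTiles
// (block_offset, block_oob) walks TILE_ITEMS-sized tiles and carries the block
// aggregate forward the way RunningBlockPrefixOp does, here assuming Sum as the
// scan operator and 0 as the identity. The helper names below
// (tile_exclusive_sum, consume_tiles_host) are hypothetical stand-ins for the
// BlockLoad/BlockScan/BlockStore machinery, not part of CUB.
#include <algorithm>
#include <cstddef>
#include <vector>

static int tile_exclusive_sum(std::vector<int>& data, std::size_t begin,
                              std::size_t end, int prefix)
{
    int running = prefix;
    for (std::size_t i = begin; i < end; ++i)
    {
        int v = data[i];
        data[i] = running;      // exclusive: sum of everything strictly before i
        running += v;
    }
    return running - prefix;    // tile aggregate (what BlockScanT reports)
}

static void consume_tiles_host(std::vector<int>& data, std::size_t block_offset,
                               std::size_t block_oob, std::size_t tile_items)
{
    int  running_total = 0;     // plays the role of RunningBlockPrefixOp
    bool first_tile    = true;
    while (block_offset < block_oob)
    {
        std::size_t tile_end  = std::min(block_offset + tile_items, block_oob);
        int         aggregate = tile_exclusive_sum(data, block_offset, tile_end,
                                                   first_tile ? 0 : running_total);
        running_total = first_tile ? aggregate : running_total + aggregate;
        first_tile    = false;
        block_offset  = tile_end;
    }
}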
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" // Rasmusson2013 #define BLOCK_SIZE 6 #define PATCH_SIZE (BLOCK_SIZE + 2) using namespace cv; using namespace std; namespace { // This kernel makes use of a (BLOCK_SIZE + 2) X (BLOCK_SIZE + 2) array in shared memory // The paper actually says (BLOCK_SIZE + 1) X (BLOCK_SIZE + 1), but I can't manage to make it work that way __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) { const unsigned r = blockIdx.y * BLOCK_SIZE + threadIdx.y; const unsigned c = blockIdx.x * BLOCK_SIZE + threadIdx.x; //const unsigned img_index = r * img.step + c; const unsigned labels_index = r * (labels.step / labels.elem_size) + c; const unsigned img_patch_index = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; const unsigned local_linear_index = threadIdx.y * BLOCK_SIZE + threadIdx.x; __shared__ unsigned char img_patch[PATCH_SIZE * PATCH_SIZE]; const bool in_limits = r < img.rows&& c < img.cols; // Load 34 x 34 matrix from input image // Convert local_linear_index to coordinates of the 34 x 34 matrix // Round 1 const int patch_r1 = local_linear_index / PATCH_SIZE; const int patch_c1 = local_linear_index % PATCH_SIZE; const int patch_img_r1 = blockIdx.y * BLOCK_SIZE - 1 + patch_r1; const int patch_img_c1 = blockIdx.x * BLOCK_SIZE - 1 + patch_c1; const int patch_img_index1 = patch_img_r1 * img.step + patch_img_c1; const bool patch_in_limits1 = patch_img_r1 >= 0 && patch_img_c1 >= 0 && patch_img_r1 < img.rows&& patch_img_c1 < img.cols; img_patch[patch_r1 * PATCH_SIZE + patch_c1] = patch_in_limits1 ? img[patch_img_index1] : 0; // Round 2 const int patch_r2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) / PATCH_SIZE; const int patch_c2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) % PATCH_SIZE; if (patch_r2 < PATCH_SIZE) { const int patch_img_r2 = blockIdx.y * BLOCK_SIZE - 1 + patch_r2; const int patch_img_c2 = blockIdx.x * BLOCK_SIZE - 1 + patch_c2; const int patch_img_index2 = patch_img_r2 * img.step + patch_img_c2; const bool patch_in_limits2 = patch_img_r2 >= 0 && patch_img_c2 >= 0 && patch_img_r2 < img.rows&& patch_img_c2 < img.cols; img_patch[patch_r2 * PATCH_SIZE + patch_c2] = patch_in_limits2 ? 
img[patch_img_index2] : 0; } __syncthreads(); if (in_limits) { unsigned label = 0; if (img_patch[img_patch_index]) { label = labels_index + 1; unsigned int connections = 0; // Enrich label with connections information if (img_patch[img_patch_index - PATCH_SIZE - 1]) { connections |= (1u << 31); } if (img_patch[img_patch_index - PATCH_SIZE]) { connections |= (1u << 30); } if (img_patch[img_patch_index - PATCH_SIZE + 1]) { connections |= (1u << 29); } if (img_patch[img_patch_index + 1]) { connections |= (1u << 28); } // Only half the connections are recorded //if (img_patch[img_patch_index + PATCH_SIZE + 1]) { // connections |= (1 << 3); //} //if (img_patch[img_patch_index + PATCH_SIZE]) { // connections |= (1 << 2); //} //if (img_patch[img_patch_index + PATCH_SIZE - 1]) { // connections |= (1 << 1); //} //if (img_patch[img_patch_index - 1]) { // connections |= (1 << 0); //} label |= connections; // It can't be done this way at compile time, because math functions are not defined constexpr // Macros could be used instead, and a technique for loop unrolling using templates is described here // https://stackoverflow.com/questions/15275023/clang-force-loop-unroll-for-specific-loop/15275701 //constexpr double pi = 3.14159265358979323846; //for (double angle = 0; angle < 2 * pi; angle += pi / 4) { // unsigned x = ceil(cos(angle)); // unsigned y = ceil(sin(angle)); //} } labels[labels_index] = label; } } __global__ void Propagate(cuda::PtrStepSzi labels, char* changed) { const unsigned r = blockIdx.y * BLOCK_SIZE + threadIdx.y; const unsigned c = blockIdx.x * BLOCK_SIZE + threadIdx.x; const unsigned labels_index = r * (labels.step / labels.elem_size) + c; const unsigned labels_patch_index = (threadIdx.y + 1) * PATCH_SIZE + threadIdx.x + 1; const unsigned local_linear_index = threadIdx.y * BLOCK_SIZE + threadIdx.x; __shared__ unsigned labels_patch[PATCH_SIZE * PATCH_SIZE]; const bool in_limits = r < labels.rows&& c < labels.cols; // Load 34 x 34 matrix from input image // Convert local_linear_index to coordinates of the 34 x 34 matrix // 2 rounds are enough only for BLOCK_SIZE >= 5 // Round 1 const int patch_r1 = local_linear_index / PATCH_SIZE; const int patch_c1 = local_linear_index % PATCH_SIZE; const int patch_labels_r1 = blockIdx.y * BLOCK_SIZE - 1 + patch_r1; const int patch_labels_c1 = blockIdx.x * BLOCK_SIZE - 1 + patch_c1; const int patch_labels_index1 = patch_labels_r1 * (labels.step / labels.elem_size) + patch_labels_c1; const bool patch_in_limits1 = patch_labels_r1 >= 0 && patch_labels_c1 >= 0 && patch_labels_r1 < labels.rows&& patch_labels_c1 < labels.cols; labels_patch[patch_r1 * PATCH_SIZE + patch_c1] = patch_in_limits1 ? labels[patch_labels_index1] : 0; // Round 2 const int patch_r2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) / PATCH_SIZE; if (patch_r2 < PATCH_SIZE) { const int patch_c2 = (local_linear_index + BLOCK_SIZE * BLOCK_SIZE) % PATCH_SIZE; const int patch_labels_r2 = blockIdx.y * BLOCK_SIZE - 1 + patch_r2; const int patch_labels_c2 = blockIdx.x * BLOCK_SIZE - 1 + patch_c2; const int patch_labels_index2 = patch_labels_r2 * (labels.step / labels.elem_size) + patch_labels_c2; const bool patch_in_limits2 = patch_labels_r2 >= 0 && patch_labels_c2 >= 0 && patch_labels_r2 < labels.rows&& patch_labels_c2 < labels.cols; labels_patch[patch_r2 * PATCH_SIZE + patch_c2] = patch_in_limits2 ? 
labels[patch_labels_index2] : 0; } __syncthreads(); const unsigned label = labels_patch[labels_patch_index]; unsigned min_label = label & 0x0FFFFFFF; // Primary/Secondary Optimization // Find the primary pixel of the sub-component, and add its label to the propagation. if (min_label) { const int primary_r = ((label & 0x0FFFFFFF) - 1) / (labels.step / labels.elem_size); const int primary_c = ((label & 0x0FFFFFFF) - 1) % (labels.step / labels.elem_size); if (primary_r >= (blockIdx.y * BLOCK_SIZE - 1) && primary_r <= (blockIdx.y + 1) * BLOCK_SIZE && primary_c >= (blockIdx.x * BLOCK_SIZE - 1) && primary_c <= (blockIdx.x + 1) * BLOCK_SIZE) { const int primary_local_r = primary_r - blockIdx.y * BLOCK_SIZE; const int primary_local_c = primary_c - blockIdx.x * BLOCK_SIZE; min_label = min(min_label, labels_patch[(primary_local_r + 1) * PATCH_SIZE + (primary_local_c + 1)] & 0x0FFFFFFF); } } __syncthreads(); // Propagation sizes are calculated in every propagation step // Propagations could be shared by threads with shuffle ops, BUT // threads must be in the same warp. It requires a different thread // organization for every direction, but it is feasible. // For now, shared memory is used instead. __shared__ unsigned propagation[BLOCK_SIZE * BLOCK_SIZE]; // Up-Left int patch_r = threadIdx.y + 1; int patch_c = threadIdx.x + 1; unsigned cur_label = label; propagation[local_linear_index] = ((label >> 31) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_x = threadIdx.x - propagation[local_linear_index]; thread_y = threadIdx.y - propagation[local_linear_index]; if (thread_x < 0 || thread_y < 0) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[(patch_r - 1) * PATCH_SIZE + (patch_c - 1)]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[(patch_r - propagation[local_linear_index]) * PATCH_SIZE + (patch_c - propagation[local_linear_index])]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //while (((cur_label >> 31) & 1) && (--patch_r >= 0) && (--patch_c >= 0)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // // This should go after the cycle, after the optimization of Pag. 
209 has been applied // // A propagation size of 1 must be mantained though // min_label = min(min_label, cur_label & 0x0FFFFFFF); //} // Up patch_r = threadIdx.y + 1; patch_c = threadIdx.x + 1; cur_label = label; __syncthreads(); propagation[local_linear_index] = ((label >> 30) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_y = threadIdx.y - propagation[local_linear_index]; if (thread_y < 0) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[(patch_r - 1) * PATCH_SIZE + patch_c]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[(patch_r - propagation[local_linear_index]) * PATCH_SIZE + patch_c]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //patch_r = threadIdx.y + 1; //patch_c = threadIdx.x + 1; //cur_label = label; //while (((cur_label >> 31) & 1) && (--patch_r >= 0) && (--patch_c >= 0)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // // This should go after the cycle, after the optimization of Pag. 209 has been applied // // A propagation size of 1 must be mantained though // min_label = min(min_label, cur_label & 0x0FFFFFFF); //} //// Up //patch_r = threadIdx.y + 1; //patch_c = threadIdx.x + 1; //cur_label = label; //while (((cur_label >> 30) & 1) && (--patch_r >= 0)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // min_label = min(min_label, cur_label & 0x0FFFFFFF); //} // Up-Right patch_r = threadIdx.y + 1; patch_c = threadIdx.x + 1; cur_label = label; __syncthreads(); propagation[local_linear_index] = ((label >> 29) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_x = threadIdx.x + propagation[local_linear_index]; thread_y = threadIdx.y - propagation[local_linear_index]; if (thread_x >= BLOCK_SIZE || thread_y < 0) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[(patch_r - 1) * PATCH_SIZE + (patch_c + 1)]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[(patch_r - propagation[local_linear_index]) * PATCH_SIZE + (patch_c + propagation[local_linear_index])]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //patch_r = threadIdx.y + 1; //patch_c = threadIdx.x + 1; //cur_label = label; //while (((cur_label >> 29) & 1) && (--patch_r >= 0) && (++patch_c < PATCH_SIZE)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // min_label = min(min_label, cur_label & 0x0FFFFFFF); //} // Right patch_r = threadIdx.y + 1; patch_c = threadIdx.x + 1; cur_label = label; __syncthreads(); propagation[local_linear_index] = ((label >> 28) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_x = threadIdx.x + propagation[local_linear_index]; if (thread_x >= BLOCK_SIZE) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A 
propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_r * PATCH_SIZE + (patch_c + 1)]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_r * PATCH_SIZE + (patch_c + propagation[local_linear_index])]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //patch_r = threadIdx.y + 1; //patch_c = threadIdx.x + 1; //cur_label = label; //while (((cur_label >> 28) & 1) && (++patch_c < PATCH_SIZE)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // min_label = min(min_label, cur_label & 0x0FFFFFFF); //} // The next 4 connection bits come from neighbor pixels // Down-Right patch_r = threadIdx.y + 1; patch_c = threadIdx.x + 1; cur_label = labels_patch[(patch_r + 1) * PATCH_SIZE + (patch_c + 1)]; __syncthreads(); propagation[local_linear_index] = ((cur_label >> 31) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_x = threadIdx.x + propagation[local_linear_index]; thread_y = threadIdx.y + propagation[local_linear_index]; if (thread_x >= BLOCK_SIZE || thread_y >= BLOCK_SIZE) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[(patch_r + 1) * PATCH_SIZE + (patch_c + 1)]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[(patch_r + propagation[local_linear_index]) * PATCH_SIZE + (patch_c + propagation[local_linear_index])]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //while (true) { // if ((cur_label >> 31) & 1) { // min_label = min(min_label, cur_label & 0x0FFFFFFF); // } // else { // break; // } // patch_r++; // patch_c++; // if ((patch_r < PATCH_SIZE) && (patch_c < PATCH_SIZE)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // } // else { // break; // } //} // Down patch_r = threadIdx.y + 1; patch_c = threadIdx.x + 1; cur_label = labels_patch[(patch_r + 1) * PATCH_SIZE + patch_c]; __syncthreads(); propagation[local_linear_index] = ((cur_label >> 30) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_y = threadIdx.y + propagation[local_linear_index]; if (thread_y >= BLOCK_SIZE) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[(patch_r + 1) * PATCH_SIZE + patch_c]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[(patch_r + propagation[local_linear_index]) * PATCH_SIZE + patch_c]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //patch_r = threadIdx.y + 2; //patch_c = threadIdx.x + 1; //cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; //while (true) { // if ((cur_label >> 30) & 1) { // min_label = min(min_label, cur_label & 0x0FFFFFFF); // } // else { // break; // } // patch_r++; // if ((patch_r < PATCH_SIZE)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // } // else { // break; // } //} // Down-Left patch_r = threadIdx.y + 1; patch_c = threadIdx.x + 1; cur_label = labels_patch[(patch_r + 1) * 
PATCH_SIZE + (patch_c - 1)]; __syncthreads(); propagation[local_linear_index] = ((cur_label >> 29) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_x = threadIdx.x - propagation[local_linear_index]; thread_y = threadIdx.y + propagation[local_linear_index]; if (thread_x < 0 || thread_y >= BLOCK_SIZE) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[(patch_r + 1) * PATCH_SIZE + (patch_c - 1)]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[(patch_r + propagation[local_linear_index]) * PATCH_SIZE + (patch_c - propagation[local_linear_index])]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //patch_r = threadIdx.y + 2; //patch_c = threadIdx.x; //cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; //while (true) { // if ((cur_label >> 29) & 1) { // min_label = min(min_label, cur_label & 0x0FFFFFFF); // } // else { // break; // } // patch_r++; // patch_c--; // if ((patch_r < PATCH_SIZE) && (patch_c >= 0)) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // } // else { // break; // } //} // Left patch_r = threadIdx.y + 1; patch_c = threadIdx.x + 1; cur_label = labels_patch[patch_r * PATCH_SIZE + (patch_c - 1)]; __syncthreads(); propagation[local_linear_index] = ((cur_label >> 28) & 1); __syncthreads(); if (propagation[local_linear_index]) { int thread_x = threadIdx.x; int thread_y = threadIdx.y; while (true) { thread_x = threadIdx.x - propagation[local_linear_index]; if (thread_x < 0) { break; } unsigned prop_delta = propagation[thread_y * BLOCK_SIZE + thread_x]; if (prop_delta == 0) { break; } propagation[local_linear_index] += prop_delta; } // A propagation size of 1 must be mantained const unsigned close_label = labels_patch[patch_r * PATCH_SIZE + (patch_c - 1)]; min_label = min(min_label, close_label & 0x0FFFFFFF); // The farthest label is gathered const unsigned far_label = labels_patch[patch_r * PATCH_SIZE + (patch_c - propagation[local_linear_index])]; min_label = min(min_label, far_label & 0x0FFFFFFF); } //patch_r = threadIdx.y + 1; //patch_c = threadIdx.x; //cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; //while (true) { // if ((cur_label >> 28) & 1) { // min_label = min(min_label, cur_label & 0x0FFFFFFF); // } // else { // break; // } // patch_c--; // if (patch_c >= 0) { // cur_label = labels_patch[patch_r * PATCH_SIZE + patch_c]; // } // else { // break; // } //} if (min_label < (label & 0x0FFFFFFF)) { labels_patch[labels_patch_index] = min_label | (label & 0xF0000000); *changed = 1; } __syncthreads(); if (in_limits) { labels[labels_index] = labels_patch[labels_patch_index]; } // A propagation cycle could be added inside the thread block } __global__ void End(cuda::PtrStepSzi labels) { unsigned global_row = blockIdx.y * BLOCK_SIZE + threadIdx.y; unsigned global_col = blockIdx.x * BLOCK_SIZE + threadIdx.x; unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col; if (global_row < labels.rows && global_col < labels.cols) { *(reinterpret_cast<unsigned char*>(labels.data + labels_index) + 3) &= 0x0F; // Assuming little endian } } } class RASMUSSON : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; public: RASMUSSON() {} 
void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); grid_size_ = dim3((d_img_.cols + BLOCK_SIZE - 1) / BLOCK_SIZE, (d_img_.rows + BLOCK_SIZE - 1) / BLOCK_SIZE, 1); block_size_ = dim3(BLOCK_SIZE, BLOCK_SIZE, 1); char changed = 1; char* d_changed_ptr; cudaMalloc(&d_changed_ptr, 1); // Phase 1 // CCL on tiles Init << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); // Immagine di debug della prima fase cuda::GpuMat d_local_labels; d_img_labels_.copyTo(d_local_labels); // PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels); // ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels); Mat1i local_labels(img_.size()); d_local_labels.download(local_labels); // Phase 2 while (changed) { // changed = 0; // cudaMemcpy(d_changed_ptr, &changed, 1, cudaMemcpyDefault); cudaMemset(d_changed_ptr, 0, 1); Propagate << <grid_size_, block_size_ >> > (d_img_labels_, d_changed_ptr); //Propagate << <dim3(1, 1, 1), block_size_ >> > (d_img_labels_, d_changed_ptr); cudaMemcpy(&changed, d_changed_ptr, 1, cudaMemcpyDeviceToHost); cuda::GpuMat d_global_labels; d_img_labels_.copyTo(d_global_labels); End << <grid_size_, block_size_ >> > (d_global_labels); //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels); // ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels); Mat1i global_labels(img_.size()); d_global_labels.download(global_labels); } // Merges UFTrees of different tiles // Immagine di debug della seconda fase //cuda::GpuMat d_global_labels; //d_img_labels_.copyTo(d_global_labels); //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels); //// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels); //Mat1i global_labels(img_.size()); //d_global_labels.download(global_labels); // Phase 3 // Collapse UFTrees //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_); End << <grid_size_, block_size_ >> > (d_img_labels_); cudaDeviceSynchronize(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void LocalScan() { } void GlobalScan() { cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); LocalScan(); perf_.stop(); perf_.store(Step(StepType::FIRST_SCAN), perf_.last()); perf_.start(); GlobalScan(); perf_.stop(); perf_.store(Step(StepType::SECOND_SCAN), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(RASMUSSON);
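// Sketch of the label word layout implied by the masks in Init/Propagate/End
// above: the low 28 bits hold "linear index of the component's primary pixel
// + 1" (0 meaning background), while the top 4 bits cache the up-left / up /
// up-right / right connection flags so Propagate can walk them without
// re-reading the image; End() then clears that top nibble. The helper names
// below (make_label, final_label) are hypothetical and shown for illustration.
#include <cstdint>

constexpr std::uint32_t kIndexMask = 0x0FFFFFFFu;  // primary-pixel index + 1
constexpr std::uint32_t kConnMask  = 0xF0000000u;  // cached connection bits

inline std::uint32_t make_label(std::uint32_t linear_index, bool up_left,
                                bool up, bool up_right, bool right)
{
    std::uint32_t label = (linear_index + 1) & kIndexMask;
    if (up_left)  label |= (1u << 31);
    if (up)       label |= (1u << 30);
    if (up_right) label |= (1u << 29);
    if (right)    label |= (1u << 28);
    return label;
}

inline std::uint32_t final_label(std::uint32_t label)  // what End() leaves behind
{
    return label & kIndexMask;
}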
// Licensed under the MIT license. See LICENSE file in the project root for full license information. #include "caffe/util/math_functions.hpp" #include "caffe/common.hpp" #include "GeneralizedPatchMatch.cuh" #include "Combo.cuh" #define ENABLE_VIS 0 struct Parameters { std::vector<std::string> layers; //which layers used as content std::vector<std::string> layernames; //which layers used as content std::vector<std::string> datanames; //which layers used as content int patch_size0; int iter; }; Combo::Combo() { } Combo::~Combo() { } void Combo::SetGPU(int no) { int devCount; cudaGetDeviceCount(&devCount); wcout << "CUDA Devices: " << endl << endl; for (int i = 0; i < devCount; ++i) { cudaDeviceProp props; cudaGetDeviceProperties(&props, i); size_t totalMem = 0; size_t freeMem = 0; cudaSetDevice(i); cudaMemGetInfo(&freeMem, &totalMem); wcout << "GPU " << i << ", Name = " << props.name << ", free = " << freeMem << ", total = " << totalMem << endl; } cudaSetDevice(no); int num = -1; size_t totalMem = 0; size_t freeMem = 0; cudaGetDevice(&num); cudaMemGetInfo(&freeMem, &totalMem); wcout << "Current GPU = " << num << ", free = " << freeMem << ", total = " << totalMem << endl; } bool Combo::LoadA(const char* file_A) { img_AL_col = imread(file_A); if (img_AL_col.empty()) { cout << "Error: Source image cannot read!" << endl; waitKey(); return false; } img_AL = Mat::zeros(img_AL_col.size(), CV_8UC3); // convert to grayscale image { Mat gray(img_AL_col.size(), CV_8UC3); cvtColor(img_AL_col, gray, cv::COLOR_BGR2Lab); #pragma omp parallel for for (int r = 0; r < img_AL.rows; ++r) { for (int c = 0; c < img_AL.cols; ++c) { uchar g = gray.at<Vec3b>(r, c)[0]; img_AL.at<Vec3b>(r, c) = Vec3b(g, g, g); } } } return true; } bool Combo::LoadBP(const char* file_BP) { img_BPL_col = imread(file_BP); if (img_BPL_col.empty()) { cout << "Error: Reference image cannot read!" 
<< endl; waitKey(); return false; } img_BPL = Mat::zeros(img_BPL_col.size(), CV_8UC3); // convert to grayscale image { Mat gray(img_BPL_col.size(), CV_8UC3); cvtColor(img_BPL_col, gray, cv::COLOR_BGR2Lab); #pragma omp parallel for for (int r = 0; r < img_BPL.rows; ++r) { for (int c = 0; c < img_BPL.cols; ++c) { uchar g = gray.at<Vec3b>(r, c)[0]; img_BPL.at<Vec3b>(r, c) = Vec3b(g, g, g); } } } return true; } void Combo::GetASize(int& width, int& height) { width = img_AL.cols; height = img_AL.rows; } void Combo::GetBPSize(int& width, int& height) { width = img_BPL.cols; height = img_BPL.rows; } void Combo::ComputeDist(Classifier& classifier_A, Classifier& classifier_B, FILE* fp_a, FILE* fp_b, const char* ff_a, const char* ff_b) { if (img_BPL.empty()) { printf("Error: Image2 is empty!\n"); return; } if(img_AL.empty()) { printf("Error: Image1 is empty!\n"); return; } const int param_size = 9; int aw = img_AL.cols; int ah = img_AL.rows; int bw = img_BPL.cols; int bh = img_BPL.rows; int *params_host, *params_device_AB, *params_device_BA; unsigned int *ann_device_AB, *ann_host_AB, *ann_device_BA, *ann_host_BA; unsigned int *rann_device_AB, *rann_device_BA; float *annd_device_AB, *annd_host_AB, *annd_device_BA, *annd_host_BA; float *rannd_device_AB, *rannd_device_BA; unsigned char* bgr_device_AB, *bgr_device_BA, *bgr_host_AB, *bgr_host_BA; //set parameters Parameters params; params.layers.push_back("conv5_1/bn"); params.layers.push_back("conv4_1/bn"); params.layers.push_back("conv3_1/bn"); params.layers.push_back("conv2_1/bn"); params.layers.push_back("conv1_1/bn"); std::vector<int> sizes; sizes.push_back(3); sizes.push_back(3); sizes.push_back(3); sizes.push_back(3); sizes.push_back(3); //scale and enhance Mat img_BP = img_BPL.clone(); Mat img_A = img_AL.clone(); std::vector<float *> data_A; data_A.resize(params.layers.size()); std::vector<Dim> data_A_size; data_A_size.resize(params.layers.size()); classifier_A.Predict(img_A, params.layers, data_A, data_A_size); std::vector<float *> data_B; data_B.resize(params.layers.size()); std::vector<Dim> data_B_size; data_B_size.resize(params.layers.size()); classifier_B.Predict(img_BP, params.layers, data_B, data_B_size); int full_ann_size_AB = aw * ah; int full_ann_size_BA = bw * bh; params_host = (int *)malloc(param_size * sizeof(int)); ann_host_AB = (unsigned int *)malloc(full_ann_size_AB * sizeof(unsigned int)); annd_host_AB = (float *)malloc(full_ann_size_AB * sizeof(float)); bgr_host_AB = (unsigned char*)malloc(full_ann_size_AB * sizeof(unsigned char)* 3); ann_host_BA = (unsigned int *)malloc(full_ann_size_BA * sizeof(unsigned int)); annd_host_BA = (float *)malloc(full_ann_size_BA * sizeof(float)); bgr_host_BA = (unsigned char*)malloc(full_ann_size_BA * sizeof(unsigned int)* 3); cudaMalloc(&params_device_AB, param_size * sizeof(int)); cudaMalloc(&ann_device_AB, full_ann_size_AB * sizeof(unsigned int)); cudaMalloc(&rann_device_AB, full_ann_size_AB * sizeof(unsigned int)); cudaMalloc(&annd_device_AB, full_ann_size_AB * sizeof(float)); cudaMalloc(&rannd_device_AB, full_ann_size_AB * sizeof(float)); cudaMalloc(&bgr_device_AB, full_ann_size_AB * sizeof(unsigned int)); cudaMalloc(&params_device_BA, param_size * sizeof(int)); cudaMalloc(&ann_device_BA, full_ann_size_BA * sizeof(unsigned int)); cudaMalloc(&rann_device_BA, full_ann_size_BA * sizeof(unsigned int)); cudaMalloc(&annd_device_BA, full_ann_size_BA * sizeof(float)); cudaMalloc(&rannd_device_BA, full_ann_size_BA * sizeof(float)); cudaMalloc(&bgr_device_BA, full_ann_size_BA * sizeof(unsigned char)); 
int numlayer = params.layers.size(); ifstream aflow_input; aflow_input.open(ff_a); for (int y = 0; y < ah; y++) { for (int x = 0; x < aw; x++) { int dx = 0, dy = 0; aflow_input >> dx; aflow_input >> dy; int xbest = x + dx; int ybest = y + dy; ann_host_AB[y * aw + x] = XY_TO_INT(xbest, ybest); } } aflow_input.close(); ifstream bflow_input; bflow_input.open(ff_b); for (int y = 0; y < bh; y++) { for (int x = 0; x < bw; x++) { int dx = 0, dy = 0; bflow_input >> dx; bflow_input >> dy; int xbest = x + dx; int ybest = y + dy; ann_host_BA[y * bw + x] = XY_TO_INT(xbest, ybest); } } bflow_input.close(); cudaMemcpy(ann_device_AB, ann_host_AB, full_ann_size_AB * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(ann_device_BA, ann_host_BA, full_ann_size_BA * sizeof(unsigned int), cudaMemcpyHostToDevice); dim3 blocksPerGridAB(aw / 20 + 1, ah / 20 + 1, 1); dim3 blocksPerGridBA(bw / 20 + 1, bh / 20 + 1, 1); dim3 threadsPerBlock(20, 20, 1); reverse_flow << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, ann_device_BA, rann_device_AB, ah, aw, bh, bw); reverse_flow << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, ann_device_AB, rann_device_BA, bh, bw, ah, aw); Mat result_AB = reconstruct_avg(img_AL_col, img_BPL_col, ann_host_AB, sizes[numlayer - 1]); cudaMemcpy(ann_host_AB, rann_device_AB, full_ann_size_AB * sizeof(unsigned int), cudaMemcpyDeviceToHost); Mat reverse_AB = reconstruct_avg(img_AL_col, img_AL_col, ann_host_AB, sizes[numlayer - 1]); fwrite(&ah, sizeof(int), 1, fp_a); fwrite(&aw, sizeof(int), 1, fp_a); fwrite(&bh, sizeof(int), 1, fp_b); fwrite(&bw, sizeof(int), 1, fp_b); cv::vector<uchar> buf; imencode(".png", result_AB, buf); int sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_a); fwrite(&(buf[0]), sizeof(uchar), sz, fp_a); imencode(".png", reverse_AB, buf); sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_a); fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_a); Mat result_BA = reconstruct_avg(img_BPL_col, img_AL_col, ann_host_BA, sizes[numlayer - 1]); cudaMemcpy(ann_host_BA, rann_device_BA, full_ann_size_BA * sizeof(unsigned int), cudaMemcpyDeviceToHost); Mat reverse_BA = reconstruct_avg(img_BPL_col, img_BPL_col, ann_host_BA, sizes[numlayer - 1]); imencode(".png", result_BA, buf); sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_b); fwrite(&(buf[0]), sizeof(uchar), sz, fp_b); imencode(".png", reverse_BA, buf); sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_b); fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b); // compute feature distance for each layer for (int curr_layer = 0; curr_layer < numlayer; curr_layer++)//from 32 to 512 { //set parameters params_host[0] = data_A_size[curr_layer].channel;//channels params_host[1] = data_A_size[curr_layer].height; params_host[2] = data_A_size[curr_layer].width; params_host[3] = data_A_size[curr_layer].height; params_host[4] = data_A_size[curr_layer].width; params_host[5] = sizes[curr_layer]; params_host[6] = params.iter; //copy to device cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice); //set parameters params_host[0] = data_B_size[curr_layer].channel;//channels params_host[1] = data_B_size[curr_layer].height; params_host[2] = data_B_size[curr_layer].width; params_host[3] = data_B_size[curr_layer].height; params_host[4] = data_B_size[curr_layer].width; //copy to device cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice); int scale = pow(2, 4 - curr_layer); // error ba //set parameters params_host[0] = data_A_size[curr_layer].channel;//channels 
params_host[1] = data_A_size[curr_layer].height; params_host[2] = data_A_size[curr_layer].width; params_host[3] = data_B_size[curr_layer].height; params_host[4] = data_B_size[curr_layer].width; params_host[5] = sizes[curr_layer]; params_host[6] = params.iter; //copy to device cudaMemcpy(params_device_AB, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice); //set parameters params_host[0] = data_B_size[curr_layer].channel;//channels params_host[1] = data_B_size[curr_layer].height; params_host[2] = data_B_size[curr_layer].width; params_host[3] = data_A_size[curr_layer].height; params_host[4] = data_A_size[curr_layer].width; //copy to device cudaMemcpy(params_device_BA, params_host, param_size * sizeof(int), cudaMemcpyHostToDevice); compute_dist_norm << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, annd_device_AB, data_A[curr_layer], data_B[curr_layer], params_device_AB, aw, ah, scale); compute_dist_norm << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, annd_device_BA, data_B[curr_layer], data_A[curr_layer], params_device_BA, bw, bh, scale); convert_float2bgr << <blocksPerGridAB, threadsPerBlock >> >(annd_device_AB, bgr_device_AB, aw, ah); convert_float2bgr << <blocksPerGridBA, threadsPerBlock >> >(annd_device_BA, bgr_device_BA, bw, bh); cudaMemcpy(bgr_host_AB, bgr_device_AB, full_ann_size_AB * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaMemcpy(bgr_host_BA, bgr_device_BA, full_ann_size_BA * sizeof(unsigned char), cudaMemcpyDeviceToHost); Mat ebgrAB(ah, aw, CV_8UC1, bgr_host_AB); Mat ebgrBA(bh, bw, CV_8UC1, bgr_host_BA); imencode(".png", ebgrAB, buf); int sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_a); fwrite(&(buf[0]), sizeof(uchar), sz, fp_a); imencode(".png", ebgrBA, buf); sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_b); fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b); // error ab reverse_dist << <blocksPerGridAB, threadsPerBlock >> >(ann_device_AB, rannd_device_AB, annd_device_BA, aw, ah, bw, bh); reverse_dist << <blocksPerGridBA, threadsPerBlock >> >(ann_device_BA, rannd_device_BA, annd_device_AB, bw, bh, aw, ah); convert_float2bgr << <blocksPerGridAB, threadsPerBlock >> >(rannd_device_AB, bgr_device_AB, aw, ah); convert_float2bgr << <blocksPerGridBA, threadsPerBlock >> >(rannd_device_BA, bgr_device_BA, bw, bh); cudaMemcpy(bgr_host_AB, bgr_device_AB, full_ann_size_AB * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaMemcpy(bgr_host_BA, bgr_device_BA, full_ann_size_BA * sizeof(unsigned char), cudaMemcpyDeviceToHost); Mat rbgrAB(ah, aw, CV_8UC1, bgr_host_AB); Mat rbgrBA(bh, bw, CV_8UC1, bgr_host_BA); imencode(".png", rbgrAB, buf); sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_a); fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_a); imencode(".png", rbgrBA, buf); sz = buf.size(); fwrite(&sz, sizeof(int), 1, fp_b); fwrite(&(buf[0]), sizeof(uchar), buf.size(), fp_b); } cudaFree(params_device_AB); cudaFree(ann_device_AB); cudaFree(rann_device_AB); cudaFree(annd_device_AB); cudaFree(rannd_device_AB); cudaFree(params_device_BA); cudaFree(ann_device_BA); cudaFree(rann_device_BA); cudaFree(annd_device_BA); cudaFree(rannd_device_BA); cudaFree(bgr_device_AB); cudaFree(bgr_device_BA); free(ann_host_AB); free(ann_host_BA); free(annd_host_AB); free(annd_host_BA); free(params_host); free(bgr_host_AB); free(bgr_host_BA); for (int i = 0; i < numlayer; i++) { cudaFree(data_A[i]); cudaFree(data_B[i]); } }
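// Sketch of the flow-file format consumed by ComputeDist above: ff_a / ff_b are
// plain-text files holding one (dx, dy) offset pair per pixel in row-major
// order, which the loader turns into absolute correspondences packed by
// XY_TO_INT. XY_TO_INT comes from GeneralizedPatchMatch.cuh (not shown here);
// the pack_xy lambda below is only an assumed bit layout, and load_flow is a
// hypothetical helper for illustration.
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <vector>

static std::vector<std::uint32_t> load_flow(const char* path, int w, int h)
{
    // Assumed packing: y in the high 16 bits, x in the low 16 bits.
    auto pack_xy = [](int x, int y) {
        return (static_cast<std::uint32_t>(y) << 16) |
               (static_cast<std::uint32_t>(x) & 0xFFFFu);
    };

    std::vector<std::uint32_t> ann(static_cast<std::size_t>(w) * h);
    std::ifstream in(path);
    for (int y = 0; y < h; ++y)
    {
        for (int x = 0; x < w; ++x)
        {
            int dx = 0, dy = 0;
            in >> dx >> dy;                            // offset stored in the file
            ann[static_cast<std::size_t>(y) * w + x] = pack_xy(x + dx, y + dy);
        }
    }
    return ann;
}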
#include "common.cuh" #include "../../ball_cover_common.h" #include "../block_select_faiss.cuh" #include "../haversine_distance.cuh" #include "../selection_faiss.cuh" #include <cstdint> #include <limits.h> #include <raft/cuda_utils.cuh> #include <faiss/gpu/utils/Limits.cuh> #include <faiss/gpu/utils/Select.cuh> #include <faiss/utils/Heap.h> namespace raft { namespace spatial { namespace knn { namespace detail { /** * To find exact neighbors, we perform a post-processing stage * that filters out those points which might have neighbors outside * of their k closest landmarks. This is usually a very small portion * of the total points. * @tparam value_idx * @tparam value_t * @tparam value_int * @tparam tpb * @param X * @param n_cols * @param R_knn_inds * @param R_knn_dists * @param R_radius * @param landmarks * @param n_landmarks * @param bitset_size * @param k * @param output * @param weight */ template <typename value_idx, typename value_t, typename value_int = std::uint32_t, int tpb = 32, typename distance_func> __global__ void perform_post_filter_registers(const value_t* X, value_int n_cols, const value_idx* R_knn_inds, const value_t* R_knn_dists, const value_t* R_radius, const value_t* landmarks, int n_landmarks, value_int bitset_size, value_int k, distance_func dfunc, std::uint32_t* output, float weight = 1.0) { // allocate array of size n_landmarks / 32 ints extern __shared__ std::uint32_t shared_mem[]; // Start with all bits on for (value_int i = threadIdx.x; i < bitset_size; i += tpb) { shared_mem[i] = 0xffffffff; } __syncthreads(); // TODO: Would it be faster to use L1 for this? value_t local_x_ptr[2]; for (value_int j = 0; j < n_cols; ++j) { local_x_ptr[j] = X[n_cols * blockIdx.x + j]; } value_t closest_R_dist = R_knn_dists[blockIdx.x * k + (k - 1)]; // zero out bits for closest k landmarks for (value_int j = threadIdx.x; j < k; j += tpb) { _zero_bit(shared_mem, (std::uint32_t)R_knn_inds[blockIdx.x * k + j]); } __syncthreads(); // Discard any landmarks where p(q, r) > p(q, r_q) + radius(r) // That is, the distance between the current point and the current // landmark is > the distance between the current point and // its closest landmark + the radius of the current landmark. 
for (value_int l = threadIdx.x; l < n_landmarks; l += tpb) { // compute p(q, r) value_t dist = dfunc(local_x_ptr, landmarks + (n_cols * l), n_cols); if (dist > weight * (closest_R_dist + R_radius[l]) || dist > 3 * closest_R_dist) { _zero_bit(shared_mem, l); } } __syncthreads(); /** * Output bitset */ for (value_int l = threadIdx.x; l < bitset_size; l += tpb) { output[blockIdx.x * bitset_size + l] = shared_mem[l]; } } /** * @tparam value_idx * @tparam value_t * @tparam value_int * @tparam bitset_type * @tparam warp_q number of registers to use per warp * @tparam thread_q number of registers to use within each thread * @tparam tpb number of threads per block * @param X * @param n_cols * @param bitset * @param bitset_size * @param R_knn_dists * @param R_indptr * @param R_1nn_inds * @param R_1nn_dists * @param knn_inds * @param knn_dists * @param n_landmarks * @param k * @param dist_counter */ template <typename value_idx, typename value_t, typename value_int = std::uint32_t, typename bitset_type = std::uint32_t, typename dist_func, int warp_q = 32, int thread_q = 2, int tpb = 128, int col_q = 2> __global__ void compute_final_dists_registers(const value_t* X_index, const value_t* X, const value_int n_cols, bitset_type* bitset, value_int bitset_size, const value_t* R_knn_dists, const value_idx* R_indptr, const value_idx* R_1nn_inds, const value_t* R_1nn_dists, value_idx* knn_inds, value_t* knn_dists, value_int n_landmarks, value_int k, dist_func dfunc, value_int* dist_counter) { static constexpr int kNumWarps = tpb / faiss::gpu::kWarpSize; __shared__ value_t shared_memK[kNumWarps * warp_q]; __shared__ faiss::gpu::KeyValuePair<value_t, value_idx> shared_memV[kNumWarps * warp_q]; const value_t* x_ptr = X + (n_cols * blockIdx.x); value_t local_x_ptr[col_q]; for (value_int j = 0; j < n_cols; ++j) { local_x_ptr[j] = x_ptr[j]; } faiss::gpu::KeyValueBlockSelect<value_t, value_idx, false, faiss::gpu::Comparator<value_t>, warp_q, thread_q, tpb> heap(faiss::gpu::Limits<value_t>::getMax(), faiss::gpu::Limits<value_t>::getMax(), -1, shared_memK, shared_memV, k); const value_int n_k = faiss::gpu::utils::roundDown(k, faiss::gpu::kWarpSize); value_int i = threadIdx.x; for (; i < n_k; i += tpb) { value_idx ind = knn_inds[blockIdx.x * k + i]; heap.add(knn_dists[blockIdx.x * k + i], R_knn_dists[ind * k], ind); } if (i < k) { value_idx ind = knn_inds[blockIdx.x * k + i]; heap.addThreadQ(knn_dists[blockIdx.x * k + i], R_knn_dists[ind * k], ind); } heap.checkThreadQ(); for (value_int cur_R_ind = 0; cur_R_ind < n_landmarks; ++cur_R_ind) { // if cur R overlaps cur point's closest R, it could be a // candidate if (_get_val(bitset + (blockIdx.x * bitset_size), cur_R_ind)) { value_idx R_start_offset = R_indptr[cur_R_ind]; value_idx R_stop_offset = R_indptr[cur_R_ind + 1]; value_idx R_size = R_stop_offset - R_start_offset; // Loop through R's neighborhood in parallel // Round R_size to the nearest warp threads so they can // all be computing in parallel. const value_int limit = faiss::gpu::utils::roundDown(R_size, faiss::gpu::kWarpSize); i = threadIdx.x; for (; i < limit; i += tpb) { value_idx cur_candidate_ind = R_1nn_inds[R_start_offset + i]; value_t cur_candidate_dist = R_1nn_dists[R_start_offset + i]; value_t z = heap.warpKTopRDist == 0.00 ? 0.0 : (abs(heap.warpKTop - heap.warpKTopRDist) * abs(heap.warpKTopRDist - cur_candidate_dist) - heap.warpKTop * cur_candidate_dist) / heap.warpKTopRDist; z = isnan(z) ? 
0.0 : z; // If lower bound on distance could possibly be in // the closest k neighbors, compute it and add to k-select value_t dist = std::numeric_limits<value_t>::max(); if (z <= heap.warpKTop) { const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind); value_t local_y_ptr[col_q]; for (value_int j = 0; j < n_cols; ++j) { local_y_ptr[j] = y_ptr[j]; } dist = dfunc(local_x_ptr, local_y_ptr, n_cols); } heap.add(dist, cur_candidate_dist, cur_candidate_ind); } // second round guarantees to be only a single warp. if (i < R_size) { value_idx cur_candidate_ind = R_1nn_inds[R_start_offset + i]; value_t cur_candidate_dist = R_1nn_dists[R_start_offset + i]; value_t z = heap.warpKTopRDist == 0.00 ? 0.0 : (abs(heap.warpKTop - heap.warpKTopRDist) * abs(heap.warpKTopRDist - cur_candidate_dist) - heap.warpKTop * cur_candidate_dist) / heap.warpKTopRDist; z = isnan(z) ? 0.0 : z; // If lower bound on distance could possibly be in // the closest k neighbors, compute it and add to k-select value_t dist = std::numeric_limits<value_t>::max(); if (z <= heap.warpKTop) { const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind); value_t local_y_ptr[col_q]; for (value_int j = 0; j < n_cols; ++j) { local_y_ptr[j] = y_ptr[j]; } dist = dfunc(local_x_ptr, local_y_ptr, n_cols); } heap.addThreadQ(dist, cur_candidate_dist, cur_candidate_ind); } heap.checkThreadQ(); } } heap.reduce(); for (value_int i = threadIdx.x; i < k; i += tpb) { knn_dists[blockIdx.x * k + i] = shared_memK[i]; knn_inds[blockIdx.x * k + i] = shared_memV[i].value; } } /** * Random ball cover kernel for n_dims == 2 * @tparam value_idx * @tparam value_t * @tparam warp_q * @tparam thread_q * @tparam tpb * @tparam value_idx * @tparam value_t * @param R_knn_inds * @param R_knn_dists * @param m * @param k * @param R_indptr * @param R_1nn_cols * @param R_1nn_dists */ template <typename value_idx = std::int64_t, typename value_t, int warp_q = 32, int thread_q = 2, int tpb = 128, int col_q = 2, typename value_int = std::uint32_t, typename distance_func> __global__ void block_rbc_kernel_registers(const value_t* X_index, const value_t* X, value_int n_cols, // n_cols should be 2 or 3 dims const value_idx* R_knn_inds, const value_t* R_knn_dists, value_int m, value_int k, const value_idx* R_indptr, const value_idx* R_1nn_cols, const value_t* R_1nn_dists, value_idx* out_inds, value_t* out_dists, value_int* dist_counter, value_t* R_radius, distance_func dfunc, float weight = 1.0) { static constexpr value_int kNumWarps = tpb / faiss::gpu::kWarpSize; __shared__ value_t shared_memK[kNumWarps * warp_q]; __shared__ faiss::gpu::KeyValuePair<value_t, value_idx> shared_memV[kNumWarps * warp_q]; // TODO: Separate kernels for different widths: // 1. Very small (between 3 and 32) just use registers for columns of "blockIdx.x" // 2. Can fit comfortably in shared memory (32 to a few thousand?) // 3. Load each time individually. 
const value_t* x_ptr = X + (n_cols * blockIdx.x); // Use registers only for 2d or 3d value_t local_x_ptr[col_q]; for (value_int i = 0; i < n_cols; ++i) { local_x_ptr[i] = x_ptr[i]; } // Each warp works on 1 R faiss::gpu::KeyValueBlockSelect<value_t, value_idx, false, faiss::gpu::Comparator<value_t>, warp_q, thread_q, tpb> heap(faiss::gpu::Limits<value_t>::getMax(), faiss::gpu::Limits<value_t>::getMax(), -1, shared_memK, shared_memV, k); value_t min_R_dist = R_knn_dists[blockIdx.x * k + (k - 1)]; value_int n_dists_computed = 0; /** * First add distances for k closest neighbors of R * to the heap */ // Start iterating through elements of each set from closest R elements, // determining if the distance could even potentially be in the heap. for (value_int cur_k = 0; cur_k < k; ++cur_k) { // index and distance to current blockIdx.x's closest landmark value_t cur_R_dist = R_knn_dists[blockIdx.x * k + cur_k]; value_idx cur_R_ind = R_knn_inds[blockIdx.x * k + cur_k]; // Equation (2) in Cayton's paper- prune out R's which are > 3 * p(q, r_q) if (cur_R_dist > weight * (min_R_dist + R_radius[cur_R_ind])) continue; if (cur_R_dist > 3 * min_R_dist) return; // The whole warp should iterate through the elements in the current R value_idx R_start_offset = R_indptr[cur_R_ind]; value_idx R_stop_offset = R_indptr[cur_R_ind + 1]; value_idx R_size = R_stop_offset - R_start_offset; value_int limit = faiss::gpu::utils::roundDown(R_size, faiss::gpu::kWarpSize); value_int i = threadIdx.x; for (; i < limit; i += tpb) { // Index and distance of current candidate's nearest landmark value_idx cur_candidate_ind = R_1nn_cols[R_start_offset + i]; value_t cur_candidate_dist = R_1nn_dists[R_start_offset + i]; // Take 2 landmarks l_1 and l_2 where l_1 is the furthest point in the heap // and l_2 is the current landmark R. s is the current data point and // t is the new candidate data point. We know that: // d(s, t) cannot possibly be any smaller than | d(s, l_1) - d(l_1, l_2) | * | d(l_1, l_2) - // d(l_2, t) | - d(s, l_1) * d(l_2, t) // Therefore, if d(s, t) >= d(s, l_1) from the computation above, we know that the distance to // the candidate point cannot possibly be in the nearest neighbors. However, if d(s, t) < d(s, // l_1) then we should compute the distance because it's possible it could be smaller. // value_t z = heap.warpKTopRDist == 0.00 ? 0.0 : (abs(heap.warpKTop - heap.warpKTopRDist) * abs(heap.warpKTopRDist - cur_candidate_dist) - heap.warpKTop * cur_candidate_dist) / heap.warpKTopRDist; z = isnan(z) ? 0.0 : z; value_t dist = std::numeric_limits<value_t>::max(); if (i < k || z <= heap.warpKTop) { const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind); value_t local_y_ptr[col_q]; for (value_int j = 0; j < n_cols; ++j) { local_y_ptr[j] = y_ptr[j]; } dist = dfunc(local_x_ptr, local_y_ptr, n_cols); ++n_dists_computed; } heap.add(dist, cur_candidate_dist, cur_candidate_ind); } if (i < R_size) { value_idx cur_candidate_ind = R_1nn_cols[R_start_offset + i]; value_t cur_candidate_dist = R_1nn_dists[R_start_offset + i]; value_t z = heap.warpKTopRDist == 0.0 ? 0.0 : (abs(heap.warpKTop - heap.warpKTopRDist) * abs(heap.warpKTopRDist - cur_candidate_dist) - heap.warpKTop * cur_candidate_dist) / heap.warpKTopRDist; z = isnan(z) ? 
0.0 : z; value_t dist = std::numeric_limits<value_t>::max(); if (i < k || z <= heap.warpKTop) { const value_t* y_ptr = X_index + (n_cols * cur_candidate_ind); value_t local_y_ptr[col_q]; for (value_int j = 0; j < n_cols; ++j) { local_y_ptr[j] = y_ptr[j]; } dist = dfunc(local_x_ptr, local_y_ptr, n_cols); ++n_dists_computed; } heap.addThreadQ(dist, cur_candidate_dist, cur_candidate_ind); } heap.checkThreadQ(); } heap.reduce(); for (int i = threadIdx.x; i < k; i += tpb) { out_dists[blockIdx.x * k + i] = shared_memK[i]; out_inds[blockIdx.x * k + i] = shared_memV[i].value; } } template <typename value_idx, typename value_t, typename value_int = std::uint32_t, typename dist_func> void rbc_low_dim_pass_one(const raft::handle_t& handle, BallCoverIndex<value_idx, value_t, value_int>& index, const value_t* query, const value_int n_query_rows, value_int k, const value_idx* R_knn_inds, const value_t* R_knn_dists, dist_func& dfunc, value_idx* inds, value_t* dists, float weight, value_int* dists_counter) { if (k <= 32) block_rbc_kernel_registers<value_idx, value_t, 32, 2, 128, 2, value_int> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, R_knn_inds, R_knn_dists, index.m, k, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, dists_counter, index.get_R_radius(), dfunc, weight); else if (k <= 64) block_rbc_kernel_registers<value_idx, value_t, 64, 3, 128, 2, value_int> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, R_knn_inds, R_knn_dists, index.m, k, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, dists_counter, index.get_R_radius(), dfunc, weight); else if (k <= 128) block_rbc_kernel_registers<value_idx, value_t, 128, 3, 128, 2, value_int> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, R_knn_inds, R_knn_dists, index.m, k, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, dists_counter, index.get_R_radius(), dfunc, weight); else if (k <= 256) block_rbc_kernel_registers<value_idx, value_t, 256, 4, 128, 2, value_int> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, R_knn_inds, R_knn_dists, index.m, k, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, dists_counter, index.get_R_radius(), dfunc, weight); else if (k <= 512) block_rbc_kernel_registers<value_idx, value_t, 512, 8, 64, 2, value_int> <<<n_query_rows, 64, 0, handle.get_stream()>>>(index.get_X(), query, index.n, R_knn_inds, R_knn_dists, index.m, k, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, dists_counter, index.get_R_radius(), dfunc, weight); else if (k <= 1024) block_rbc_kernel_registers<value_idx, value_t, 1024, 8, 64, 2, value_int> <<<n_query_rows, 64, 0, handle.get_stream()>>>(index.get_X(), query, index.n, R_knn_inds, R_knn_dists, index.m, k, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, dists_counter, index.get_R_radius(), dfunc, weight); } template <typename value_idx, typename value_t, typename value_int = std::uint32_t, typename dist_func> void rbc_low_dim_pass_two(const raft::handle_t& handle, BallCoverIndex<value_idx, value_t, value_int>& index, const value_t* query, const value_int n_query_rows, value_int k, const value_idx* R_knn_inds, const value_t* R_knn_dists, dist_func& dfunc, value_idx* inds, value_t* dists, float weight, value_int* post_dists_counter) { const value_int bitset_size = 
ceil(index.n_landmarks / 32.0); rmm::device_uvector<std::uint32_t> bitset(bitset_size * index.m, handle.get_stream()); perform_post_filter_registers<value_idx, value_t, value_int, 128> <<<n_query_rows, 128, bitset_size * sizeof(std::uint32_t), handle.get_stream()>>>( index.get_X(), index.n, R_knn_inds, R_knn_dists, index.get_R_radius(), index.get_R(), index.n_landmarks, bitset_size, k, dfunc, bitset.data(), weight); if (k <= 32) compute_final_dists_registers<value_idx, value_t, value_int, std::uint32_t, dist_func, 32, 2, 128, 2> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, bitset.data(), bitset_size, R_knn_dists, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, index.n_landmarks, k, dfunc, post_dists_counter); else if (k <= 64) compute_final_dists_registers<value_idx, value_t, value_int, std::uint32_t, dist_func, 64, 3, 128, 2> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, bitset.data(), bitset_size, R_knn_dists, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, index.n_landmarks, k, dfunc, post_dists_counter); else if (k <= 128) compute_final_dists_registers<value_idx, value_t, value_int, std::uint32_t, dist_func, 128, 3, 128, 2> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, bitset.data(), bitset_size, R_knn_dists, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, index.n_landmarks, k, dfunc, post_dists_counter); else if (k <= 256) compute_final_dists_registers<value_idx, value_t, value_int, std::uint32_t, dist_func, 256, 4, 128, 2> <<<n_query_rows, 128, 0, handle.get_stream()>>>(index.get_X(), query, index.n, bitset.data(), bitset_size, R_knn_dists, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, index.n_landmarks, k, dfunc, post_dists_counter); else if (k <= 512) compute_final_dists_registers<value_idx, value_t, value_int, std::uint32_t, dist_func, 512, 8, 64, 2> <<<n_query_rows, 64, 0, handle.get_stream()>>>(index.get_X(), query, index.n, bitset.data(), bitset_size, R_knn_dists, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, index.n_landmarks, k, dfunc, post_dists_counter); else if (k <= 1024) compute_final_dists_registers<value_idx, value_t, value_int, std::uint32_t, dist_func, 1024, 8, 64, 2> <<<n_query_rows, 64, 0, handle.get_stream()>>>(index.get_X(), query, index.n, bitset.data(), bitset_size, R_knn_dists, index.get_R_indptr(), index.get_R_1nn_cols(), index.get_R_1nn_dists(), inds, dists, index.n_landmarks, k, dfunc, post_dists_counter); } }; // namespace detail }; // namespace knn }; // namespace spatial }; // namespace raft
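// ---------------------------------------------------------------------------
// Illustration only (not part of the RAFT API): a host-side sketch of the
// triangle-inequality pruning test used inside the kernel above. The names
// k_top, k_top_r_dist and candidate_r_dist are hypothetical stand-ins for
// heap.warpKTop, heap.warpKTopRDist and cur_candidate_dist. A candidate's true
// distance is only computed when this lower bound does not already exceed the
// current k-th best distance held in the heap.
#include <cmath>

inline float candidate_lower_bound(float k_top, float k_top_r_dist,
                                   float candidate_r_dist) {
  if (k_top_r_dist == 0.0f) return 0.0f;  // degenerate landmark distance
  float z = (std::abs(k_top - k_top_r_dist) *
                 std::abs(k_top_r_dist - candidate_r_dist) -
             k_top * candidate_r_dist) /
            k_top_r_dist;
  return std::isnan(z) ? 0.0f : z;        // NaN is treated as "cannot prune"
}

inline bool should_compute_distance(float k_top, float k_top_r_dist,
                                    float candidate_r_dist,
                                    bool heap_not_full) {
  // Mirrors the kernel's `if (i < k || z <= heap.warpKTop)` guard.
  return heap_not_full ||
         candidate_lower_bound(k_top, k_top_r_dist, candidate_r_dist) <= k_top;
}
// ---------------------------------------------------------------------------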
// includes #include <stdio.h> #include <string.h> #include <iostream> #include <string> #include <vector> #include <fstream> #include <stdio.h> #include "shrUtils.h" #include "cmd_arg_reader.h" using namespace std; // size of PGM file header const unsigned int PGMHeaderSize = 0x40; #define MIN_EPSILON_ERROR 1e-3f // Deallocate memory allocated within shrUtils // ********************************************************************* void shrFree(void* ptr) { if( NULL != ptr) free( ptr); } // Helper function to init data arrays // ********************************************************************* void shrFillArray(float* pfData, int iSize) { int i; const float fScale = 1.0f / (float)RAND_MAX; for (i = 0; i < iSize; ++i) { pfData[i] = fScale * rand(); } } // Helper function to print data arrays // ********************************************************************* void shrPrintArray(float* pfData, int iSize) { int i; for (i = 0; i < iSize; ++i) { shrLog("%d: %.3f\n", i, pfData[i]); } } // Optional LogFileName Override function // ********************************************************************* char* cLogFilePathAndName = NULL; void shrSetLogFileName (const char* cOverRideName) { if( cLogFilePathAndName != NULL ) { free(cLogFilePathAndName); } cLogFilePathAndName = (char*) malloc(strlen(cOverRideName) + 1); #ifdef WIN32 strcpy_s(cLogFilePathAndName, strlen(cOverRideName) + 1, cOverRideName); #else strcpy(cLogFilePathAndName, cOverRideName); #endif return; } // Function to log standardized information to console, file or both // ********************************************************************* static int shrLogV(int iLogMode, int iErrNum, const char* cFormatString, va_list vaArgList) { static FILE* pFileStream0 = NULL; static FILE* pFileStream1 = NULL; size_t szNumWritten = 0; char cFileMode [3]; // if the sample log file is closed and the call includes a "write-to-file", open file for writing if ((pFileStream0 == NULL) && (iLogMode & LOGFILE)) { // if the default filename has not been overriden, set to default if (cLogFilePathAndName == NULL) { shrSetLogFileName(DEFAULTLOGFILE); } #ifdef _WIN32 // Windows version // set the file mode if (iLogMode & APPENDMODE) // append to prexisting file contents { sprintf_s (cFileMode, 3, "a+"); } else // replace prexisting file contents { sprintf_s (cFileMode, 3, "w"); } // open the individual sample log file in the requested mode errno_t err = fopen_s(&pFileStream0, cLogFilePathAndName, cFileMode); // if error on attempt to open, be sure the file is null or close it, then return negative error code if (err != 0) { if (pFileStream0) { fclose (pFileStream0); } iLogMode = LOGCONSOLE; // if we can't open a file, we will still output to the console window } #else // Linux & Mac version // set the file mode if (iLogMode & APPENDMODE) // append to prexisting file contents { sprintf (cFileMode, "a+"); } else // replace prexisting file contents { sprintf (cFileMode, "w"); } // open the file in the requested mode if ((pFileStream0 = fopen(cLogFilePathAndName, cFileMode)) == 0) { // if error on attempt to open, be sure the file is null or close it, then return negative error code if (pFileStream0) { fclose (pFileStream0); } iLogMode = LOGCONSOLE; // if we can't open a file, we will still output to the console window } #endif } // if the master log file is closed and the call incudes a "write-to-file" and MASTER, open master logfile file for writing if ((pFileStream1 == NULL) && (iLogMode & LOGFILE) && (iLogMode & MASTER)) { #ifdef _WIN32 // Windows 
version // open the master log file in append mode errno_t err = fopen_s(&pFileStream1, MASTERLOGFILE, "a+"); // if error on attempt to open, be sure the file is null or close it, then return negative error code if (err != 0) { if (pFileStream1) { fclose (pFileStream1); pFileStream1 = NULL; } iLogMode = LOGCONSOLE; // Force to LOGCONSOLE only since the file stream is invalid // return -err; } #else // Linux & Mac version // open the file in the requested mode if ((pFileStream1 = fopen(MASTERLOGFILE, "a+")) == 0) { // if error on attempt to open, be sure the file is null or close it, then return negative error code if (pFileStream1) { fclose (pFileStream1); pFileStream1 = NULL; } iLogMode = LOGCONSOLE; // Force to LOGCONSOLE only since the file stream is invalid // return -1; } #endif // If master log file length has become excessive, empty/reopen if (iLogMode != LOGCONSOLE) { fseek(pFileStream1, 0L, SEEK_END); if (ftell(pFileStream1) > 50000L) { fclose (pFileStream1); #ifdef _WIN32 // Windows version fopen_s(&pFileStream1, MASTERLOGFILE, "w"); #else pFileStream1 = fopen(MASTERLOGFILE, "w"); #endif } } } // Handle special Error Message code if (iLogMode & ERRORMSG) { // print string to console if flagged if (iLogMode & LOGCONSOLE) { szNumWritten = printf ("\n !!! Error # %i at ", iErrNum); // console } // print string to file if flagged if (iLogMode & LOGFILE) { szNumWritten = fprintf (pFileStream0, "\n !!! Error # %i at ", iErrNum); // sample log file } } // Vars used for variable argument processing const char* pStr; const char* cArg; int iArg; double dArg; unsigned int uiArg; std::string sFormatSpec; const std::string sFormatChars = " -+#0123456789.dioufnpcsXxEeGgAa"; const std::string sTypeChars = "dioufnpcsXxEeGgAa"; char cType = 'c'; // Start at the head of the string and scan to the null at the end for (pStr = cFormatString; *pStr; ++pStr) { // Check if the current character is not a formatting specifier ('%') if (*pStr != '%') { // character is not '%', so print it verbatim to console and/or files as flagged if (iLogMode & LOGCONSOLE) { szNumWritten = putc(*pStr, stdout); // console } if (iLogMode & LOGFILE) { szNumWritten = putc(*pStr, pFileStream0); // sample log file if (iLogMode & MASTER) { szNumWritten = putc(*pStr, pFileStream1); // master log file } } } else { // character is '%', so skip over it and read the full format specifier for the argument ++pStr; sFormatSpec = '%'; // special handling for string of %%%% bool bRepeater = (*pStr == '%'); if (bRepeater) { cType = '%'; } // chars after the '%' are part of format if on list of constants... 
scan until that isn't true or NULL is found while (pStr && ((sFormatChars.find(*pStr) != string::npos) || bRepeater)) { sFormatSpec += *pStr; // If the char is a type specifier, trap it and stop scanning // (a type specifier char is always the last in the format except for string of %%%) if (sTypeChars.find(*pStr) != string::npos) { cType = *pStr; break; } // Special handling for string of %%% // If a string of %%% was started and then it ends, break (There won't be a typical type specifier) if (bRepeater && (*pStr != '%')) { break; } pStr++; } // Now handle the arg according to type switch (cType) { case '%': // special handling for string of %%%% { if (iLogMode & LOGCONSOLE) { szNumWritten = printf(sFormatSpec.c_str()); // console } if (iLogMode & LOGFILE) { szNumWritten = fprintf (pFileStream0, sFormatSpec.c_str()); // sample log file if (iLogMode & MASTER) { szNumWritten = fprintf(pFileStream1, sFormatSpec.c_str()); // master log file } } continue; } case 'c': // single byte char case 's': // string of single byte chars { // Set cArg as the next value in list and print to console and/or files if flagged cArg = va_arg(vaArgList, char*); if (iLogMode & LOGCONSOLE) { szNumWritten = printf(sFormatSpec.c_str(), cArg); // console } if (iLogMode & LOGFILE) { szNumWritten = fprintf (pFileStream0, sFormatSpec.c_str(), cArg); // sample log file if (iLogMode & MASTER) { szNumWritten = fprintf(pFileStream1, sFormatSpec.c_str(), cArg); // master log file } } continue; } case 'd': // signed decimal integer case 'i': // signed decimal integer { // set iArg as the next value in list and print to console and/or files if flagged iArg = va_arg(vaArgList, int); if (iLogMode & LOGCONSOLE) { szNumWritten = printf(sFormatSpec.c_str(), iArg); // console } if (iLogMode & LOGFILE) { szNumWritten = fprintf (pFileStream0, sFormatSpec.c_str(), iArg); // sample log file if (iLogMode & MASTER) { szNumWritten = fprintf(pFileStream1, sFormatSpec.c_str(), iArg); // master log file } } continue; } case 'u': // unsigned decimal integer case 'o': // unsigned octal integer case 'x': // unsigned hexadecimal integer using "abcdef" case 'X': // unsigned hexadecimal integer using "ABCDEF" { // set uiArg as the next value in list and print to console and/or files if flagged uiArg = va_arg(vaArgList, unsigned int); if (iLogMode & LOGCONSOLE) { szNumWritten = printf(sFormatSpec.c_str(), uiArg); // console } if (iLogMode & LOGFILE) { szNumWritten = fprintf (pFileStream0, sFormatSpec.c_str(), uiArg); // sample log file if (iLogMode & MASTER) { szNumWritten = fprintf(pFileStream1, sFormatSpec.c_str(), uiArg); // master log file } } continue; } case 'f': // float/double case 'e': // scientific double/float case 'E': // scientific double/float case 'g': // scientific double/float case 'G': // scientific double/float case 'a': // signed hexadecimal double precision float case 'A': // signed hexadecimal double precision float { // set dArg as the next value in list and print to console and/or files if flagged dArg = va_arg(vaArgList, double); if (iLogMode & LOGCONSOLE) { szNumWritten = printf(sFormatSpec.c_str(), dArg); // console } if (iLogMode & LOGFILE) { szNumWritten = fprintf (pFileStream0, sFormatSpec.c_str(), dArg); // sample log file if (iLogMode & MASTER) { szNumWritten = fprintf(pFileStream1, sFormatSpec.c_str(), dArg); // master log file } } continue; } default: { // print arg of unknown/unsupported type to console and/or file if flagged if (iLogMode & LOGCONSOLE) // console { szNumWritten = putc(*pStr, stdout); } if 
(iLogMode & LOGFILE) { szNumWritten = putc(*pStr, pFileStream0); // sample log file if (iLogMode & MASTER) { szNumWritten = putc(*pStr, pFileStream1); // master log file } } } } } } // end the sample log with a horizontal line if closing if (iLogMode & CLOSELOG) { if (iLogMode & LOGCONSOLE) { printf(HDASHLINE); } if (iLogMode & LOGFILE) { fprintf(pFileStream0, HDASHLINE); } } // flush console and/or file buffers if updated if (iLogMode & LOGCONSOLE) { fflush(stdout); } if (iLogMode & LOGFILE) { fflush (pFileStream0); // if the master log file has been updated, flush it too if (iLogMode & MASTER) { fflush (pFileStream1); } } // If the log file is open and the caller requests "close file", then close and NULL file handle if ((pFileStream0) && (iLogMode & CLOSELOG)) { fclose (pFileStream0); pFileStream0 = NULL; } if ((pFileStream1) && (iLogMode & CLOSELOG)) { fclose (pFileStream1); pFileStream1 = NULL; } // return error code or OK if (iLogMode & ERRORMSG) { return iErrNum; } else { return 0; } } // Function to log standardized information to console, file or both // ********************************************************************* int shrLogEx(int iLogMode = LOGCONSOLE, int iErrNum = 0, const char* cFormatString = "", ...) { va_list vaArgList; // Prepare variable agument list va_start(vaArgList, cFormatString); int ret = shrLogV(iLogMode, iErrNum, cFormatString, vaArgList); // end variable argument handler va_end(vaArgList); return ret; } // Function to log standardized information to console, file or both // ********************************************************************* int shrLog(const char* cFormatString = "", ...) { va_list vaArgList; // Prepare variable agument list va_start(vaArgList, cFormatString); int ret = shrLogV(LOGBOTH, 0, cFormatString, vaArgList); // end variable argument handler va_end(vaArgList); return ret; } ////////////////////////////////////////////////////////////////////////////// //! Find the path for a file assuming that //! files are found in the searchPath. //! //! @return the path if succeeded, otherwise 0 //! @param filename name of the file //! @param executable_path optional absolute path of the executable ////////////////////////////////////////////////////////////////////////////// char* shrFindFilePath(const char* filename, const char* executable_path) { // <executable_name> defines a variable that is replaced with the name of the executable // Typical relative search paths to locate needed companion files (e.g. 
sample input data, or JIT source files) // The origin for the relative search may be the .exe file, a .bat file launching an .exe, a browser .exe launching the .exe or .bat, etc const char* searchPath[] = { "./", // same dir "./data/", // "/data/" subdir "./src/", // "/src/" subdir "./src/<executable_name>/data/", // "/src/<executable_name>/data/" subdir "./inc/", // "/inc/" subdir "../", // up 1 in tree "../data/", // up 1 in tree, "/data/" subdir "../src/", // up 1 in tree, "/src/" subdir "../inc/", // up 1 in tree, "/inc/" subdir "../OpenCL/src/<executable_name>/", // up 1 in tree, "/OpenCL/src/<executable_name>/" subdir "../OpenCL/src/<executable_name>/data/", // up 1 in tree, "/OpenCL/src/<executable_name>/data/" subdir "../OpenCL/src/<executable_name>/src/", // up 1 in tree, "/OpenCL/src/<executable_name>/src/" subdir "../OpenCL/src/<executable_name>/inc/", // up 1 in tree, "/OpenCL/src/<executable_name>/inc/" subdir "../C/src/<executable_name>/", // up 1 in tree, "/C/src/<executable_name>/" subdir "../C/src/<executable_name>/data/", // up 1 in tree, "/C/src/<executable_name>/data/" subdir "../C/src/<executable_name>/src/", // up 1 in tree, "/C/src/<executable_name>/src/" subdir "../C/src/<executable_name>/inc/", // up 1 in tree, "/C/src/<executable_name>/inc/" subdir "../DirectCompute/src/<executable_name>/", // up 1 in tree, "/DirectCompute/src/<executable_name>/" subdir "../DirectCompute/src/<executable_name>/data/", // up 1 in tree, "/DirectCompute/src/<executable_name>/data/" subdir "../DirectCompute/src/<executable_name>/src/", // up 1 in tree, "/DirectCompute/src/<executable_name>/src/" subdir "../DirectCompute/src/<executable_name>/inc/", // up 1 in tree, "/DirectCompute/src/<executable_name>/inc/" subdir "../../", // up 2 in tree "../../data/", // up 2 in tree, "/data/" subdir "../../src/", // up 2 in tree, "/src/" subdir "../../inc/", // up 2 in tree, "/inc/" subdir "../../../", // up 3 in tree "../../../src/<executable_name>/", // up 3 in tree, "/src/<executable_name>/" subdir "../../../src/<executable_name>/data/", // up 3 in tree, "/src/<executable_name>/data/" subdir "../../../src/<executable_name>/src/", // up 3 in tree, "/src/<executable_name>/src/" subdir "../../../src/<executable_name>/inc/", // up 3 in tree, "/src/<executable_name>/inc/" subdir "../../../sandbox/<executable_name>/", // up 3 in tree, "/sandbox/<executable_name>/" subdir "../../../sandbox/<executable_name>/data/", // up 3 in tree, "/sandbox/<executable_name>/data/" subdir "../../../sandbox/<executable_name>/src/", // up 3 in tree, "/sandbox/<executable_name>/src/" subdir "../../../sandbox/<executable_name>/inc/" // up 3 in tree, "/sandbox/<executable_name>/inc/" subdir }; // Extract the executable name std::string executable_name; if (executable_path != 0) { executable_name = std::string(executable_path); #ifdef _WIN32 // Windows path delimiter size_t delimiter_pos = executable_name.find_last_of('\\'); executable_name.erase(0, delimiter_pos + 1); if (executable_name.rfind(".exe") != string::npos) { // we strip .exe, only if the .exe is found executable_name.resize(executable_name.size() - 4); } #else // Linux & OSX path delimiter size_t delimiter_pos = executable_name.find_last_of('/'); executable_name.erase(0,delimiter_pos+1); #endif } // Loop over all search paths and return the first hit for( unsigned int i = 0; i < sizeof(searchPath)/sizeof(char*); ++i ) { std::string path(searchPath[i]); size_t executable_name_pos = path.find("<executable_name>"); // If there is executable_name variable in the 
searchPath // replace it with the value if(executable_name_pos != std::string::npos) { if(executable_path != 0) { path.replace(executable_name_pos, strlen("<executable_name>"), executable_name); } else { // Skip this path entry if no executable argument is given continue; } } // Test if the file exists path.append(filename); std::fstream fh(path.c_str(), std::fstream::in); if (fh.good()) { // File found // returning an allocated array here for backwards compatibility reasons char* file_path = (char*) malloc(path.length() + 1); #ifdef _WIN32 strcpy_s(file_path, path.length() + 1, path.c_str()); #else strcpy(file_path, path.c_str()); #endif return file_path; } } // File not found return 0; } ////////////////////////////////////////////////////////////////////////////// //! Read file \filename and return the data //! @return shrTRUE if reading the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data uninitialized pointer, returned initialized and pointing to //! the data read //! @param len number of data elements in data, -1 on error ////////////////////////////////////////////////////////////////////////////// template<class T> shrBOOL shrReadFile( const char* filename, T** data, unsigned int* len, bool verbose) { // check input arguments ARGCHECK(NULL != filename); ARGCHECK(NULL != len); // intermediate storage for the data read std::vector<T> data_read; // open file for reading std::fstream fh( filename, std::fstream::in); // check if filestream is valid if(!fh.good()) { if (verbose) std::cerr << "shrReadFile() : Opening file failed." << std::endl; return shrFALSE; } // read all data elements T token; while( fh.good()) { fh >> token; data_read.push_back( token); } // the last element is read twice data_read.pop_back(); // check if reading result is consistent if( ! fh.eof()) { if (verbose) std::cerr << "WARNING : readData() : reading file might have failed." << std::endl; } fh.close(); // check if the given handle is already initialized if( NULL != *data) { if( *len != data_read.size()) { std::cerr << "shrReadFile() : Initialized memory given but " << "size mismatch with signal read " << "(data read / data init = " << (unsigned int)data_read.size() << " / " << *len << ")" << std::endl; return shrFALSE; } } else { // allocate storage for the data read *data = (T*) malloc( sizeof(T) * data_read.size()); // store signal size *len = static_cast<unsigned int>( data_read.size()); } // copy data memcpy( *data, &data_read.front(), sizeof(T) * data_read.size()); return shrTRUE; } ////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename //! @return shrTRUE if writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! @param epsilon epsilon for comparison ////////////////////////////////////////////////////////////////////////////// template<class T> shrBOOL shrWriteFile( const char* filename, const T* data, unsigned int len, const T epsilon, bool verbose) { ARGCHECK(NULL != filename); ARGCHECK(NULL != data); // open file for writing std::fstream fh( filename, std::fstream::out); // check if filestream is valid if(!fh.good()) { if (verbose) std::cerr << "shrWriteFile() : Opening file failed." 
<< std::endl; return shrFALSE; } // first write epsilon fh << "# " << epsilon << "\n"; // write data for( unsigned int i = 0; (i < len) && (fh.good()); ++i) { fh << data[i] << ' '; } // Check if writing succeeded if( ! fh.good()) { if (verbose) std::cerr << "shrWriteFile() : Writing file failed." << std::endl; return shrFALSE; } // file ends with nl fh << std::endl; return shrTRUE; } //////////////////////////////////////////////////////////////////////////////// //! Read file \filename containg single precision floating point data //! @return shrTRUEif reading the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data uninitialized pointer, returned initialized and pointing to //! the data read //! @param len number of data elements in data, -1 on error //////////////////////////////////////////////////////////////////////////////// shrBOOL shrReadFilef( const char* filename, float** data, unsigned int* len, bool verbose) { return shrReadFile( filename, data, len, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Read file \filename containg double precision floating point data //! @return shrTRUEif reading the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data uninitialized pointer, returned initialized and pointing to //! the data read //! @param len number of data elements in data, -1 on error //////////////////////////////////////////////////////////////////////////////// shrBOOL shrReadFiled( const char* filename, double** data, unsigned int* len, bool verbose) { return shrReadFile( filename, data, len, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Read file \filename containg integer data //! @return shrTRUEif reading the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data uninitialized pointer, returned initialized and pointing to //! the data read //! @param len number of data elements in data, -1 on error //////////////////////////////////////////////////////////////////////////////// shrBOOL shrReadFilei( const char* filename, int** data, unsigned int* len, bool verbose) { return shrReadFile( filename, data, len, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Read file \filename containg unsigned integer data //! @return shrTRUEif reading the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data uninitialized pointer, returned initialized and pointing to //! the data read //! @param len number of data elements in data, -1 on error //////////////////////////////////////////////////////////////////////////////// shrBOOL shrReadFileui( const char* filename, unsigned int** data, unsigned int* len, bool verbose) { return shrReadFile( filename, data, len, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Read file \filename containg char / byte data //! @return shrTRUEif reading the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data uninitialized pointer, returned initialized and pointing to //! the data read //! 
@param len number of data elements in data, -1 on error //////////////////////////////////////////////////////////////////////////////// shrBOOL shrReadFileb( const char* filename, char** data, unsigned int* len, bool verbose) { return shrReadFile( filename, data, len, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Read file \filename containg unsigned char / byte data //! @return shrTRUEif reading the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data uninitialized pointer, returned initialized and pointing to //! the data read //! @param len number of data elements in data, -1 on error //////////////////////////////////////////////////////////////////////////////// shrBOOL shrReadFileub( const char* filename, unsigned char** data, unsigned int* len, bool verbose) { return shrReadFile( filename, data, len, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename for single precision floating point data //! @return shrTRUEif writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! @param epsilon epsilon for comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrWriteFilef( const char* filename, const float* data, unsigned int len, const float epsilon, bool verbose) { return shrWriteFile( filename, data, len, epsilon, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename for double precision floating point data //! @return shrTRUEif writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! @param epsilon epsilon for comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrWriteFiled( const char* filename, const double* data, unsigned int len, const double epsilon, bool verbose) { return shrWriteFile( filename, data, len, epsilon, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename for integer data //! @return shrTRUEif writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! @param epsilon epsilon for comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrWriteFilei( const char* filename, const int* data, unsigned int len, bool verbose) { return shrWriteFile( filename, data, len, 0, verbose); } //////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename for unsigned integer data //! @return shrTRUEif writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! 
@param epsilon epsilon for comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrWriteFileui( const char* filename,const unsigned int* data,unsigned int len, bool verbose) { return shrWriteFile( filename, data, len, static_cast<unsigned int>(0), verbose); } //////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename for byte / char data //! @return shrTRUEif writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! @param epsilon epsilon for comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrWriteFileb( const char* filename, const char* data, unsigned int len, bool verbose) { return shrWriteFile( filename, data, len, static_cast<char>(0), verbose); } //////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename for byte / char data //! @return shrTRUEif writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! @param epsilon epsilon for comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrWriteFileub( const char* filename, const unsigned char* data, unsigned int len, bool verbose) { return shrWriteFile( filename, data, len, static_cast<unsigned char>(0), verbose); } //////////////////////////////////////////////////////////////////////////////// //! Write a data file \filename for unsigned byte / char data //! @return shrTRUEif writing the file succeeded, otherwise shrFALSE //! @param filename name of the source file //! @param data data to write //! @param len number of data elements in data, -1 on error //! @param epsilon epsilon for comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrWriteFileb( const char* filename,const unsigned char* data,unsigned int len, bool verbose) { return shrWriteFile( filename, data, len, static_cast<unsigned char>(0), verbose); } ////////////////////////////////////////////////////////////////////////////// //! Load PGM or PPM file //! @note if data == NULL then the necessary memory is allocated in the //! function and w and h are initialized to the size of the image //! @return shrTRUE if the file loading succeeded, otherwise shrFALSE //! @param file name of the file to load //! @param data handle to the memory for the image file data //! @param w width of the image //! @param h height of the image //! 
@param channels number of channels in image ////////////////////////////////////////////////////////////////////////////// shrBOOL loadPPM(const char* file, unsigned char** data, unsigned int *w, unsigned int *h, unsigned int *channels) { FILE* fp = 0; #ifdef _WIN32 // open the file for binary read errno_t err; if ((err = fopen_s(&fp, file, "rb")) != 0) #else // open the file for binary read if ((fp = fopen(file, "rb")) == 0) #endif { // if error on attempt to open, be sure the file is null or close it, then return negative error code if (fp) { fclose (fp); } std::cerr << "loadPPM() : Failed to open file: " << file << std::endl; return shrFALSE; } // check header char header[PGMHeaderSize]; if ((fgets( header, PGMHeaderSize, fp) == NULL) && ferror(fp)) { if (fp) { fclose (fp); } std::cerr << "loadPPM() : File is not a valid PPM or PGM image" << std::endl; *channels = 0; return shrFALSE; } if (strncmp(header, "P5", 2) == 0) { *channels = 1; } else if (strncmp(header, "P6", 2) == 0) { *channels = 3; } else { std::cerr << "loadPPM() : File is not a PPM or PGM image" << std::endl; *channels = 0; return shrFALSE; } // parse header, read maxval, width and height unsigned int width = 0; unsigned int height = 0; unsigned int maxval = 0; unsigned int i = 0; while(i < 3) { if ((fgets(header, PGMHeaderSize, fp) == NULL) && ferror(fp)) { if (fp) { fclose (fp); } std::cerr << "loadPPM() : File is not a valid PPM or PGM image" << std::endl; return shrFALSE; } if(header[0] == '#') continue; #ifdef _WIN32 if(i == 0) { i += sscanf_s(header, "%u %u %u", &width, &height, &maxval); } else if (i == 1) { i += sscanf_s(header, "%u %u", &height, &maxval); } else if (i == 2) { i += sscanf_s(header, "%u", &maxval); } #else if(i == 0) { i += sscanf(header, "%u %u %u", &width, &height, &maxval); } else if (i == 1) { i += sscanf(header, "%u %u", &height, &maxval); } else if (i == 2) { i += sscanf(header, "%u", &maxval); } #endif } // check if given handle for the data is initialized if(NULL != *data) { if (*w != width || *h != height) { fclose(fp); std::cerr << "loadPPM() : Invalid image dimensions." << std::endl; return shrFALSE; } } else { *data = (unsigned char*)malloc( sizeof(unsigned char) * width * height * *channels); *w = width; *h = height; } // read and close file if (fread(*data, sizeof(unsigned char), width * height * *channels, fp) != width * height * *channels) { fclose(fp); std::cerr << "loadPPM() : Invalid image." << std::endl; return shrFALSE; } fclose(fp); return shrTRUE; } ////////////////////////////////////////////////////////////////////////////// //! Write / Save PPM or PGM file //! @note Internal usage only //! @param file name of the image file //! @param data handle to the data read //! @param w width of the image //! @param h height of the image ////////////////////////////////////////////////////////////////////////////// shrBOOL savePPM( const char* file, unsigned char *data, unsigned int w, unsigned int h, unsigned int channels) { ARGCHECK(NULL != data); ARGCHECK(w > 0); ARGCHECK(h > 0); std::fstream fh( file, std::fstream::out | std::fstream::binary ); if( fh.bad()) { std::cerr << "savePPM() : Opening file failed." << std::endl; return shrFALSE; } if (channels == 1) { fh << "P5\n"; } else if (channels == 3) { fh << "P6\n"; } else { std::cerr << "savePPM() : Invalid number of channels." 
<< std::endl; return shrFALSE; } fh << w << "\n" << h << "\n" << 0xff << std::endl; for( unsigned int i = 0; (i < (w*h*channels)) && fh.good(); ++i) { fh << data[i]; } fh.flush(); if( fh.bad()) { std::cerr << "savePPM() : Writing data failed." << std::endl; return shrFALSE; } fh.close(); return shrTRUE; } //////////////////////////////////////////////////////////////////////////////// //! Load PPM image file (with unsigned char as data element type), padding 4th component //! @return shrTrue if reading the file succeeded, otherwise shrFALSE //! @param file name of the image file //! @param data handle to the data read //! @param w width of the image //! @param h height of the image //////////////////////////////////////////////////////////////////////////////// shrBOOL shrLoadPPM4ub( const char* file, unsigned char** OutData, unsigned int *w, unsigned int *h) { // Load file data into a temporary buffer with automatic allocation unsigned char* cLocalData = 0; unsigned int channels; shrBOOL bLoadOK = loadPPM(file, &cLocalData, w, h, &channels); // this allocates cLocalData, which must be freed later // If the data loaded OK from file to temporary buffer, then go ahead with padding and transfer if (shrTRUE == bLoadOK) { // if the receiving buffer is null, allocate it... caller must free this int size = *w * *h; if (*OutData == NULL) { *OutData = (unsigned char*)malloc(sizeof(unsigned char) * size * 4); } // temp pointers for incrementing unsigned char* cTemp = cLocalData; unsigned char* cOutPtr = *OutData; // transfer data, padding 4th element for(int i=0; i<size; i++) { *cOutPtr++ = *cTemp++; *cOutPtr++ = *cTemp++; *cOutPtr++ = *cTemp++; *cOutPtr++ = 0; } // free temp lcoal buffer and return OK free(cLocalData); return shrTRUE; } else { // image wouldn't load free(cLocalData); return shrFALSE; } } //////////////////////////////////////////////////////////////////////////////// //! Save PPM image file (with unsigned char as data element type, padded to 4 byte) //! @return shrTrue if reading the file succeeded, otherwise shrFALSE //! @param file name of the image file //! @param data handle to the data read //! @param w width of the image //! @param h height of the image //////////////////////////////////////////////////////////////////////////////// shrBOOL shrSavePPM4ub( const char* file, unsigned char *data, unsigned int w, unsigned int h) { // strip 4th component int size = w * h; unsigned char *ndata = (unsigned char*) malloc( sizeof(unsigned char) * size*3); unsigned char *ptr = ndata; for(int i=0; i<size; i++) { *ptr++ = *data++; *ptr++ = *data++; *ptr++ = *data++; data++; } shrBOOL succ = savePPM(file, ndata, w, h, 3); free(ndata); return succ; } //////////////////////////////////////////////////////////////////////////////// //! Save PGM image file (with unsigned char as data element type) //! @param file name of the image file //! @param data handle to the data read //! @param w width of the image //! @param h height of the image //////////////////////////////////////////////////////////////////////////////// shrBOOL shrSavePGMub( const char* file, unsigned char *data, unsigned int w, unsigned int h) { return savePPM( file, data, w, h, 1); } //////////////////////////////////////////////////////////////////////////////// //! Load PGM image file (with unsigned char as data element type) //! @return shrTRUE if reading the file succeeded, otherwise shrFALSE //! @param file name of the image file //! @param data handle to the data read //! @param w width of the image //! 
@param h height of the image //////////////////////////////////////////////////////////////////////////////// shrBOOL shrLoadPGMub( const char* file, unsigned char** data, unsigned int *w,unsigned int *h) { unsigned int channels; return loadPPM( file, data, w, h, &channels); } //////////////////////////////////////////////////////////////////////////////// //! Check if command line argument \a flag-name is given //! @return shrTRUE if command line argument \a flag_name has been given, otherwise shrFALSE //! @param argc argc as passed to main() //! @param argv argv as passed to main() //! @param flag_name name of command line flag //////////////////////////////////////////////////////////////////////////////// shrBOOL shrCheckCmdLineFlag( const int argc, const char** argv, const char* flag_name) { shrBOOL ret_val = shrFALSE; try { // initalize CmdArgReader::init( argc, argv); // check if the command line argument exists if( CmdArgReader::existArg( flag_name)) { ret_val = shrTRUE; } } catch( const std::exception& /*ex*/) { std::cerr << "Error when parsing command line argument string." << std::endl; } return ret_val; } //////////////////////////////////////////////////////////////////////////////// //! Get the value of a command line argument of type int //! @return shrTRUE if command line argument \a arg_name has been given and //! is of the requested type, otherwise shrFALSE //! @param argc argc as passed to main() //! @param argv argv as passed to main() //! @param arg_name name of the command line argument //! @param val value of the command line argument //////////////////////////////////////////////////////////////////////////////// shrBOOL shrGetCmdLineArgumenti( const int argc, const char** argv, const char* arg_name, int* val) { shrBOOL ret_val = shrFALSE; try { // initialize CmdArgReader::init( argc, argv); // access argument const int* v = CmdArgReader::getArg<int>( arg_name); if( NULL != v) { // assign value *val = *v; ret_val = shrTRUE; } else { // fail safe val = NULL; } } catch( const std::exception& /*ex*/) { std::cerr << "Error when parsing command line argument string." << std::endl; } return ret_val; } //////////////////////////////////////////////////////////////////////////////// //! Get the value of a command line argument of type unsigned int //! @return shrTRUE if command line argument \a arg_name has been given and //! is of the requested type, otherwise shrFALSE //! @param argc argc as passed to main() //! @param argv argv as passed to main() //! @param arg_name name of the command line argument //! @param val value of the command line argument //////////////////////////////////////////////////////////////////////////////// shrBOOL shrGetCmdLineArgumentu( const int argc, const char** argv, const char* arg_name, unsigned int* val) { shrBOOL ret_val = shrFALSE; try { // initialize CmdArgReader::init( argc, argv); // access argument const int* v = CmdArgReader::getArg<int>( arg_name); if( NULL != v) { // assign value *val = *v; ret_val = shrTRUE; } else { // fail safe val = NULL; } } catch( const std::exception& /*ex*/) { std::cerr << "Error when parsing command line argument string." << std::endl; } return ret_val; } //////////////////////////////////////////////////////////////////////////////// //! Get the value of a command line argument of type float //! @return shrTRUE if command line argument \a arg_name has been given and //! is of the requested type, otherwise shrFALSE //! @param argc argc as passed to main() //! @param argv argv as passed to main() //! 
@param arg_name name of the command line argument //! @param val value of the command line argument //////////////////////////////////////////////////////////////////////////////// shrBOOL shrGetCmdLineArgumentf( const int argc, const char** argv, const char* arg_name, float* val) { shrBOOL ret_val = shrFALSE; try { // initialize CmdArgReader::init( argc, argv); // access argument const float* v = CmdArgReader::getArg<float>( arg_name); if( NULL != v) { // assign value *val = *v; ret_val = shrTRUE; } else { // fail safe val = NULL; } } catch( const std::exception& /*ex*/) { std::cerr << "Error when parsing command line argument string." << std::endl; } return ret_val; } //////////////////////////////////////////////////////////////////////////////// //! Get the value of a command line argument of type string //! @return shrTRUE if command line argument \a arg_name has been given and //! is of the requested type, otherwise shrFALSE //! @param argc argc as passed to main() //! @param argv argv as passed to main() //! @param arg_name name of the command line argument //! @param val value of the command line argument //////////////////////////////////////////////////////////////////////////////// shrBOOL shrGetCmdLineArgumentstr( const int argc, const char** argv, const char* arg_name, char** val) { shrBOOL ret_val = shrFALSE; try { // initialize CmdArgReader::init( argc, argv); // access argument const std::string* v = CmdArgReader::getArg<std::string>( arg_name); if( NULL != v) { // allocate memory for the string *val = (char*)malloc(sizeof(char) * (v->length() + 1)); // copy from string to c_str #ifdef WIN32 strcpy_s(*val, v->length() + 1, v->c_str()); #else strcpy(*val, v->c_str()); #endif ret_val = shrTRUE; } else { // fail safe *val = NULL; } } catch( const std::exception& /*ex*/) { std::cerr << "Error when parsing command line argument string."<< std::endl; } return ret_val; } ////////////////////////////////////////////////////////////////////////////// //! Compare two arrays of arbitrary type //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //! @param epsilon epsilon to use for the comparison ////////////////////////////////////////////////////////////////////////////// template<class T, class S> shrBOOL compareData( const T* reference, const T* data, const unsigned int len, const S epsilon, const float threshold) { ARGCHECK( epsilon >= 0); bool result = true; unsigned int error_count = 0; for( unsigned int i = 0; i < len; ++i) { T diff = reference[i] - data[i]; bool comp = (diff <= epsilon) && (diff >= -epsilon); result &= comp; error_count += !comp; #ifdef _DEBUG if( ! comp) { std::cerr << "ERROR, i = " << i << ",\t " << reference[i] << " / " << data[i] << " (reference / data)\n"; } #endif } if (threshold == 0.0f) { return (result) ? shrTRUE : shrFALSE; } else { return (len*threshold > error_count) ? shrTRUE : shrFALSE; } } ////////////////////////////////////////////////////////////////////////////// //! Compare two arrays of arbitrary type //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //! 
@param epsilon epsilon to use for the comparison ////////////////////////////////////////////////////////////////////////////// template<class T, class S> shrBOOL compareDataAsFloat( const T* reference, const T* data, const unsigned int len, const S epsilon) { ARGCHECK(epsilon >= 0); // If we set epsilon to be 0, let's set a minimum threshold float max_error = MAX( (float)epsilon, MIN_EPSILON_ERROR ); int error_count = 0; bool result = true; for( unsigned int i = 0; i < len; ++i) { float diff = fabs((float)reference[i] - (float)data[i]); bool comp = (diff < max_error); result &= comp; if( ! comp) { error_count++; #ifdef _DEBUG if (error_count < 50) { shrLog("\n ERROR(epsilon=%4.3f), i=%d, (ref)0x%02x / (data)0x%02x / (diff)%d\n", max_error, i, reference[i], data[i], (unsigned int)diff); } #endif } } if (error_count) { shrLog("\n Total # of errors = %d\n", error_count); } return (error_count == 0) ? shrTRUE : shrFALSE; } ////////////////////////////////////////////////////////////////////////////// //! Compare two arrays of arbitrary type //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //! @param epsilon epsilon to use for the comparison //! @param epsilon threshold % of (# of bytes) for pass/fail ////////////////////////////////////////////////////////////////////////////// template<class T, class S> shrBOOL compareDataAsFloatThreshold( const T* reference, const T* data, const unsigned int len, const S epsilon, const float threshold) { ARGCHECK(epsilon >= 0); // If we set epsilon to be 0, let's set a minimum threshold float max_error = MAX( (float)epsilon, MIN_EPSILON_ERROR); int error_count = 0; bool result = true; for( unsigned int i = 0; i < len; ++i) { float diff = fabs((float)reference[i] - (float)data[i]); bool comp = (diff < max_error); result &= comp; if( ! comp) { error_count++; #ifdef _DEBUG if (error_count < 50) { shrLog("\n ERROR(epsilon=%4.3f), i=%d, (ref)0x%02x / (data)0x%02x / (diff)%d\n", max_error, i, reference[i], data[i], (unsigned int)diff); } #endif } } if (threshold == 0.0f) { if (error_count) { shrLog("\n Total # of errors = %d\n", error_count); } return (error_count == 0) ? shrTRUE : shrFALSE; } else { if (error_count) { shrLog("\n %.2f(%%) of bytes mismatched (count=%d)\n", (float)error_count*100/(float)len, error_count); } return ((len*threshold > error_count) ? shrTRUE : shrFALSE); } } //////////////////////////////////////////////////////////////////////////////// //! Compare two float arrays //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //////////////////////////////////////////////////////////////////////////////// shrBOOL shrComparef( const float* reference, const float* data, const unsigned int len ) { const float epsilon = 0.0; return compareData( reference, data, len, epsilon, 0.0f ); } //////////////////////////////////////////////////////////////////////////////// //! Compare two integer arrays //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! 
@param len number of elements in reference and data //////////////////////////////////////////////////////////////////////////////// shrBOOL shrComparei( const int* reference, const int* data, const unsigned int len ) { const int epsilon = 0; return compareData( reference, data, len, epsilon, 0.0f); } //////////////////////////////////////////////////////////////////////////////// //! Compare two unsigned integer arrays, with epsilon and threshold //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //////////////////////////////////////////////////////////////////////////////// shrBOOL shrCompareuit( const unsigned int* reference, const unsigned int* data, const unsigned int len, const float epsilon, const float threshold ) { return compareDataAsFloatThreshold( reference, data, len, epsilon, threshold ); } //////////////////////////////////////////////////////////////////////////////// //! Compare two integer arrays //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //////////////////////////////////////////////////////////////////////////////// shrBOOL shrCompareub( const unsigned char* reference, const unsigned char* data, const unsigned int len ) { const int epsilon = 0; return compareData( reference, data, len, epsilon, 0.0f); } //////////////////////////////////////////////////////////////////////////////// //! Compare two integer arrays (inc Threshold for # of pixel we can have errors) //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //////////////////////////////////////////////////////////////////////////////// shrBOOL shrCompareubt( const unsigned char* reference, const unsigned char* data, const unsigned int len, const float epsilon, const float threshold ) { return compareDataAsFloatThreshold( reference, data, len, epsilon, threshold ); } //////////////////////////////////////////////////////////////////////////////// //! Compare two integer arrays //! @return shrTRUE if \a reference and \a data are identical, //! otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //////////////////////////////////////////////////////////////////////////////// shrBOOL shrCompareube( const unsigned char* reference, const unsigned char* data, const unsigned int len, const float epsilon ) { return compareDataAsFloat( reference, data, len, epsilon ); } //////////////////////////////////////////////////////////////////////////////// //! Compare two float arrays with an epsilon tolerance for equality //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //! 
@param epsilon epsilon to use for the comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrComparefe( const float* reference, const float* data, const unsigned int len, const float epsilon ) { return compareData( reference, data, len, epsilon, 0.0f); } //////////////////////////////////////////////////////////////////////////////// //! Compare two float arrays with an epsilon tolerance for equality and a //! threshold for # pixel errors //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //! @param epsilon epsilon to use for the comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrComparefet( const float* reference, const float* data, const unsigned int len, const float epsilon, const float threshold ) { return compareDataAsFloatThreshold( reference, data, len, epsilon, threshold ); } //////////////////////////////////////////////////////////////////////////////// //! Compare two float arrays using L2-norm with an epsilon tolerance for equality //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param reference handle to the reference data / gold image //! @param data handle to the computed data //! @param len number of elements in reference and data //! @param epsilon epsilon to use for the comparison //////////////////////////////////////////////////////////////////////////////// shrBOOL shrCompareL2fe( const float* reference, const float* data, const unsigned int len, const float epsilon ) { ARGCHECK(epsilon >= 0); float error = 0; float ref = 0; for( unsigned int i = 0; i < len; ++i) { float diff = reference[i] - data[i]; error += diff * diff; ref += reference[i] * reference[i]; } float normRef = sqrtf(ref); if (fabs(ref) < 1e-7) { #ifdef _DEBUG std::cerr << "ERROR, reference l2-norm is 0\n"; #endif return shrFALSE; } float normError = sqrtf(error); error = normError / normRef; bool result = error < epsilon; #ifdef _DEBUG if( ! result) { std::cerr << "ERROR, l2-norm error " << error << " is greater than epsilon " << epsilon << "\n"; } #endif return result ? shrTRUE : shrFALSE; } //////////////////////////////////////////////////////////////////////////////// //! Compare two PPM image files with an epsilon tolerance for equality //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param src_file filename for the image to be compared //! @param data filename for the reference data / gold image //! @param epsilon epsilon to use for the comparison //! @param threshold threshold of pixels that can still mismatch to pass (i.e. 0.15f = 15% must pass) //! 
@param verboseErrors output details of image mismatch to std::cerr //////////////////////////////////////////////////////////////////////////////// shrBOOL shrComparePPM( const char *src_file, const char *ref_file, const float epsilon, const float threshold) { unsigned char* src_data = NULL; unsigned char* ref_data = NULL; unsigned long error_count = 0; unsigned int ref_width, ref_height; unsigned int src_width, src_height; // Check sample and reference file pointers if (src_file == NULL || ref_file == NULL) { shrLog("\n> shrComparePGM: src_file or ref_file is NULL\n Aborting comparison !!!\n\n"); return shrFALSE; } shrLog("\n> shrComparePPM:\n (a)rendered: <%s>\n (b)reference: <%s>\n", src_file, ref_file); // Load the ref image file if (shrLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != shrTRUE) { shrLog("\n Unable to load ref image file: %s\n Aborting comparison !!!\n\n", ref_file); return shrFALSE; } // Load the sample image file if (shrLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != shrTRUE) { shrLog("\n Unable to load src image file: %s\n Aborting comparison !!!\n\n", src_file); return shrFALSE; } // check to see if image dimensions match if(src_height != ref_height || src_width != ref_width) { shrLog("\n Source and ref size mismatch (%u x %u) vs (%u x %u)\n Aborting Comparison !!!\n\n ", src_width, src_height, ref_width, ref_height); return shrFALSE; } // compare the images if (shrCompareubt(ref_data, src_data, src_width*src_height*4, epsilon, threshold ) == shrFALSE) { error_count=1; } shrLog(" Images %s\n\n", (error_count == 0) ? "Match" : "Don't Match !!!"); return (error_count == 0) ? shrTRUE : shrFALSE; // returns true if all pixels pass } //////////////////////////////////////////////////////////////////////////////// //! Compare two PGM image files with an epsilon tolerance for equality //! @return shrTRUE if \a reference and \a data are identical, otherwise shrFALSE //! @param src_file filename for the image to be compared //! @param data filename for the reference data / gold image //! @param epsilon epsilon to use for the comparison //! @param threshold threshold of pixels that can still mismatch to pass (i.e. 
0.15f = 15% must pass) //////////////////////////////////////////////////////////////////////////////// shrBOOL shrComparePGM( const char *src_file, const char *ref_file, const float epsilon, const float threshold) { unsigned char* src_data = NULL; unsigned char* ref_data = NULL; unsigned long error_count = 0; unsigned int ref_width, ref_height; unsigned int src_width, src_height; // Check sample and reference file pointers if (src_file == NULL || ref_file == NULL) { shrLog("\n> shrComparePGM: src_file or ref_file is NULL\n Aborting comparison !!!\n\n"); return shrFALSE; } shrLog("\n> shrComparePGM:\n (a)rendered: <%s>\n (b)reference: <%s>\n", src_file, ref_file); // Load the ref image file if (shrLoadPPM4ub(ref_file, &ref_data, &ref_width, &ref_height) != shrTRUE) { shrLog("\n Unable to load ref image file: %s\n Aborting comparison !!!\n\n", ref_file); return shrFALSE; } // Load the sample image file if (shrLoadPPM4ub(src_file, &src_data, &src_width, &src_height) != shrTRUE) { shrLog("\n Unable to load src image file: %s\n Aborting comparison !!!\n\n", src_file); return shrFALSE; } // check to see if image dimensions match if(src_height != ref_height || src_width != ref_width) { shrLog("\n Source and ref size mismatch (%u x %u) vs (%u x %u)\n Aborting Comparison !!!\n\n ", src_width, src_height, ref_width, ref_height); return shrFALSE; } // compare the images if (shrCompareubt(ref_data, src_data, src_width*src_height*4, epsilon, threshold ) == shrFALSE) { error_count=1; } shrLog(" Images %s\n\n", (error_count == 0) ? "Match" : "Don't Match !!!"); return (error_count == 0) ? shrTRUE : shrFALSE; // returns true if all pixels pass } // Load raw data from disk unsigned char* shrLoadRawFile(const char* filename, size_t size) { FILE *fp = NULL; #ifdef WIN32 errno_t err; if ((err = fopen_s(&fp, filename, "rb")) != 0) #else if ((fp = fopen(filename, "rb")) == NULL) #endif { shrLog(" Error opening file '%s' !!!\n", filename); return 0; } unsigned char* data = (unsigned char*)malloc(size); size_t read = fread(data, 1, size, fp); fclose(fp); shrLog(" Read '%s', %d bytes\n", filename, read); return data; } // Round Up Division function size_t shrRoundUp(int group_size, int global_size) { int r = global_size % group_size; if(r == 0) { return global_size; } else { return global_size + group_size - r; } }
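/* --------------------------------------------------------------------------
   Illustrative usage sketch (not part of the original utilities): shows how a
   rounded-up global work size pairs with a fixed work-group size, which is
   the typical reason shrRoundUp exists. The local round_up copy and the
   names below are assumptions made only so the snippet compiles on its own.
   -------------------------------------------------------------------------- */
#include <cstdio>

static size_t round_up(int group_size, int global_size)   // mirrors shrRoundUp above
{
    int r = global_size % group_size;
    return (r == 0) ? (size_t)global_size : (size_t)(global_size + group_size - r);
}

int main()
{
    const int group_size = 256;     // threads per work-group / block
    const int n_elements = 1000;    // problem size
    size_t global_size = round_up(group_size, n_elements);   // -> 1024
    std::printf("global=%zu local=%d groups=%zu\n",
                global_size, group_size, global_size / (size_t)group_size);
    return 0;
}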
template <typename scalar_t> __global__ void FaceAreasNormalsForwardKernel( const scalar_t* __restrict__ verts, const int64_t* __restrict__ faces, scalar_t* __restrict__ face_areas, scalar_t* __restrict__ face_normals, const size_t V, const size_t F) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = gridDim.x * blockDim.x; // Faces split evenly over the number of threads in the grid. // Each thread computes the area & normal of its respective faces and adds it // to the global face_areas tensor. for (size_t f = tid; f < F; f += stride) { const int64_t i0 = faces[3 * f + 0]; const int64_t i1 = faces[3 * f + 1]; const int64_t i2 = faces[3 * f + 2]; const scalar_t v0_x = verts[3 * i0 + 0]; const scalar_t v0_y = verts[3 * i0 + 1]; const scalar_t v0_z = verts[3 * i0 + 2]; const scalar_t v1_x = verts[3 * i1 + 0]; const scalar_t v1_y = verts[3 * i1 + 1]; const scalar_t v1_z = verts[3 * i1 + 2]; const scalar_t v2_x = verts[3 * i2 + 0]; const scalar_t v2_y = verts[3 * i2 + 1]; const scalar_t v2_z = verts[3 * i2 + 2]; const scalar_t ax = v1_x - v0_x; const scalar_t ay = v1_y - v0_y; const scalar_t az = v1_z - v0_z; const scalar_t bx = v2_x - v0_x; const scalar_t by = v2_y - v0_y; const scalar_t bz = v2_z - v0_z; const scalar_t cx = ay * bz - az * by; const scalar_t cy = az * bx - ax * bz; const scalar_t cz = ax * by - ay * bx; scalar_t norm = sqrt(cx * cx + cy * cy + cz * cz); face_areas[f] = norm / 2.0; norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6) face_normals[3 * f + 0] = cx / norm; face_normals[3 * f + 1] = cy / norm; face_normals[3 * f + 2] = cz / norm; } } // TODO(gkioxari) support all data types once AtomicAdd supports doubles. // Currently, support is for floats only. __global__ void FaceAreasNormalsBackwardKernel( const float* __restrict__ grad_areas, const float* __restrict__ grad_normals, const float* __restrict__ verts, const int64_t* __restrict__ faces, float* __restrict__ grad_verts, const size_t V, const size_t F) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = gridDim.x * blockDim.x; // Faces split evenly over the number of threads in the grid. // Each thread computes the area & normal of its respective faces and adds it // to the global face_areas tensor. for (size_t f = tid; f < F; f += stride) { const int64_t i0 = faces[3 * f + 0]; const int64_t i1 = faces[3 * f + 1]; const int64_t i2 = faces[3 * f + 2]; const float v0_x = verts[3 * i0 + 0]; const float v0_y = verts[3 * i0 + 1]; const float v0_z = verts[3 * i0 + 2]; const float v1_x = verts[3 * i1 + 0]; const float v1_y = verts[3 * i1 + 1]; const float v1_z = verts[3 * i1 + 2]; const float v2_x = verts[3 * i2 + 0]; const float v2_y = verts[3 * i2 + 1]; const float v2_z = verts[3 * i2 + 2]; const float ax = v1_x - v0_x; const float ay = v1_y - v0_y; const float az = v1_z - v0_z; const float bx = v2_x - v0_x; const float by = v2_y - v0_y; const float bz = v2_z - v0_z; const float cx = ay * bz - az * by; const float cy = az * bx - ax * bz; const float cz = ax * by - ay * bx; float norm = sqrt(cx * cx + cy * cy + cz * cz); norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6) float inv_norm = 1. / norm; float inv_norm_2 = pow(inv_norm, 2.0f); float inv_norm_3 = pow(inv_norm, 3.0f); // We compute gradients with respect to the input vertices. // For each vertex, gradients come from grad_areas and grad_normals. 
// eg, grad_v0_x = (d / d v0_x) // = \sum_f (d / d areas[f]) * (d areas[f] / d v0_x) // + (d / d normals[f, 0]) * (d normals[f, 0] / d v0_x) // + (d / d normals[f, 1]) * (d normals[f, 1] / d v0_x) // + (d / d normals[f, 2]) * (d normals[f, 2] / d v0_x) // with (d / d areas[f]) = grad_areas[f] and // (d / d normals[f, j]) = grad_normals[f][j]. // The equations below are derived after taking // derivatives wrt to the vertices (fun times!). // grad v0 coming from grad areas and grad normals const float grad_v0_x = ((-az + bz) * cy + (-by + ay) * cz) / 2.0 * inv_norm * grad_areas[f] + -cx * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_3 * grad_normals[3 * f + 0] + ((-az + bz) - cy * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 1] + ((-by + ay) - cz * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i0 + 0, grad_v0_x); const float grad_v0_y = ((-bz + az) * cx + (-ax + bx) * cz) / 2.0 * inv_norm * grad_areas[f] + ((-bz + az) - cx * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 0] + -cy * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_3 * grad_normals[3 * f + 1] + ((-ax + bx) - cz * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i0 + 1, grad_v0_y); const float grad_v0_z = ((-ay + by) * cx + (-bx + ax) * cy) / 2.0 * inv_norm * grad_areas[f] + ((-ay + by) - cx * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) * inv_norm * grad_normals[3 * f + 0] + ((-bx + ax) - cy * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) * inv_norm * grad_normals[3 * f + 1] + -cz * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_3 * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i0 + 2, grad_v0_z); // grad v1 coming from grad areas and grad normals const float grad_v1_x = (by * cz - bz * cy) / 2.0 * inv_norm * grad_areas[f] + -cx * (by * cz - bz * cy) * inv_norm_3 * grad_normals[3 * f + 0] + (-bz - cy * (by * cz - bz * cy) * inv_norm_2) * inv_norm * grad_normals[3 * f + 1] + (by - cz * (by * cz - bz * cy) * inv_norm_2) * inv_norm * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i1 + 0, grad_v1_x); const float grad_v1_y = (bz * cx - bx * cz) / 2.0 * inv_norm * grad_areas[f] + (bz - cx * (bz * cx - bx * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 0] + -cy * (bz * cx - bx * cz) * inv_norm_3 * grad_normals[3 * f + 1] + (-bx - cz * (bz * cx - bx * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i1 + 1, grad_v1_y); const float grad_v1_z = (bx * cy - by * cx) / 2.0 * inv_norm * grad_areas[f] + (-by - cx * (bx * cy - by * cx) * inv_norm_2) * inv_norm * grad_normals[3 * f + 0] + (bx - cx * (bx * cy - by * cx) * inv_norm_2) * inv_norm * grad_normals[3 * f + 1] + -cz * (bx * cy - by * cx) * inv_norm_3 * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i1 + 2, grad_v1_z); // grad v2 coming from grad areas const float grad_v2_x = (az * cy - ay * cz) / 2.0 * inv_norm * grad_areas[f] + -cx * (az * cy - ay * cz) * inv_norm_3 * grad_normals[3 * f + 0] + (az - cy * (az * cy - ay * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 1] + (-ay - cz * (az * cy - ay * cz) * inv_norm_2) * inv_norm * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i2 + 0, grad_v2_x); const float grad_v2_y = (ax * cz - az * cx) / 2.0 * inv_norm * grad_areas[f] + (-az - cx * (ax * cz - az * cx) * inv_norm_2) * inv_norm * grad_normals[3 * f + 0] + -cy * (ax * cz - az * cx) * inv_norm_3 * 
grad_normals[3 * f + 1] + (ax - cz * (ax * cz - az * cx) * inv_norm_2) * inv_norm * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i2 + 1, grad_v2_y); const float grad_v2_z = (ay * cx - ax * cy) / 2.0 * inv_norm * grad_areas[f] + (ay - cx * (ay * cx - ax * cy) * inv_norm_2) * inv_norm * grad_normals[3 * f + 0] + (-ax - cy * (ay * cx - ax * cy) * inv_norm_2) * inv_norm * grad_normals[3 * f + 1] + -cz * (ay * cx - ax * cy) * inv_norm_3 * grad_normals[3 * f + 2]; atomicAdd(grad_verts + 3 * i2 + 2, grad_v2_z); } } std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda( const at::Tensor verts, const at::Tensor faces) { const auto V = verts.size(0); const auto F = faces.size(0); // Check inputs are on the same device at::TensorArg verts_t{verts, "verts", 1}, faces_t{faces, "faces", 2}; at::CheckedFrom c = "FaceAreasNormalsForwardCuda"; at::checkAllSameGPU(c, {verts_t, faces_t}); // Set the device for the kernel launch based on the device of verts at::cuda::CUDAGuard device_guard(verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); at::Tensor areas = at::empty({F}, verts.options()); at::Tensor normals = at::empty({F, 3}, verts.options()); if (areas.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(areas, normals); } const int blocks = 64; const int threads = 512; AT_DISPATCH_FLOATING_TYPES( verts.scalar_type(), "face_areas_normals_forward_cuda", ([&] { FaceAreasNormalsForwardKernel<scalar_t><<<blocks, threads, 0, stream>>>( verts.contiguous().data_ptr<scalar_t>(), faces.contiguous().data_ptr<int64_t>(), areas.data_ptr<scalar_t>(), normals.data_ptr<scalar_t>(), V, F); })); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(areas, normals); } at::Tensor FaceAreasNormalsBackwardCuda( const at::Tensor grad_areas, const at::Tensor grad_normals, const at::Tensor verts, const at::Tensor faces) { // Check inputs are on the same device at::TensorArg verts_t{verts, "verts", 1}, faces_t{faces, "faces", 2}, grad_areas_t{grad_areas, "grad_areas", 3}, grad_normals_t{grad_normals, "grad_normals", 4}; at::CheckedFrom c = "FaceAreasNormalsBackwardCuda"; at::checkAllSameGPU(c, {verts_t, faces_t, grad_areas_t, grad_normals_t}); // Set the device for the kernel launch based on the device of verts at::cuda::CUDAGuard device_guard(verts.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); const auto V = verts.size(0); const auto F = faces.size(0); at::Tensor grad_verts = at::zeros({V, 3}, grad_areas.options()); if (grad_verts.numel() == 0) { AT_CUDA_CHECK(cudaGetLastError()); return grad_verts; } const int blocks = 64; const int threads = 512; // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports // doubles. Currently, support is for floats only. FaceAreasNormalsBackwardKernel<<<blocks, threads, 0, stream>>>( grad_areas.contiguous().data_ptr<float>(), grad_normals.contiguous().data_ptr<float>(), verts.contiguous().data_ptr<float>(), faces.contiguous().data_ptr<int64_t>(), grad_verts.data_ptr<float>(), V, F); AT_CUDA_CHECK(cudaGetLastError()); return grad_verts; }
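/* --------------------------------------------------------------------------
   Minimal host-side sketch (not from the original file) of the per-face math
   used by FaceAreasNormalsForwardKernel above: the cross product of two edge
   vectors has length equal to twice the triangle area and points along the
   face normal. All names below are illustrative.
   -------------------------------------------------------------------------- */
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };

static Vec3 sub(Vec3 a, Vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static Vec3 cross(Vec3 a, Vec3 b) {
  return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}

int main() {
  Vec3 v0{0, 0, 0}, v1{1, 0, 0}, v2{0, 1, 0};  // right triangle in the xy-plane
  Vec3 c = cross(sub(v1, v0), sub(v2, v0));
  double norm = std::sqrt(c.x * c.x + c.y * c.y + c.z * c.z);
  double area = norm / 2.0;                    // 0.5 for this triangle
  norm = (norm < 1e-6) ? 1e-6 : norm;          // same clamp as the kernel
  std::printf("area=%f normal=(%f, %f, %f)\n", area, c.x / norm, c.y / norm, c.z / norm);
  return 0;
}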
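/* --------------------------------------------------------------------------
   Hedged usage sketch: how FaceAreasNormalsForwardCuda above might be called
   from host-side C++ in a PyTorch extension. Tensor shapes follow the
   kernel's indexing (verts: V x 3 float, faces: F x 3 int64, both on the
   same GPU). This is an illustration, not code from the original project.
   -------------------------------------------------------------------------- */
#include <torch/torch.h>
#include <tuple>

std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda(
    const at::Tensor verts, const at::Tensor faces);

void face_areas_normals_example() {
  auto opts_f = torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA);
  auto opts_i = torch::TensorOptions().dtype(torch::kInt64).device(torch::kCUDA);
  at::Tensor verts = torch::rand({100, 3}, opts_f);            // V x 3 vertices
  at::Tensor faces = torch::randint(0, 100, {50, 3}, opts_i);  // F x 3 vertex indices
  at::Tensor areas, normals;
  std::tie(areas, normals) = FaceAreasNormalsForwardCuda(verts, faces);
  // areas: (F,), normals: (F, 3)
}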
// #include <mem.h> #include "interp.h" #include "GSplat.h" #include "GSplat.cuh" #include <GMemOpers.h> // #include "GImageFieldOpers.h" // #include "GImageOpers.h" // #include "FOpers.h" #include "FiniteDiff.h" // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { //////////////////////////////////////////////////////////////////////////// // compose a velocity and hfield to get an hfield // h(x) = g(x) + delta * v(g(x)) //////////////////////////////////////////////////////////////////////////// __device__ __constant__ float c_delta; __device__ __constant__ float c_trans[3]; template<bool fwd, BackgroundStrategy bg> __global__ void ComposeVH_kernel(float* d_hx, float* d_hy, float* d_hz, const float* d_vx, const float* d_vy, const float* d_vz, const float* d_gx, const float* d_gy, const float* d_gz, float delta, int w, int h, int l, float ispX, float ispY, float ispZ) { uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float gx = d_gx[id]; float gy = d_gy[id]; float gz = d_gz[id]; float vgx, vgy, vgz; triLerp<bg>(vgx, vgy, vgz, d_vx, d_vy, d_vz, gx, gy, gz, w, h, l); if (fwd){ d_hx[id] = gx + delta * ispX * vgx ; d_hy[id] = gy + delta * ispY * vgy; d_hz[id] = gz + delta * ispZ * vgz; } else { d_hx[id] = gx - delta * ispX * vgx; d_hy[id] = gy - delta * ispY * vgy; d_hz[id] = gz - delta * ispZ * vgz; } } } } template<bool fwd, BackgroundStrategy bg> __global__ void ComposeVH_const_kernel(float* d_hx, float* d_hy, float* d_hz, const float* d_vx, const float* d_vy, const float* d_vz, const float* d_gx, const float* d_gy, const float* d_gz, int w, int h, int l, float ispX, float ispY, float ispZ) { uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; float delta = c_delta; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float gx = d_gx[id]; float gy = d_gy[id]; float gz = d_gz[id]; float vgx, vgy, vgz; triLerp<bg>(vgx, vgy, vgz, d_vx, d_vy, d_vz, gx, gy, gz, w, h, l); if (fwd){ d_hx[id] = gx + delta * ispX * vgx ; d_hy[id] = gy + delta * ispY * vgy; d_hz[id] = gz + delta * ispZ * vgz; } else { d_hx[id] = gx - delta * ispX * vgx; d_hy[id] = gy - delta * ispY * vgy; d_hz[id] = gz - delta * ispZ * vgz; } } } } // what is this for?? 
jsp2014 template<bool fwd, BackgroundStrategy bg> __global__ void ComposeVH_kernel(float* d_hx, const float* d_vx, const float* d_gx, int nAlign, float delta, int w, int h, int l, float ispX, float ispY, float ispZ) { uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float gx = d_gx[id ]; float gy = d_gx[id + nAlign ]; float gz = d_gx[id + 2 * nAlign]; float vgx, vgy, vgz; triLerp<bg>(vgx, vgy, vgz, d_vx, d_vx + nAlign, d_vx + 2 * nAlign, gx, gy, gz, w, h, l); if (fwd){ d_hx[id ] = gx + delta * ispX * vgx ; d_hx[id + nAlign ] = gy + delta * ispY * vgy; d_hx[id + 2* nAlign] = gz + delta * ispZ * vgz; } else { d_hx[id ] = gx - delta * ispX * vgx; d_hx[id + nAlign ] = gy - delta * ispY * vgy; d_hx[id + 2* nAlign] = gz - delta * ispZ * vgz; } } } } template<bool fwd, BackgroundStrategy bg> void ComposeVH(float* d_hx, float* d_hy, float* d_hz, const float* d_vx, const float* d_vy, const float* d_vz, const float* d_gx, const float* d_gy, const float* d_gz, const float& delta, int w, int h, int l, float spX, float spY, float spZ, StreamT stream, bool onDev) { MK_CHECK_VFIELD_BACKGROUND(bg); dim3 threads(16,16); dim3 grids(iDivUp(w, threads.x), iDivUp(h, threads.y)); if (onDev) { cudaMemcpyToSymbolAsync(c_delta, &delta,sizeof(float), 0, cudaMemcpyDeviceToDevice,stream); ComposeVH_const_kernel<fwd, bg><<<grids, threads, 0, stream>>> (d_hx, d_hy, d_hz, d_vx, d_vy, d_vz, d_gx, d_gy, d_gz, w, h, l, 1.f / spX, 1.f / spY, 1.f / spZ); } else { ComposeVH_kernel<fwd, bg><<<grids, threads, 0, stream>>> (d_hx, d_hy, d_hz, d_vx, d_vy, d_vz, d_gx, d_gy, d_gz, delta, w, h, l, 1.f / spX, 1.f / spY, 1.f / spZ); } } /** * Compose a h field and a velocify field to get an hfield * h(x) = g(x+ delta * v(x)) * * davisb 2007 */ template<bool fwd, BackgroundStrategy bg> __global__ void ComposeHV_kernel(float* d_hx, float* d_hy, float* d_hz, const float* d_gx, const float* d_gy, const float* d_gz, const float* d_vx, const float* d_vy, const float* d_vz, float delta, int w, int h, int l, float ispX, float ispY, float ispZ){ uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float x,y,z; if (fwd){ x = i + d_vx[id] * ispX * delta; y = j + d_vy[id] * ispY * delta; z = k + d_vz[id] * ispZ * delta; } else { x = i - d_vx[id] * ispX * delta; y = j - d_vy[id] * ispY * delta; z = k - d_vz[id] * ispZ * delta; } float hx, hy, hz; triLerp<bg>(hx, hy, hz, d_gx, d_gy, d_gz, x, y, z, w, h, l); d_hx[id] = hx; d_hy[id] = hy; d_hz[id] = hz; } } } template<bool fwd, BackgroundStrategy bg> __global__ void ComposeHV_const_kernel( float* d_hx, float* d_hy, float* d_hz, const float* d_gx, const float* d_gy, const float* d_gz, const float* d_vx, const float* d_vy, const float* d_vz, int w, int h, int l, float ispX, float ispY, float ispZ) { uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ float delta = c_delta; int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float x,y,z; if (fwd){ x = i + d_vx[id] * ispX * delta; y = j + d_vy[id] * ispY * delta; z = k + d_vz[id] * ispZ * delta; } else { x = i - d_vx[id] * ispX * delta; y = j - d_vy[id] * ispY * delta; z = k - d_vz[id] * ispZ * delta; } float hx, hy, hz; triLerp<bg>(hx, hy, hz, d_gx, d_gy, d_gz, x, y, z, w, h, l); d_hx[id] = hx; d_hy[id] = hy; d_hz[id] = hz; } } } template<bool 
fwd, BackgroundStrategy bg> void ComposeHV(float* d_hx, float* d_hy, float* d_hz, const float* d_gx, const float* d_gy, const float* d_gz, const float* d_vx, const float* d_vy, const float* d_vz, const float& delta, int w, int h, int l, float spX, float spY, float spZ, StreamT stream, bool onDev) { MK_CHECK_HFIELD_BACKGROUND(bg); dim3 threads(16,16); dim3 grids(iDivUp(w, threads.x), iDivUp(h, threads.y)); if (onDev) { cudaMemcpyToSymbolAsync(c_delta, &delta,sizeof(float), 0, cudaMemcpyDeviceToDevice,stream); ComposeHV_const_kernel<fwd, bg><<<grids, threads,0,stream>>> (d_hx, d_hy, d_hz, d_gx, d_gy, d_gz, d_vx, d_vy, d_vz, w, h, l, 1.f / spX, 1.f / spY, 1.f / spZ); } else { ComposeHV_kernel<fwd, bg><<<grids, threads,0,stream>>> (d_hx, d_hy, d_hz, d_gx, d_gy, d_gz, d_vx, d_vy, d_vz, delta, w, h, l, 1.f / spX, 1.f / spY, 1.f / spZ); } } template<BackgroundStrategy bg> __global__ void ComposeTranslation_kernel( float* d_ox, float* d_oy, float* d_oz, const float* d_ix, const float* d_iy, const float* d_iz, const float tx, const float ty, const float tz, int sizeX, int sizeY, int sizeZ){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < sizeX && j < sizeY){ int id = j * sizeX + i; for (int k=0; k< sizeZ; ++k, id+= sizeX*sizeY){ float x = i + tx; float y = j + ty; float z = k + tz; float ox, oy, oz; triLerp<bg>(ox, oy, oz, d_ix, d_iy, d_iz, x, y, z, sizeX, sizeY, sizeZ); d_ox[id] = ox; d_oy[id] = oy; d_oz[id] = oz; } } } template<BackgroundStrategy bg> __global__ void ComposeTranslation_const_kernel( float* d_ox, float* d_oy, float* d_oz, const float* d_ix, const float* d_iy, const float* d_iz, int sizeX, int sizeY, int sizeZ){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < sizeX && j < sizeY){ int id = j * sizeX + i; for (int k=0; k< sizeZ; ++k, id+= sizeX*sizeY){ float x = i + c_trans[0]; float y = j + c_trans[1]; float z = k + c_trans[2]; float ox, oy, oz; triLerp<bg>(ox, oy, oz, d_ix, d_iy, d_iz, x, y, z, sizeX, sizeY, sizeZ); d_ox[id] = ox; d_oy[id] = oy; d_oz[id] = oz; } } } template<BackgroundStrategy bg> void ComposeTranslation(float *d_ox, float *d_oy, float *d_oz, const float *d_ix, const float *d_iy, const float *d_iz, const Vec3Di& sz, const Vec3Df& t, StreamT stream, bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); if (onDev) { cudaMemcpyToSymbolAsync(c_trans, &t.x,sizeof(float) * 3, 0, cudaMemcpyDeviceToDevice,stream); ComposeTranslation_const_kernel<bg><<<grids, threads, 0, stream>>> (d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, sz.x, sz.y, sz.z); } else { ComposeTranslation_kernel<bg><<<grids, threads, 0, stream>>> (d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, t.x, t.y, t.z, sz.x, sz.y, sz.z); } } template<BackgroundStrategy bg> __global__ void ApplyH_kernel(float* d_hx, float* d_hy, float* d_hz, const float* d_fx, const float* d_fy, const float* d_fz, const float* d_gx, const float* d_gy, const float* d_gz, int w, int h, int l){ uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float x = d_gx[id]; float y = d_gy[id]; float z = d_gz[id]; float hx, hy, hz; triLerp<bg>(hx, hy, hz, d_fx, d_fy, d_fz, x, y, z, w, h, l); d_hx[id] = hx; d_hy[id] = hy; d_hz[id] = hz; } } } template<BackgroundStrategy bg> void ApplyH(float *d_ox, float *d_oy, float *d_oz, const float *d_ix, const float *d_iy, const float *d_iz, const float *d_hx, 
const float *d_hy, const float *d_hz, const Vec3Di &sz, StreamT stream) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); ApplyH_kernel<bg><<<grids, threads, 0, stream>>>( d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, d_hx, d_hy, d_hz, sz.x, sz.y, sz.z); } template<bool fwd, BackgroundStrategy bg> __global__ void ApplyV_kernel(float* d_hx, float* d_hy, float* d_hz, const float* d_fx, const float* d_fy, const float* d_fz, const float* d_ux, const float* d_uy, const float* d_uz, float delta, int w, int h, int l, float iSpX, float iSpY, float iSpZ){ uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float x, y, z; if (fwd) { x = i + delta * iSpX * d_ux[id]; y = j + delta * iSpY * d_uy[id]; z = k + delta * iSpZ * d_uz[id]; } else { x = i - delta * iSpX * d_ux[id]; y = j - delta * iSpY * d_uy[id]; z = k - delta * iSpZ * d_uz[id]; } float hx, hy, hz; triLerp<bg>(hx, hy, hz, d_fx, d_fy, d_fz, x, y, z, w, h, l); d_hx[id] = hx; d_hy[id] = hy; d_hz[id] = hz; } } } template<bool fwd, BackgroundStrategy bg> __global__ void ApplyV_const_kernel(float* d_hx, float* d_hy, float* d_hz, const float* d_fx, const float* d_fy, const float* d_fz, const float* d_ux, const float* d_uy, const float* d_uz, int w, int h, int l, float iSpX, float iSpY, float iSpZ){ uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; if (i < w && j < h){ float delta = c_delta; int id = i + w * j; for (int k=0; k < l; ++k, id+=w*h){ float x, y, z; if (fwd) { x = i + delta * iSpX * d_ux[id]; y = j + delta * iSpY * d_uy[id]; z = k + delta * iSpZ * d_uz[id]; } else { x = i - delta * iSpX * d_ux[id]; y = j - delta * iSpY * d_uy[id]; z = k - delta * iSpZ * d_uz[id]; } float hx, hy, hz; triLerp<bg>(hx, hy, hz, d_fx, d_fy, d_fz, x, y, z, w, h, l); d_hx[id] = hx; d_hy[id] = hy; d_hz[id] = hz; } } } template<bool fwd, BackgroundStrategy bg> void ApplyV(float *d_ox, float *d_oy, float *d_oz, const float *d_ix, const float *d_iy, const float *d_iz, const float *d_ux, const float *d_uy, const float *d_uz, const Vec3Di &sz, const Vec3Df &sp, const float& delta, StreamT stream, bool onDev) { Vec3Df iSp = Vec3Df(1.f / sp.x, 1.f / sp.y, 1.f / sp.z); dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); if (onDev) { cudaMemcpyToSymbolAsync(c_delta, &delta,sizeof(float), 0, cudaMemcpyDeviceToDevice,stream); ApplyV_const_kernel<fwd, bg><<<grids, threads, 0, stream>>>( d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, d_ux, d_uy, d_uz, sz.x, sz.y, sz.z, iSp.x, iSp.y, iSp.z); } else { ApplyV_kernel<fwd, bg><<<grids, threads, 0, stream>>>( d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, d_ux, d_uy, d_uz, delta, sz.x, sz.y, sz.z, iSp.x, iSp.y, iSp.z); } } template<BackgroundStrategy bg, bool rescale> __global__ void Resampling_kernel(float* d_ox, float* d_oy, float* d_oz, const float* d_ix, const float* d_iy, const float* d_iz, int osizeX, int osizeY, int osizeZ, int isizeX, int isizeY, int isizeZ) { uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; float rX = (float)isizeX / (float)osizeX; float rY = (float)isizeY / (float)osizeY; float rZ = (float)isizeZ / (float)osizeZ; if (x < osizeX && y < osizeY){ int id = x + osizeX * y; float i_x = (rX - 1.f) / 2.f + x * rX; float i_y = (rY - 1.f) / 2.f + y * rY; for (int z=0; z < osizeZ; ++z, id+=osizeX * osizeY){ float i_z = (rZ - 1.f) / 2.f + z * rZ; float ox, oy, oz; 
triLerp<bg>(ox, oy, oz, d_ix, d_iy, d_iz, i_x, i_y, i_z, isizeX, isizeY, isizeZ); if (rescale){ ox /= rX; oy /= rY; oz /= rZ; } d_ox[id] = ox; d_oy[id] = oy; d_oz[id] = oz; } } } template<BackgroundStrategy bg, bool rescaleVector> void Resample(float *d_ox, float *d_oy, float *d_oz, const Vec3Di &oSz, const float *d_ix, const float *d_iy, const float *d_iz, const Vec3Di &iSz, StreamT stream) { dim3 threads(16,16); dim3 grids(iDivUp(oSz.x, threads.x), iDivUp(oSz.y, threads.y)); Resampling_kernel<bg, rescaleVector><<<grids, threads, 0, stream>>> (d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, oSz.x, oSz.y, oSz.z, iSz.x, iSz.y, iSz.z); } __global__ void ReprojectToUnitVec_kernel (float* d_ox, float* d_oy, float *d_oz, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < szX && j < szY){ int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY){ float vx = d_ox[id]; float vy = d_oy[id]; float vz = d_oz[id]; float l = sqrt(vx*vx+vy*vy+vz*vz); if(l>1.0){ d_ox[id] = vx/l; d_oy[id] = vy/l; d_oz[id] = vz/l; } } } } void ReprojectToUnitVec(float *d_ox, float *d_oy, float *d_oz, const Vec3Di &sz, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); ReprojectToUnitVec_kernel<<<grids, threads, 0, st>>> (d_ox, d_oy, d_oz, sz.x, sz.y, sz.z); } __global__ void NormalizeSafe_kernel (float* d_ox, float* d_oy, float *d_oz, const float* d_ix, const float* d_iy, const float* d_iz, float eps, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < szX && j < szY){ int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY){ float vx = d_ix[id]; float vy = d_iy[id]; float vz = d_iz[id]; float l = sqrt(vx*vx+vy*vy+vz*vz); if(l>eps){ d_ox[id] = vx/l; d_oy[id] = vy/l; d_oz[id] = vz/l; }else{ d_ox[id] = 0; d_oy[id] = 0; d_oz[id] = 0; } } } } void NormalizeSafe(float *d_ox, float *d_oy, float *d_oz, const float *d_ix, const float *d_iy, const float *d_iz, const Vec3Di &sz, const float& eps, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); NormalizeSafe_kernel<<<grids, threads, 0, st>>> (d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, eps, sz.x, sz.y, sz.z); } __global__ void Shrink_kernel (float* d_ox, float* d_oy, float *d_oz, const float* d_ix, const float* d_iy, const float* d_iz, float eps, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < szX && j < szY){ int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY){ float vx = d_ix[id]; float vy = d_iy[id]; float vz = d_iz[id]; float l = sqrt(vx*vx+vy*vy+vz*vz); float shrink = (l-eps)/l; if(l>eps){ d_ox[id] = vx*shrink; d_oy[id] = vy*shrink; d_oz[id] = vz*shrink; }else{ d_ox[id] = 0; d_oy[id] = 0; d_oz[id] = 0; } } } } void Shrink(float *d_ox, float *d_oy, float *d_oz, const float *d_ix, const float *d_iy, const float *d_iz, const Vec3Di &sz, const float& eps, StreamT st) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); Shrink_kernel<<<grids, threads, 0, st>>> (d_ox, d_oy, d_oz, d_ix, d_iy, d_iz, eps, sz.x, sz.y, sz.z); } template<BackgroundStrategy bg> __global__ void FixedPointInverse_kernel (float* d_ginvx, float* d_ginvy, float *d_ginvz, const float* d_gx, const float* d_gy, const float *d_gz, int szX, int szY, int szZ, unsigned int numIter) { // This does the fixed point iteration: // g^{-1}_{k+1}(x) = 
g^{-1}_{k}(x) + (x-g \circ g^{-1}_{k}(x)) int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < szX && y < szY) { int id = y * szX + x; float ghx, ghy, ghz; // interpolate into g at various places for (int z=0; z < szZ; ++z, id+= szX*szY) { float hx = d_ginvx[id]; // start with given initial estimate ginv(x) float hy = d_ginvy[id]; float hz = d_ginvz[id]; // this will be the output for (unsigned int iter=0;iter < numIter;++iter) { /*if (hx < 0 || hx > szX-1 ||*/ /*hy < 0 || hy > szY-1 ||*/ /*hz < 0 || hz > szZ-1)*/ /*break;*/ triLerp<bg>(ghx, ghy, ghz, d_gx, d_gy, d_gz, hx, hy, hz, szX, szY, szZ); hx += ((float)x - ghx); hy += ((float)y - ghy); hz += ((float)z - ghz); } // set output d_ginvx[id] = hx; d_ginvy[id] = hy; d_ginvz[id] = hz; } } } template<BackgroundStrategy bg> void FixedPointInverse(float *ginvx, float *ginvy, float *ginvz, const float *gx, const float *gy, const float *gz, const Vec3Di &sz, unsigned int numIter, StreamT stream, bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); FixedPointInverse_kernel<bg><<<grids, threads, 0, stream>>> (ginvx, ginvy, ginvz, gx, gy, gz, sz.x, sz.y, sz.z, numIter); } __global__ void updateInverseSubFromIndentity_kernel (float* d_hx, float* d_hy, float* d_hz, int w, int h, int l, float ispX, float ispY, float ispZ){ uint i = blockIdx.x * blockDim.x + threadIdx.x; uint j = blockIdx.y * blockDim.y + threadIdx.y; uint index = j * w + i; if (i < w && j < h){ for (int k=0; k<l; ++k, index+=w*h){ d_hx[index] = float(i) - ispX * d_hx[index]; d_hy[index] = float(j) - ispY * d_hy[index]; d_hz[index] = float(k) - ispZ * d_hz[index]; } } } void updateInverseSubFromIndentity(float *d_hx, float *d_hy, float *d_hz, const Vec3Di &sz, const Vec3Df &sp, StreamT stream) { Vec3Df iSp = Vec3Df(1.f / sp.x, 1.f / sp.y, 1.f / sp.z); dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); updateInverseSubFromIndentity_kernel<<<grids, threads, 0, stream>>> (d_hx, d_hy, d_hz, sz.x, sz.y, sz.z, iSp.x, iSp.y, iSp.z); } // Lie algebra methods /* * Adjoint action of Diff on its Lie algebra * This is just the pushforward * Z = Ad_g X = |Dg|\circ g^{-1} X\circ g^{-1} */ template<BackgroundStrategy bg> __global__ void Ad_kernel (int* d_Zx, int* d_Zy, int *d_Zz, const float* d_gx, const float* d_gy, const float *d_gz, const float* d_Xx, const float* d_Xy, const float *d_Xz, float scalex, float scaley, float scalez, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz; float dgvx, dgvy, dgvz; if (i < szX && j < szY) { int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY) { // Get Jacobian matrix jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_gx,d_gy,d_gz,i,j,k,Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz,szX,szY,szZ); if (szZ == 1) { // Special case for flat images Jxz = Jyz = Jzx = Jzy = 0; Jzz = 1; } // Compute determinant float det = Jxx*(Jyy*Jzz-Jyz*Jzy) -Jxy*(Jyx*Jzz-Jyz*Jzx) +Jxz*(Jyx*Jzy-Jyy*Jzx); // Multiply by det*Dg X dgvx = det*(Jxx*d_Xx[id] + Jxy*d_Xy[id] + Jxz*d_Xz[id]); dgvy = det*(Jyx*d_Xx[id] + Jyy*d_Xy[id] + Jyz*d_Xz[id]); dgvz = det*(Jzx*d_Xx[id] + Jzy*d_Xy[id] + Jzz*d_Xz[id]); // Splat each component (non-normalized) Splatting::atomicSplat(d_Zx, d_Zy, d_Zz, dgvx, dgvy, dgvz, d_gx[id], d_gy[id], d_gz[id], szX, szY, szZ); } } } template<BackgroundStrategy bg> void Ad(float *Zx, float *Zy, float *Zz, const float *gx, const float *gy, 
const float *gz, const float *Xx, const float *Xy, const float *Xz, const Vec3Di &sz, const Vec3Df &sp, StreamT s,bool onDev) { size_t nVox = sz.x * sz.y * sz.z; dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); int* i_Zx =(int*)Zx; int* i_Zy =(int*)Zy; int* i_Zz =(int*)Zz; GMemOpers<int>::SetMem(i_Zx, 0, nVox, s, false); GMemOpers<int>::SetMem(i_Zy, 0, nVox, s, false); GMemOpers<int>::SetMem(i_Zz, 0, nVox, s, false); Ad_kernel<bg><<<grids, threads, 0, s>>> (i_Zx, i_Zy, i_Zz, gx, gy, gz, Xx, Xy, Xz, 1.f/sp.x, 1.f/sp.y, 1.f/sp.z, // scaling for Jacobian sz.x, sz.y, sz.z); Splatting::FixedToFloating_I(Zx, Zy, Zz, nVox, s); } /* * infinitesimal adjoint action * Z = ad_X Y = DX Y - DY X */ __global__ void ad_kernel (float* d_Zx, float* d_Zy, float *d_Zz, const float* d_Xx, const float* d_Xy, const float *d_Xz, const float* d_Yx, const float* d_Yy, const float *d_Yz, float scalex, float scaley, float scalez, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz; if (i < szX && j < szY) { int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY) { // Get DX jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_Xx,d_Xy,d_Xz,i,j,k,Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz,szX,szY,szZ); // Start with DX Y d_Zx[id] = Jxx*d_Yx[id] + Jxy*d_Yy[id] + Jxz*d_Yz[id]; d_Zy[id] = Jyx*d_Yx[id] + Jyy*d_Yy[id] + Jyz*d_Yz[id]; d_Zz[id] = Jzx*d_Yx[id] + Jzy*d_Yy[id] + Jzz*d_Yz[id]; // Get DY jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_Yx,d_Yy,d_Yz,i,j,k,Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz,szX,szY,szZ); // Subtract DY X d_Zx[id] -= Jxx*d_Xx[id] + Jxy*d_Xy[id] + Jxz*d_Xz[id]; d_Zy[id] -= Jyx*d_Xx[id] + Jyy*d_Xy[id] + Jyz*d_Xz[id]; d_Zz[id] -= Jzx*d_Xx[id] + Jzy*d_Xy[id] + Jzz*d_Xz[id]; } } } void AdInf(float *Zx, float *Zy, float *Zz, const float *Xx, const float *Xy, const float *Xz, const float *Yx, const float *Yy, const float *Yz, const Vec3Di &sz, const Vec3Df &sp, StreamT s,bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); ad_kernel<<<grids, threads, 0, s>>> (Zx, Zy, Zz, Xx, Xy, Xz, Yx, Yy, Yz, 1.f/sp.x, 1.f/sp.y, 1.f/sp.z, // scaling for Jacobian sz.x, sz.y, sz.z); } /* * Jacobian X times Y * Z = DX Y */ __global__ void jacobianXY_kernel (float* d_Zx, float* d_Zy, float *d_Zz, const float* d_Xx, const float* d_Xy, const float *d_Xz, const float* d_Yx, const float* d_Yy, const float *d_Yz, float scalex, float scaley, float scalez, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz; if (i < szX && j < szY) { int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY) { // Get DX jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_Xx,d_Xy,d_Xz,i,j,k,Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz,szX,szY,szZ); // Compute DX Y d_Zx[id] = Jxx*d_Yx[id] + Jxy*d_Yy[id] + Jxz*d_Yz[id]; d_Zy[id] = Jyx*d_Yx[id] + Jyy*d_Yy[id] + Jyz*d_Yz[id]; d_Zz[id] = Jzx*d_Yx[id] + Jzy*d_Yy[id] + Jzz*d_Yz[id]; } } } void JacobianXY(float *Zx, float *Zy, float *Zz, const float *Xx, const float *Xy, const float *Xz, const float *Yx, const float *Yy, const float *Yz, const Vec3Di &sz, const Vec3Df &sp, StreamT s, bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); jacobianXY_kernel<<<grids, threads, 0, s>>> (Zx, Zy, Zz, Xx, Xy, Xz, Yx, Yy, Yz, 1.f/sp.x, 1.f/sp.y, 1.f/sp.z, // scaling for Jacobian sz.x, sz.y, sz.z); } 
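/* --------------------------------------------------------------------------
   Hedged sketch (not from the original library): the kind of clamped central
   difference that jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP> presumably
   evaluates per grid point. The 1-D helper below illustrates the stencil
   only; the real helper's signature and implementation are not shown here.
   -------------------------------------------------------------------------- */
__host__ __device__ inline float central_diff_clamp_sketch(const float* f, int i, int n)
{
    int ip = (i + 1 < n)  ? i + 1 : n - 1;   // clamp right neighbor at the boundary
    int im = (i - 1 >= 0) ? i - 1 : 0;       // clamp left neighbor at the boundary
    return 0.5f * (f[ip] - f[im]);           // df/di on a unit-spaced grid
}
// In 3-D the nine Jacobian entries Jxx..Jzz are this stencil applied to each
// component of the field along each axis; the 1.f/sp.x, 1.f/sp.y, 1.f/sp.z
// arguments passed to the kernels above are the corresponding grid-spacing
// scale factors (commented "scaling for Jacobian" in the launch code).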
/* * Jacobian X transpose times Y * Z = (DX)' Y */ __global__ void jacobianXtY_kernel (float* d_Zx, float* d_Zy, float *d_Zz, const float* d_Xx, const float* d_Xy, const float *d_Xz, const float* d_Yx, const float* d_Yy, const float *d_Yz, float scalex, float scaley, float scalez, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz; if (i < szX && j < szY) { int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY) { // Get DX jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_Xx,d_Xy,d_Xz,i,j,k,Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz,szX,szY,szZ); // Compute (DX)' Y d_Zx[id] = Jxx*d_Yx[id] + Jyx*d_Yy[id] + Jzx*d_Yz[id]; d_Zy[id] = Jxy*d_Yx[id] + Jyy*d_Yy[id] + Jzy*d_Yz[id]; d_Zz[id] = Jxz*d_Yx[id] + Jyz*d_Yy[id] + Jzz*d_Yz[id]; } } } void JacobianXtY(float *Zx, float *Zy, float *Zz, const float *Xx, const float *Xy, const float *Xz, const float *Yx, const float *Yy, const float *Yz, const Vec3Di &sz, const Vec3Df &sp, StreamT s, bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); jacobianXtY_kernel<<<grids, threads, 0, s>>> (Zx, Zy, Zz, Xx, Xy, Xz, Yx, Yy, Yz, 1.f/sp.x, 1.f/sp.y, 1.f/sp.z, // scaling for Jacobian sz.x, sz.y, sz.z); } /* * Coadjoint action of Diff on its Lie algebra * n = Ad_g^* m = (Dg)^T m\circ g |Dg| */ template<BackgroundStrategy bg> __global__ void CoAd_kernel (float* d_nx, float* d_ny, float *d_nz, const float* d_gx, const float* d_gy, const float *d_gz, const float* d_mx, const float* d_my, const float *d_mz, float scalex, float scaley, float scalez, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz; float mgx, mgy, mgz; if (i < szX && j < szY) { int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY) { float gx = d_gx[id]; float gy = d_gy[id]; float gz = d_gz[id]; // Get Jacobian matrix jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_gx,d_gy,d_gz,i,j,k,Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz,szX,szY,szZ); if (szZ == 1) { // Special case for flat images Jxz = Jyz = Jzx = Jzy = 0; Jzz = 1; } // Compute determinant float det = Jxx*(Jyy*Jzz-Jyz*Jzy) -Jxy*(Jyx*Jzz-Jyz*Jzx) +Jxz*(Jyx*Jzy-Jyy*Jzx); // Interpolate m triLerp<bg>(mgx, mgy, mgz, d_mx, d_my, d_mz, gx, gy, gz, szX, szY, szZ); // Multiply by det*Dg^T d_nx[id] = det*(Jxx*mgx + Jyx*mgy + Jzx*mgz); d_ny[id] = det*(Jxy*mgx + Jyy*mgy + Jzy*mgz); d_nz[id] = det*(Jxz*mgx + Jyz*mgy + Jzz*mgz); } } } template<BackgroundStrategy bg> void CoAd(float *nx, float *ny, float *nz, const float *gx, const float *gy, const float *gz, const float *mx, const float *my, const float *mz, const Vec3Di &sz, const Vec3Df &sp, StreamT s,bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); CoAd_kernel<bg><<<grids, threads, 0, s>>> (nx, ny, nz, gx, gy, gz, mx, my, mz, 1.f/sp.x, 1.f/sp.y, 1.f/sp.z, // scaling for Jacobian sz.x, sz.y, sz.z); } /* * infinitesimal coadjoint action * n = ad_X^* m = (DX)^T m + div(m \otimes X) */ __global__ void CoAdInf_kernel (float* d_nx, float* d_ny, float *d_nz, const float* d_Xx, const float* d_Xy, const float *d_Xz, const float* d_mx, const float* d_my, const float *d_mz, float scalex, float scaley, float scalez, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz; if (i < szX && j < szY) { 
int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY) { // Start with the tensor product divergence piece divtensorprodPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_mx,d_my,d_mz,d_Xx,d_Xy,d_Xz,i,j,k,d_nx[id],d_ny[id],d_nz[id],szX,szY,szZ); // Get DX jacobianPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_Xx,d_Xy,d_Xz,i,j,k,Jxx,Jxy,Jxz,Jyx,Jyy,Jyz,Jzx,Jzy,Jzz,szX,szY,szZ); // Add the DX^T m term d_nx[id] += Jxx*d_mx[id] + Jyx*d_my[id] + Jzx*d_mz[id]; d_ny[id] += Jxy*d_mx[id] + Jyy*d_my[id] + Jzy*d_mz[id]; d_nz[id] += Jxz*d_mx[id] + Jyz*d_my[id] + Jzz*d_mz[id]; } } } void CoAdInf(float *nx, float *ny, float *nz, const float *Xx, const float *Xy, const float *Xz, const float *mx, const float *my, const float *mz, const Vec3Di &sz, const Vec3Df &sp, StreamT s,bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); CoAdInf_kernel<<<grids, threads, 0, s>>> (nx, ny, nz, Xx, Xy, Xz, mx, my, mz, 1.f/sp.x, 1.f/sp.y, 1.f/sp.z, // scaling for Jacobian sz.x, sz.y, sz.z); } /* * computes tensor divergence of outer product of two vector fields * Z = div(X \otimes Y) */ __global__ void DivergenceTensor_kernel (float* d_Zx, float* d_Zy, float *d_Zz, const float* d_Xx, const float* d_Xy, const float *d_Xz, const float* d_Yx, const float* d_Yy, const float *d_Yz, int szX, int szY, int szZ) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < szX && j < szY) { int id = j * szX + i; for (int k=0; k< szZ; ++k, id+= szX*szY) { // The tensor product divergence divtensorprodPoint<float,DIFF_CENTRAL,BC_CLAMP>(d_Xx,d_Xy,d_Xz,d_Yx,d_Yy,d_Yz,i,j,k,d_Zx[id],d_Zy[id],d_Zz[id],szX,szY,szZ); } } } void DivergenceTensor(float *Zx, float *Zy, float *Zz, const float *Xx, const float *Xy, const float *Xz, const float *Yx, const float *Yy, const float *Yz, const Vec3Di &sz, const Vec3Df &sp, StreamT s,bool onDev) { dim3 threads(16,16); dim3 grids(iDivUp(sz.x, threads.x), iDivUp(sz.y, threads.y)); DivergenceTensor_kernel<<<grids, threads, 0, s>>> (Zx, Zy, Zz, Xx, Xy, Xz, Yx, Yy, Yz, sz.x, sz.y, sz.z); } //Instantiation #include "GFieldOperKernels_inst.cxx" } // end namespace PyCA
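/* --------------------------------------------------------------------------
   Hedged 1-D illustration (not part of the library) of the fixed-point
   iteration used by FixedPointInverse_kernel above:
       ginv_{k+1}(x) = ginv_k(x) + (x - g(ginv_k(x)))
   For a deformation close enough to the identity this update is a
   contraction and converges to ginv with g(ginv(x)) = x.
   -------------------------------------------------------------------------- */
#include <cmath>
#include <cstdio>

int main() {
  auto g = [](double x) { return x + 0.3 * std::sin(x); };  // smooth invertible 1-D map
  double x = 1.0;    // point where we want g^{-1}(x)
  double ginv = x;   // simple initial estimate; the kernel starts from the caller's d_ginv
  for (int iter = 0; iter < 20; ++iter) {
    ginv += x - g(ginv);
  }
  std::printf("g(ginv(x)) = %f (target %f)\n", g(ginv), x);  // ~1.0
  return 0;
}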
#include <cudatbx/cuda_utilities.cuh> #include <cudatbx/math/reduction.cuh> #include <cudatbx/scattering/direct_summation.h> #include <cudatbx/scattering/form_factors.cuh> /* ============================================================================ Changing fType will affect the number of values that can be stored in shared and constant memory. It will also affect the sincos and exp functions used in the kernel since there are only hardware intrinsic functions for single-precision. */ const int threads_per_block = 1024; // threads/block for GPU const int padding = 128 / sizeof(fType); // padding for data arrays /* ============================================================================ */ namespace cudatbx { namespace scattering { // constants __device__ __constant__ fType two_pi = fType(2.0)*CUDART_PI_F; const int padded_size = 16; __device__ __constant__ int d_padded_size = padded_size; /* ========================================================================== Memory properties for C2070, compute capability 2.0 (CUDA C Programming Guide v 4.0, Appendix F) --------------------------------------------------- registers - register, r/w, "fast", per-multiprocessor 32K 4B registers local memory - r/w, "slow", per-thread 512 KB __shared__ - shared memory, r/w, "fast", block-wide 48 KB __device__ - global memory, r/w, "slow", grid-wide, 6 GB __constant__ - constant memory, r, "fast", grid-wide 64 KB Shared memory is broken up into 32 banks and is interleaved into 32-bit words (4 bytes). For example, an array of length 64 containing single precision values will have elements 0 and 32 in the same bank, 1 and 33 in the same bank, etc. To access shared memory with no conflicts (all threads get data with one read), each thread should read from a different bank, or have multiple threads read the same value in the same bank. In the previous example, having all threads access element 0 or having each thread read a different element between 0 and 31, inclusive, will only require one read. Accessing elements 0 and 32 will require two reads. Appendix F.4 describes the memory properties for compute capability 2.0 devices in more detail and has figures for efficient memory access patterns. Basic approach -------------- Each thread calculates the sum for one h, so each thread will independently loop over all atoms and put the sum into global memory All coordinates are loaded into global memory and then each thread copies sections into shared memory. The kernel loops over all sections to sum over all atoms. Rotation matrix/translation vectors pairs are also loaded and looped in the same manner. Form factors are stored in constant memory The thread index is checked against the length of the array multiple times because all threads are used for reading atom data from global, but only threads whose index is less than the array length are needed for summation. Additionaly, two __syncthreads() calls are required. The first is to make sure all the atom data is copied into shared memory before any summation is started, and the second is to make sure all the summation is finished before the atom data in shared memory is replaced with new data. Data format for kernel ---------------------- xyz = x_0 ... x_n y_0 ... y_n z_0 ... z_n solvent_weights = s_0 s_1 ... s_n h = h_0 ... h_n k_0 ... k_n l_0 ... l_n rt = r_00 ... r_08 t_00 ... t_02 ... r_10 ... t_n2 a = a_00 a_01 a_02 ... a_n3 a_n4 a_n5 b = "" c = c_0 c_1 ... c_n To facilitate coalesced reads from global memory, the data is grouped into sections. 
For example, for xyz, all the x's come first, then all the y's, and lastly, all the z's. When read from global memory, three coalesced reads will read in all the xyz's for a set of 32 atoms, one read from each section. The size of the shared arrays is equal to the number of threads so that all threads will attempt to read from global memory. There are checks against the actual length of available data. For the structue_factor_kernel, the general format of the loops is, ----------------------------- | x_0 | x_1 | x_2 | x_3 | ... xyz array in global memory ----------------------------- | | | | each thread stores one value into | | | | shared memory V V V V x[threadIdx.x] = xyz[current_atom]; ----------------------------- | x_0 | x_1 | x_2 | x_3 | ... x array in shared memory ----------------------------- | |-----|-----|-----| each thread reads one value V V V V x_a = x[a]; -------------------------------------------------------- |each thread calculates its own sum with its registers | -------------------------------------------------------- | | | | | | | | loop over all atoms V V V V ----------------------------- | r_0 | r_1 | r_2 | r_3 | ... each thread copies its sums into ----------------------------- the structure factor arrays in ----------------------------- global memory | i_0 | i_1 | i_2 | i_3 | ... ----------------------------- -------------------------------------------------------------------------- */ // kernel template <typename floatType> __global__ void structure_factor_kernel (const int* scattering_type, const floatType* xyz, const floatType* solvent_weights, const int n_xyz, const int padded_n_xyz, const floatType* h, const int n_h, const int padded_n_h, const floatType* rt, const int n_rt, floatType* sf_real, floatType* sf_imag) { int i = blockDim.x * blockIdx.x + threadIdx.x; floatType h_i, k_i, l_i, stol_sq; floatType f[max_types]; if (i < n_h) { // read h from global memory (stored in registers) h_i = h[ i]; k_i = h[ padded_n_h + i]; l_i = h[2*padded_n_h + i]; // calculate form factors (stored in local memory) // last form factor is always for boundary solvent layer stol_sq = floatType(0.25) * (h_i*h_i + k_i*k_i + l_i*l_i); for (int type=0; type<dc_n_types; type++) { f[type] = form_factor(type,stol_sq); } } // copy atoms into shared memory one chunk at a time and sum // all threads are used for reading data // shared arrays can be allocated at kernel invocation, but it requires // partitioning a big array (implement later) __shared__ floatType x[threads_per_block]; __shared__ floatType y[threads_per_block]; __shared__ floatType z[threads_per_block]; __shared__ floatType solvent[threads_per_block]; __shared__ int s_type[threads_per_block]; __shared__ floatType rot_trans[threads_per_block]; floatType real_sum = 0.0; floatType imag_sum = 0.0; floatType s,c,f1,f2,xx,yy,zz,x_a,y_a,z_a; int current_atom, current_rt, rt_offset; for (int atom=0; atom<n_xyz; atom += blockDim.x) { current_atom = atom + threadIdx.x; // coalesce reads using threads, but don't read past n_xyz // one read for each variable should fill chunk of 32 atoms // total length = # of threads/block if (current_atom < n_xyz) { x[threadIdx.x] = xyz[ current_atom]; y[threadIdx.x] = xyz[ padded_n_xyz + current_atom]; z[threadIdx.x] = xyz[2*padded_n_xyz + current_atom]; solvent[threadIdx.x] = solvent_weights[current_atom]; s_type[threadIdx.x] = scattering_type[current_atom]; } // loop over all rotation/translation operators // one coalesced read will copy (# of threads)/(padded_size) rot/trans // since the number of 
threads is a multiple of 32, it will also always // be evenly divisible by padded_size for (int rt_i=0; rt_i<n_rt; rt_i += blockDim.x/d_padded_size) { current_rt = rt_i*d_padded_size + threadIdx.x; if (current_rt < n_rt*d_padded_size) { rot_trans[threadIdx.x] = rt[current_rt]; } // wait for all data to be copied into shared memory __syncthreads(); // then sum over all the atoms that are now available to all threads if (i < n_h) { for (int r=0; r<blockDim.x/d_padded_size; r++) { current_rt = rt_i + r; // overall counter for rot/trans pairs if (current_rt < n_rt) { for (int a=0; a<blockDim.x; a++) { current_atom = atom + a; // overall counter for atom number if (current_atom < n_xyz) { x_a = x[a]; // transfer from shared memory to registers y_a = y[a]; // might not be necessary due to cache z_a = z[a]; rt_offset = r*d_padded_size; // apply rotation and translation by expanding Rx + t xx = (x_a*rot_trans[rt_offset ] + y_a*rot_trans[rt_offset + 1] + z_a*rot_trans[rt_offset + 2] + rot_trans[rt_offset + 9]); yy = (x_a*rot_trans[rt_offset + 3] + y_a*rot_trans[rt_offset + 4] + z_a*rot_trans[rt_offset + 5] + rot_trans[r*padded_size + 10]); zz = (x_a*rot_trans[rt_offset + 6] + y_a*rot_trans[rt_offset + 7] + z_a*rot_trans[rt_offset + 8] + rot_trans[rt_offset + 11]); __sincosf(two_pi*(xx * h_i + yy * k_i + zz * l_i),&s,&c); // bulk solvent correction in f // boundary layer solvent scale in solvent // structure factor = sum{(form factor)[exp(2pi h*x)]} if (dc_complex_form_factor) { // form factor = f1 + i f2 f1 = dc_a[s_type[a]*dc_n_terms] + solvent[a]*dc_a[(dc_n_types-1)*dc_n_terms]; f2 = dc_b[s_type[a]*dc_n_terms] + solvent[a]*dc_b[(dc_n_types-1)*dc_n_terms]; real_sum += f1*c - f2*s; imag_sum += f1*s + f2*c; } else { f1 = f[s_type[a]] + solvent[a]*f[dc_n_types-1]; real_sum += f1 * c; imag_sum += f1 * s; } } } } } } // wait before starting next chunk so data isn't changed for lagging threads __syncthreads(); } } // transfer result to global memory if (i < n_h) { sf_real[i] += real_sum; sf_imag[i] += imag_sum; } } /* ========================================================================== "Rapid and accurate calculation of small-angle scattering profiles using the golden ratio" Watson, MC, Curtis, JE. J. Appl. Cryst. (2013). 
46, 1171-1177 Quadrature approach for calculating SAXS intensities -------------------------------------------------------------------------- */ template <typename floatType> __global__ void expand_q_lattice_kernel (const floatType* q, const int n_q, const floatType* lattice, const int n_lattice, const int padded_n_lattice, floatType* workspace, const int padded_n_workspace) { int i = blockDim.x * blockIdx.x + threadIdx.x; __shared__ floatType lx[threads_per_block]; __shared__ floatType ly[threads_per_block]; __shared__ floatType lz[threads_per_block]; int current_l; floatType q_j; if (i < n_lattice) { // copy lattice points into shared memory lx[threadIdx.x] = lattice[ i]; ly[threadIdx.x] = lattice[ padded_n_lattice + i]; lz[threadIdx.x] = lattice[2*padded_n_lattice + i]; __syncthreads(); // and expand for all q for (int j=0; j<n_q; j++) { current_l = j*n_lattice + i; q_j = q[j]; workspace[ current_l] = q_j * lx[threadIdx.x]; workspace[ padded_n_workspace + current_l] = q_j * ly[threadIdx.x]; workspace[2*padded_n_workspace + current_l] = q_j * lz[threadIdx.x]; } __syncthreads(); } } // -------------------------------------------------------------------------- template <typename floatType> __global__ void saxs_kernel (const int* scattering_type, const floatType* xyz, const floatType* solvent_weights, const int n_xyz, const int padded_n_xyz, const int n_q, const int n_lattice, floatType* workspace, const int padded_n_workspace) { int i = blockDim.x * blockIdx.x + threadIdx.x; // loop over all q and lattice points (more parallel than loop over q) floatType q_x,q_y,q_z,stol_sq; floatType f[max_types]; int n_total = n_q * n_lattice; floatType c = 0.0625/(CUDART_PI_F*CUDART_PI_F); if (i < n_total) { // read q from global memory (stored in registers) q_x = workspace[ i]; q_y = workspace[ padded_n_workspace + i]; q_z = workspace[2*padded_n_workspace + i]; // calculate form factors (stored in local memory) stol_sq = c * (q_x*q_x + q_y*q_y + q_z*q_z); for (int type=0; type<dc_n_types; type++) { f[type] = form_factor(type,stol_sq); } } // copy atoms into shared memory one chunk at a time and sum // all threads are used for reading data __shared__ floatType x[threads_per_block]; __shared__ floatType y[threads_per_block]; __shared__ floatType z[threads_per_block]; __shared__ floatType solvent[threads_per_block]; __shared__ int s_type[threads_per_block]; floatType real_sum = 0.0; floatType imag_sum = 0.0; floatType s,f1,f2; int current_atom; for (int atom=0; atom<n_xyz; atom += blockDim.x) { current_atom = atom + threadIdx.x; // coalesce reads using threads, but don't read past n_xyz // one read for each variable should fill chunk of 32 atoms // total length = # of threads/block if (current_atom < n_xyz) { x[threadIdx.x] = xyz[ current_atom]; y[threadIdx.x] = xyz[ padded_n_xyz + current_atom]; z[threadIdx.x] = xyz[2*padded_n_xyz + current_atom]; solvent[threadIdx.x] = solvent_weights[current_atom]; s_type[threadIdx.x] = scattering_type[current_atom]; } // wait for all data to be copied into shared memory __syncthreads(); // then sum over all the atoms that are now available to all threads if (i < n_total) { for (int a=0; a<blockDim.x; a++) { current_atom = atom + a; // overall counter for atom number if (current_atom < n_xyz) { __sincosf((x[a] * q_x + y[a] * q_y + z[a] * q_z),&s,&c); // structure factor = sum{(form factor)[exp(2pi h*x)]} if (dc_complex_form_factor) { // form factor = f1 + i f2 f1 = dc_a[s_type[a]*dc_n_terms] + solvent[a]*dc_a[(dc_n_types-1)*dc_n_terms]; f2 = dc_b[s_type[a]*dc_n_terms] 
+ solvent[a]*dc_b[(dc_n_types-1)*dc_n_terms]; real_sum += f1*c - f2*s; imag_sum += f1*s + f2*c; } else { f1 = f[s_type[a]] + solvent[a]*f[dc_n_types-1]; real_sum += f1 * c; imag_sum += f1 * s; } } } } // wait before starting next chunk so data isn't changed for lagging threads __syncthreads(); } // transfer result to global memory if (i < n_total) { workspace[i] = real_sum * real_sum + imag_sum * imag_sum; } } // -------------------------------------------------------------------------- template <typename floatType> __global__ void solvent_saxs_kernel (const int* scattering_type, const floatType* xyz, const floatType* solvent_weights, const int n_xyz, const int padded_n_xyz, const int n_q, const int n_lattice, floatType* workspace, const int padded_n_workspace) { int i = blockDim.x * blockIdx.x + threadIdx.x; // loop over all q and lattice points (more parallel than loop over q) floatType q_x,q_y,q_z,stol_sq; floatType f[max_types]; int n_total = n_q * n_lattice; floatType c = 0.0625/(CUDART_PI_F*CUDART_PI_F); if (i < n_total) { // read q from global memory (stored in registers) q_x = workspace[ i]; q_y = workspace[ padded_n_workspace + i]; q_z = workspace[2*padded_n_workspace + i]; // calculate form factors (stored in local memory) stol_sq = c * (q_x*q_x + q_y*q_y + q_z*q_z); for (int type=0; type<dc_n_types; type++) { f[ type] = p_form_factor(type,stol_sq); f[dc_n_types + type] = x_form_factor(type,stol_sq); } } // copy atoms into shared memory one chunk at a time and sum // all threads are used for reading data __shared__ floatType x[threads_per_block]; __shared__ floatType y[threads_per_block]; __shared__ floatType z[threads_per_block]; __shared__ floatType solvent[threads_per_block]; __shared__ int s_type[threads_per_block]; floatType p_real = 0.0; floatType p_imag = 0.0; floatType x_real = 0.0; floatType x_imag = 0.0; floatType bl_real = 0.0; floatType bl_imag = 0.0; floatType s,f1,f2; int current_atom; for (int atom=0; atom<n_xyz; atom += blockDim.x) { current_atom = atom + threadIdx.x; // coalesce reads using threads, but don't read past n_xyz // one read for each variable should fill chunk of 32 atoms // total length = # of threads/block if (current_atom < n_xyz) { x[threadIdx.x] = xyz[ current_atom]; y[threadIdx.x] = xyz[ padded_n_xyz + current_atom]; z[threadIdx.x] = xyz[2*padded_n_xyz + current_atom]; solvent[threadIdx.x] = solvent_weights[current_atom]; s_type[threadIdx.x] = scattering_type[current_atom]; } // wait for all data to be copied into shared memory __syncthreads(); // then sum over all the atoms that are now available to all threads if (i < n_total) { for (int a=0; a<blockDim.x; a++) { current_atom = atom + a; // overall counter for atom number if (current_atom < n_xyz) { __sincosf((x[a] * q_x + y[a] * q_y + z[a] * q_z),&s,&c); // structure factor = sum{(form factor)[exp(2pi h*x)]} f1 = f[s_type[a]]; p_real += f1 * c; p_imag += f1 * s; f2 = f[dc_n_types + s_type[a]]; x_real += f2 * c; x_imag += f2 * s; f1 = solvent[a]*f[dc_n_types-1]; bl_real += f1 * c; bl_imag += f1 * s; } } } // wait before starting next chunk so data isn't changed for lagging threads __syncthreads(); } // transfer result to global memory if (i < n_total) { workspace[ padded_n_workspace + i] = p_real; workspace[2*padded_n_workspace + i] = p_imag; workspace[3*padded_n_workspace + i] = x_real; workspace[4*padded_n_workspace + i] = x_imag; workspace[5*padded_n_workspace + i] = bl_real; workspace[6*padded_n_workspace + i] = bl_imag; } } // 
-------------------------------------------------------------------------- template <typename floatType> __global__ void collect_solvent_saxs_kernel (const int n_q, const int n_lattice, const floatType c1, const floatType c2, floatType* workspace, const int padded_n_workspace) { int i = blockDim.x * blockIdx.x + threadIdx.x; floatType real_sum, imag_sum; if (i < n_q*n_lattice) { real_sum = workspace[ padded_n_workspace + i] + c1 * workspace[3*padded_n_workspace + i] + c2 * workspace[5*padded_n_workspace + i]; imag_sum = workspace[2*padded_n_workspace + i] + c1 * workspace[4*padded_n_workspace + i] + c2 * workspace[6*padded_n_workspace + i]; workspace[i] = real_sum * real_sum + imag_sum * imag_sum; } } /* ========================================================================== */ } } #endif // DIRECT_SUMMATION_CUH
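/*
 * Illustrative host-side launch sequence for the kernels above -- a minimal
 * sketch, not part of the original header. It assumes threads_per_block is
 * the compile-time block size used by the kernels, that the dc_* constant
 * symbols (scattering factors) have already been initialized elsewhere in
 * this header, that the code sits in the same namespaces as the kernels, and
 * that `workspace` is laid out as padded planes of length padded_n_workspace
 * (qx, qy, qz), which saxs_kernel overwrites in place with |F(q)|^2 per
 * (q, lattice) pair. The function name and default-stream launches are
 * illustrative only.
 */
template <typename floatType>
void launch_direct_summation(const floatType* d_q, int n_q,
                             const floatType* d_lattice, int n_lattice,
                             int padded_n_lattice,
                             const int* d_scattering_type,
                             const floatType* d_xyz,
                             const floatType* d_solvent_weights,
                             int n_xyz, int padded_n_xyz,
                             floatType* d_workspace, int padded_n_workspace)
{
  // One thread per lattice point for the q-lattice expansion step.
  int blocks_expand = (n_lattice + threads_per_block - 1) / threads_per_block;
  expand_q_lattice_kernel<floatType><<<blocks_expand, threads_per_block>>>(
      d_q, n_q, d_lattice, n_lattice, padded_n_lattice,
      d_workspace, padded_n_workspace);

  // One thread per (q, lattice) pair for the summation over atoms.
  int n_total = n_q * n_lattice;
  int blocks_sum = (n_total + threads_per_block - 1) / threads_per_block;
  saxs_kernel<floatType><<<blocks_sum, threads_per_block>>>(
      d_scattering_type, d_xyz, d_solvent_weights, n_xyz, padded_n_xyz,
      n_q, n_lattice, d_workspace, padded_n_workspace);

  cudaDeviceSynchronize();
}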
#include <cublas_v2.h> #include <thrust/device_ptr.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> #include "cml/cml_matrix.cuh" #include "cml/cml_utils.cuh" #include "cml/cml_vector.cuh" // Cuda Matrix Library namespace cml { namespace { cublasFillMode_t InvFillMode(cublasFillMode_t uplo) { return uplo == CUBLAS_FILL_MODE_LOWER ? CUBLAS_FILL_MODE_UPPER : CUBLAS_FILL_MODE_LOWER; } cublasOperation_t InvOp(cublasOperation_t trans) { return trans == CUBLAS_OP_N ? CUBLAS_OP_T : CUBLAS_OP_N; } } // namespace // // BLAS LEVEL 1 // // Nrm2. inline cublasStatus_t blas_nrm2(cublasHandle_t handle, const vector<double> *x, double *result) { cublasStatus_t err = cublasDnrm2(handle, static_cast<int>(x->size), x->data, static_cast<int>(x->stride), result); CublasCheckError(err); return err; } inline cublasStatus_t blas_nrm2(cublasHandle_t handle, const vector<float> *x, float *result) { cublasStatus_t err = cublasSnrm2(handle, static_cast<int>(x->size), x->data, static_cast<int>(x->stride), result); CublasCheckError(err); return err; } // Scal. inline cublasStatus_t blas_scal(cublasHandle_t handle, const double alpha, vector<double> *x) { cublasStatus_t err = cublasDscal(handle, static_cast<int>(x->size), &alpha, x->data, static_cast<int>(x->stride)); CublasCheckError(err); return err; } inline cublasStatus_t blas_scal(cublasHandle_t handle, const float alpha, vector<float> *x) { cublasStatus_t err = cublasSscal(handle, static_cast<int>(x->size), &alpha, x->data, static_cast<int>(x->stride)); CublasCheckError(err); return err; } inline cublasStatus_t blas_scal(cublasHandle_t handle, const double *alpha, vector<double> *x) { cublasStatus_t err = cublasDscal(handle, static_cast<int>(x->size), alpha, x->data, static_cast<int>(x->stride)); CublasCheckError(err); return err; } inline cublasStatus_t blas_scal(cublasHandle_t handle, const float *alpha, vector<float> *x) { cublasStatus_t err = cublasSscal(handle, static_cast<int>(x->size), alpha, x->data, static_cast<int>(x->stride)); CublasCheckError(err); return err; } // Asum. inline cublasStatus_t blas_asum(cublasHandle_t handle, const vector<double> *x, double *result) { cublasStatus_t err = cublasDasum(handle, x->size, x->data, x->stride, result); CublasCheckError(err); return err; } inline cublasStatus_t blas_asum(cublasHandle_t handle, const vector<float> *x, float *result) { cublasStatus_t err = cublasSasum(handle, x->size, x->data, x->stride, result); CublasCheckError(err); return err; } // Dot. inline cublasStatus_t blas_dot(cublasHandle_t handle, const vector<double> *x, const vector<double> *y, double *result) { cublasStatus_t err = cublasDdot(handle, static_cast<int>(x->size), x->data, static_cast<int>(x->stride), y->data, static_cast<int>(y->stride), result); CublasCheckError(err); return err; } inline cublasStatus_t blas_dot(cublasHandle_t handle, const vector<float> *x, const vector<float> *y, float *result) { cublasStatus_t err = cublasSdot(handle, static_cast<int>(x->size), x->data, static_cast<int>(x->stride), y->data, static_cast<int>(y->stride), result); CublasCheckError(err); return err; } // Axpy. 
inline cublasStatus_t blas_axpy(cublasHandle_t handle, double alpha, const vector<double> *x, vector<double> *y) { cublasStatus_t err = cublasDaxpy(handle, static_cast<int>(x->size), &alpha, x->data, static_cast<int>(x->stride), y->data, static_cast<int>(y->stride)); CublasCheckError(err); return err; } inline cublasStatus_t blas_axpy(cublasHandle_t handle, float alpha, const vector<float> *x, vector<float> *y) { cublasStatus_t err = cublasSaxpy(handle, static_cast<int>(x->size), &alpha, x->data, static_cast<int>(x->stride), y->data, static_cast<int>(y->stride)); CublasCheckError(err); return err; } // Asum template <typename T> T blas_asum(cublasHandle_t handle, const vector<T> *x) { T result; blas_asum(handle, x, &result); return result; } // Nrm2. template <typename T> struct Square : thrust::unary_function<T, T> { __device__ T operator()(const T &x) { return x * x; } }; template <typename T> T blas_nrm2(cublasHandle_t handle, const vector<T> *x) { strided_range<thrust::device_ptr<T> > strided_x( thrust::device_pointer_cast(x->data), thrust::device_pointer_cast(x->data + x->stride * x->size), x->stride); T nrm2 = sqrt(thrust::transform_reduce(strided_x.begin(), strided_x.end(), Square<T>(), static_cast<T>(0.0), thrust::plus<T>())); return nrm2; } // // BLAS LEVEL 2 // // Gemv. template <CBLAS_ORDER O> cublasStatus_t blas_gemv(cublasHandle_t handle, cublasOperation_t trans, double alpha, const matrix<double, O> *A, const vector<double> *x, double beta, vector<double> *y) { cublasStatus_t err; if (O == CblasColMajor) { err = cublasDgemv(handle, trans, static_cast<int>(A->size1), static_cast<int>(A->size2), &alpha, A->data, static_cast<int>(A->tda), x->data, static_cast<int>(x->stride), &beta, y->data, static_cast<int>(y->stride)); } else { trans = InvOp(trans); err = cublasDgemv(handle, trans, static_cast<int>(A->size2), static_cast<int>(A->size1), &alpha, A->data, static_cast<int>(A->tda), x->data, static_cast<int>(x->stride), &beta, y->data, static_cast<int>(y->stride)); } CublasCheckError(err); return err; } template <CBLAS_ORDER O> cublasStatus_t blas_gemv(cublasHandle_t handle, cublasOperation_t trans, float alpha, const matrix<float, O> *A, const vector<float> *x, float beta, vector<float> *y) { cublasStatus_t err; if (O == CblasColMajor) { err = cublasSgemv(handle, trans, static_cast<int>(A->size1), static_cast<int>(A->size2), &alpha, A->data, static_cast<int>(A->tda), x->data, static_cast<int>(x->stride), &beta, y->data, static_cast<int>(y->stride)); } else { trans = InvOp(trans); err = cublasSgemv(handle, trans, static_cast<int>(A->size2), static_cast<int>(A->size1), &alpha, A->data, static_cast<int>(A->tda), x->data, static_cast<int>(x->stride), &beta, y->data, static_cast<int>(y->stride)); } CublasCheckError(err); return err; } // Trsv. 
template <CBLAS_ORDER O> cublasStatus_t blas_trsv(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, const matrix<double, O> *A, vector<double> *x) { if (O == CblasRowMajor) { uplo = InvFillMode(uplo); trans = InvOp(trans); } cublasStatus_t err = cublasDtrsv(handle, uplo, trans, diag, static_cast<int>(A->size1), A->data, static_cast<int>(A->tda), x->data, static_cast<int>(x->stride)); CublasCheckError(err); return err; } template <CBLAS_ORDER O> cublasStatus_t blas_trsv(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, const matrix<float, O> *A, vector<float> *x) { if (O == CblasRowMajor) { uplo = InvFillMode(uplo); trans = InvOp(trans); } cublasStatus_t err = cublasStrsv(handle, uplo, trans, diag, static_cast<int>(A->size1), A->data, static_cast<int>(A->tda), x->data, static_cast<int>(x->stride)); CublasCheckError(err); return err; } // // BLAS LEVEL 3 // // Syrk. template <CBLAS_ORDER O> cublasStatus_t blas_syrk(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, const float alpha, const matrix<float, O> *A, const float beta, matrix<float, O> *C) { int k = trans == CUBLAS_OP_N ? A->size2 : A->size1; if (O == CblasRowMajor) { uplo = InvFillMode(uplo); trans = InvOp(trans); } cublasStatus_t err = cublasSsyrk(handle, uplo, trans, static_cast<int>(C->size1), k, &alpha, A->data, static_cast<int>(A->tda), &beta, C->data, static_cast<int>(C->tda)); CublasCheckError(err); return err; } template <CBLAS_ORDER O> cublasStatus_t blas_syrk(cublasHandle_t handle, cublasFillMode_t uplo, cublasOperation_t trans, const double alpha, const matrix<double, O> *A, const double beta, matrix<double, O> *C) { int k = trans == CUBLAS_OP_N ? A->size2 : A->size1; if (O == CblasRowMajor) { uplo = InvFillMode(uplo); trans = InvOp(trans); } cublasStatus_t err = cublasDsyrk(handle, uplo, trans, static_cast<int>(C->size1), k, &alpha, A->data, static_cast<int>(A->tda), &beta, C->data, static_cast<int>(C->tda)); CublasCheckError(err); return err; } // Gemm. template <CBLAS_ORDER O> cublasStatus_t blas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, const float alpha, const matrix<float, O> *A, const matrix<float, O> *B, const float beta, matrix<float, O> *C) { int k = transa == CUBLAS_OP_N ? A->size2 : A->size1; cublasStatus_t err; if (O == CblasColMajor) err = cublasSgemm(handle, transa, transb, static_cast<int>(C->size1), static_cast<int>(C->size2), k, &alpha, A->data, static_cast<int>(A->tda), B->data, static_cast<int>(B->tda), &beta, C->data, static_cast<int>(C->tda)); else err = cublasSgemm(handle, transb, transa, static_cast<int>(C->size2), static_cast<int>(C->size1), k, &alpha, B->data, static_cast<int>(B->tda), A->data, static_cast<int>(A->tda), &beta, C->data, static_cast<int>(C->tda)); CublasCheckError(err); return err; } template <CBLAS_ORDER O> cublasStatus_t blas_gemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, const double alpha, const matrix<double, O> *A, const matrix<double, O> *B, const double beta, matrix<double, O> *C) { int k = transa == CUBLAS_OP_N ? 
A->size2 : A->size1; cublasStatus_t err; if (O == CblasColMajor) err = cublasDgemm(handle, transa, transb, static_cast<int>(C->size1), static_cast<int>(C->size2), k, &alpha, A->data, static_cast<int>(A->tda), B->data, static_cast<int>(B->tda), &beta, C->data, static_cast<int>(C->tda)); else err = cublasDgemm(handle, transb, transa, static_cast<int>(C->size2), static_cast<int>(C->size1), k, &alpha, B->data, static_cast<int>(B->tda), A->data, static_cast<int>(A->tda), &beta, C->data, static_cast<int>(C->tda)); CublasCheckError(err); return err; } } // namespace cml #endif // CML_BLAS_CUH_
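/*
 * Illustrative note on the row-major gemm path above -- a sketch, not part of
 * the original header. For CblasRowMajor the wrapper passes B before A and
 * swaps the output dimensions: a row-major C (m x n) has the same memory
 * layout as a column-major C^T (n x m), and C^T = B^T * A^T. The snippet
 * below reproduces the same trick with raw cuBLAS on row-major float arrays;
 * the function name and leading dimensions are chosen for the example.
 */
#include <cublas_v2.h>

inline cublasStatus_t gemm_row_major(cublasHandle_t handle,
                                     int m, int n, int k,
                                     const float* A,   // m x k, row-major
                                     const float* B,   // k x n, row-major
                                     float* C)         // m x n, row-major
{
  const float alpha = 1.0f, beta = 0.0f;
  // Compute C^T (n x m, column-major) = B^T * A^T, which is exactly the
  // row-major C = A * B. The leading dimensions are the row lengths n, k, n.
  return cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                     n, m, k,
                     &alpha,
                     B, n,
                     A, k,
                     &beta,
                     C, n);
}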
#pragma once #include <iostream> #include <math.h> #include <gunrock/app/problem_base.cuh> namespace gunrock { namespace app { namespace pr_nibble { /** * @brief Speciflying parameters for pr_nibble Problem * @param parameters The util::Parameter<...> structure holding all parameter * info \return cudaError_t error message(s), if any */ cudaError_t UseParameters_problem(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(gunrock::app::UseParameters_problem(parameters)); return retval; } /** * @brief Template Problem structure. * @tparam _GraphT Type of the graph * @tparam _FLAG Problem flags */ template <typename _GraphT, ProblemFlag _FLAG = Problem_None> struct Problem : ProblemBase<_GraphT, _FLAG> { typedef _GraphT GraphT; static const ProblemFlag FLAG = _FLAG; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::CsrT CsrT; typedef typename GraphT::GpT GpT; typedef ProblemBase<GraphT, FLAG> BaseProblem; typedef DataSliceBase<GraphT, FLAG> BaseDataSlice; // ---------------------------------------------------------------- // Dataslice structure /** * @brief Data structure containing problem specific data on indivual GPU. */ struct DataSlice : BaseDataSlice { util::Array1D<SizeT, ValueT> values; // Output values util::Array1D<SizeT, ValueT> grad; // Gradient values util::Array1D<SizeT, ValueT> y; // Intermediate quantity util::Array1D<SizeT, ValueT> z; // Intermediate quantity util::Array1D<SizeT, ValueT> q; // Truncated z-values util::Array1D<SizeT, int> touched; // Keep track of VertexT src; // Node to start local PR from VertexT src_neib; // Neighbor of reference node int num_ref_nodes; // Number of source nodes (hardcoded to 1 for now) int *d_grad_scale; // Gradient magnitude for convergence check int *h_grad_scale; // Gradient magnitude for convergence check ValueT *d_grad_scale_value; ValueT *h_grad_scale_value; ValueT eps; // Tolerance for convergence ValueT alpha; // Parameterizes conductance/size of output cluster ValueT rho; // Parameterizes conductance/size of output cluster int max_iter; // Maximum number of iterations /* * @brief Default constructor */ DataSlice() : BaseDataSlice() { values.SetName("values"); grad.SetName("grad"); q.SetName("q"); y.SetName("y"); z.SetName("z"); touched.SetName("touched"); } /* * @brief Default destructor */ virtual ~DataSlice() { Release(); } /* * @brief Releasing allocated memory space * @param[in] target The location to release memory from * \return cudaError_t Error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx)); GUARD_CU(values.Release(target)); GUARD_CU(grad.Release(target)); GUARD_CU(q.Release(target)); GUARD_CU(y.Release(target)); GUARD_CU(z.Release(target)); GUARD_CU(touched.Release(target)); GUARD_CU(BaseDataSlice::Release(target)); return retval; } /** * @brief initializing sssp-specific data on each gpu * @param sub_graph Sub graph on the GPU. 
* @param[in] gpu_idx GPU device index * @param[in] target Targeting device location * @param[in] flag Problem flag containling options * @param[in] _eps Convergence criteria * @param[in] _alpha * @param[in] _rho * @param[in] _max_iter Max number of iterations * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &sub_graph, int num_gpus, int gpu_idx, util::Location target, ProblemFlag flag, ValueT _eps, ValueT _alpha, ValueT _rho, int _max_iter) { cudaError_t retval = cudaSuccess; eps = _eps; alpha = _alpha; rho = _rho; max_iter = _max_iter; GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag)); GUARD_CU(values.Allocate(sub_graph.nodes, target)); GUARD_CU(grad.Allocate(sub_graph.nodes, target)); GUARD_CU(q.Allocate(sub_graph.nodes, target)); GUARD_CU(y.Allocate(sub_graph.nodes, target)); GUARD_CU(z.Allocate(sub_graph.nodes, target)); GUARD_CU(touched.Allocate(sub_graph.nodes, target)); h_grad_scale = (int *)malloc(sizeof(int) * 1); GUARD_CU(cudaMalloc((void **)&d_grad_scale, 1 * sizeof(int))); h_grad_scale_value = (ValueT *)malloc(sizeof(ValueT) * 1); GUARD_CU(cudaMalloc((void **)&d_grad_scale_value, 1 * sizeof(ValueT))); if (target & util::DEVICE) { GUARD_CU(sub_graph.CsrT::Move(util::HOST, target, this->stream)); } return retval; } /** * @brief Reset problem function. Must be called prior to each run. * @param[in] target Targeting device location * @param[in] _src Source node * @param[in] _src_neib Neighbor of source node (!! HACK) * @param[in] _num_ref_nodes Number of source nodes (HARDCODED to 1 * elsewhere) \return cudaError_t Error message(s), if any */ cudaError_t Reset(VertexT _src, VertexT _src_neib, int _num_ref_nodes, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->sub_graph->nodes; src = _src; src_neib = _src_neib; num_ref_nodes = _num_ref_nodes; // Ensure data are allocated GUARD_CU(values.EnsureSize_(nodes, target)); GUARD_CU(grad.EnsureSize_(nodes, target)); GUARD_CU(q.EnsureSize_(nodes, target)); GUARD_CU(y.EnsureSize_(nodes, target)); GUARD_CU(z.EnsureSize_(nodes, target)); GUARD_CU(touched.EnsureSize_(nodes, target)); // Reset data GUARD_CU( values.ForEach([] __host__ __device__(ValueT & x) { x = (ValueT)0; }, nodes, target, this->stream)); GUARD_CU( grad.ForEach([] __host__ __device__(ValueT & x) { x = (ValueT)0; }, nodes, target, this->stream)); GUARD_CU(q.ForEach([] __host__ __device__(ValueT & x) { x = (ValueT)0; }, nodes, target, this->stream)); GUARD_CU(y.ForEach([] __host__ __device__(ValueT & x) { x = (ValueT)0; }, nodes, target, this->stream)); GUARD_CU(z.ForEach([] __host__ __device__(ValueT & x) { x = (ValueT)0; }, nodes, target, this->stream)); GUARD_CU(touched.ForEach([] __host__ __device__(int &x) { x = 0; }, nodes, target, this->stream)); return retval; } }; // DataSlice // Problem attributes util::Array1D<SizeT, DataSlice> *data_slices; ValueT phi; ValueT vol; int max_iter; ValueT eps; ValueT alpha; ValueT rho; // ---------------------------------------------------------------- // Problem Methods /** * @brief pr_nibble default constructor */ Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None) : BaseProblem(_parameters, _flag), data_slices(NULL) { // Load command line parameters phi = _parameters.Get<ValueT>("phi"); max_iter = _parameters.Get<int>("max-iter"); eps = _parameters.Get<ValueT>("eps"); vol = _parameters.Get<ValueT>("vol"); if (vol == 0.0) { vol = 1.0; } } /** * @brief pr_nibble default destructor */ virtual ~Problem() { Release(); } /* * 
@brief Releasing allocated memory space * @param[in] target The location to release memory from * \return cudaError_t Error message(s), if any */ cudaError_t Release(util::Location target = util::LOCATION_ALL) { cudaError_t retval = cudaSuccess; if (data_slices == NULL) return retval; for (int i = 0; i < this->num_gpus; i++) GUARD_CU(data_slices[i].Release(target)); if ((target & util::HOST) != 0 && data_slices[0].GetPointer(util::DEVICE) == NULL) { delete[] data_slices; data_slices = NULL; } GUARD_CU(BaseProblem::Release(target)); return retval; } /** * @brief Copy result distancess computed on GPUs back to host-side arrays. * @param[in] h_values Host array for PR values * \return cudaError_t Error message(s), if any */ cudaError_t Extract(ValueT *h_values, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; SizeT nodes = this->org_graph->nodes; ValueT *h_grad = new ValueT[nodes]; ValueT *h_y = new ValueT[nodes]; ValueT *h_z = new ValueT[nodes]; ValueT *h_q = new ValueT[nodes]; if (this->num_gpus == 1) { auto &data_slice = data_slices[0][0]; // Set device if (target == util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[0])); GUARD_CU(data_slice.grad.SetPointer(h_grad, nodes, util::HOST)); GUARD_CU(data_slice.grad.Move(util::DEVICE, util::HOST)); GUARD_CU(data_slice.y.SetPointer(h_y, nodes, util::HOST)); GUARD_CU(data_slice.y.Move(util::DEVICE, util::HOST)); GUARD_CU(data_slice.z.SetPointer(h_z, nodes, util::HOST)); GUARD_CU(data_slice.z.Move(util::DEVICE, util::HOST)); GUARD_CU(data_slice.q.SetPointer(h_q, nodes, util::HOST)); GUARD_CU(data_slice.q.Move(util::DEVICE, util::HOST)); } else if (target == util::HOST) { GUARD_CU(data_slice.grad.ForEach( h_grad, [] __host__ __device__(const ValueT &device_val, ValueT &host_val) { host_val = device_val; }, nodes, util::HOST)); GUARD_CU(data_slice.y.ForEach( h_y, [] __host__ __device__(const ValueT &device_val, ValueT &host_val) { host_val = device_val; }, nodes, util::HOST)); GUARD_CU(data_slice.z.ForEach( h_z, [] __host__ __device__(const ValueT &device_val, ValueT &host_val) { host_val = device_val; }, nodes, util::HOST)); GUARD_CU(data_slice.q.ForEach( h_q, [] __host__ __device__(const ValueT &device_val, ValueT &host_val) { host_val = device_val; }, nodes, util::HOST)); } for (SizeT i = 0; i < nodes; ++i) { SizeT d = this->org_graph->GetNeighborListLength(i); double d_sqrt = sqrt((double)d); h_values[i] = abs(h_q[i] * d_sqrt); } } else { // num_gpus != 1 // ============ INCOMPLETE TEMPLATE - MULTIGPU ============ // // TODO: extract the results from multiple GPUs, e.g.: // // util::Array1D<SizeT, ValueT *> th_distances; // // th_distances.SetName("bfs::Problem::Extract::th_distances"); // // GUARD_CU(th_distances.Allocate(this->num_gpus, util::HOST)); // for (int gpu = 0; gpu < this->num_gpus; gpu++) // { // auto &data_slice = data_slices[gpu][0]; // if (target == util::DEVICE) // { // GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); // // GUARD_CU(data_slice.distances.Move(util::DEVICE, // util::HOST)); // } // // th_distances[gpu] = data_slice.distances.GetPointer(util::HOST); // } //end for(gpu) // for (VertexT v = 0; v < nodes; v++) // { // int gpu = this -> org_graph -> GpT::partition_table[v]; // VertexT v_ = v; // if ((GraphT::FLAG & gunrock::partitioner::Keep_Node_Num) != 0) // v_ = this -> org_graph -> GpT::convertion_table[v]; // // h_distances[v] = th_distances[gpu][v_]; // } // // GUARD_CU(th_distances.Release()); } return retval; } /** * @brief initialization function. 
* @param graph The graph that SSSP processes on * @param[in] Location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; GUARD_CU(BaseProblem::Init(graph, target)); data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus]; ValueT num_edges = (ValueT)graph.edges / 2.0; ValueT log_num_edges = log2(num_edges); // alpha this->alpha = pow(this->phi, 2) / (225.0 * log(100.0 * sqrt(num_edges))); // rho if (1.0f + log2((ValueT)this->vol) > log_num_edges) { this->rho = log_num_edges; } else { this->rho = 1.0f + log2((ValueT)this->vol); } this->rho = pow(2.0f, this->rho); this->rho = 1.0 / this->rho; this->rho *= 1.0 / (48.0 * log_num_edges); for (int gpu = 0; gpu < this->num_gpus; gpu++) { data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]"); if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST)); auto &data_slice = data_slices[gpu][0]; GUARD_CU(data_slice.Init( this->sub_graphs[gpu], this->num_gpus, this->gpu_idx[gpu], target, this->flag, this->eps, this->alpha, this->rho, this->max_iter)); } return retval; } /** * @brief Reset problem function. Must be called prior to each run. * @param[in] src Source vertex * @param[in] src_neib Source vertex neighbor (!! HACK) * @param[in] location Memory location to work on * \return cudaError_t Error message(s), if any */ cudaError_t Reset(VertexT src, VertexT src_neib, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; int num_ref_nodes = 1; // Reset data slices for (int gpu = 0; gpu < this->num_gpus; ++gpu) { if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU(data_slices[gpu]->Reset(src, src_neib, num_ref_nodes, target)); GUARD_CU(data_slices[gpu].Move(util::HOST, target)); } int gpu; VertexT src_; if (this->num_gpus <= 1) { gpu = 0; src_ = src; } else { // TODO -- MULTIGPU } if (target & util::DEVICE) { GUARD_CU(util::SetDevice(this->gpu_idx[gpu])); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); } SizeT src_d = this->org_graph->GetNeighborListLength(src_); ValueT src_d_sqrt = sqrt((ValueT)src_d); ValueT src_dn_sqrt = 1.0 / src_d_sqrt; ValueT src_grad = -1.0 * this->alpha * src_dn_sqrt / (double)num_ref_nodes; ValueT thresh = this->rho * this->alpha * src_d_sqrt; if (-src_grad < thresh) { printf("pr_nibble::Problem: `-1 * src_grad < thresh` -> breaking"); return retval; } if (target & util::HOST) { data_slices[gpu]->grad[src_] = src_grad; } if (target & util::DEVICE) { GUARD_CU2( cudaMemcpy(data_slices[gpu]->grad.GetPointer(util::DEVICE) + src_, &src_grad, sizeof(ValueT), cudaMemcpyHostToDevice), "PRNibble cudaMemcpy distances failed"); GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); } GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed"); return retval; } }; } // namespace pr_nibble } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
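/*
 * Standalone reproduction of the alpha/rho setup in Problem::Init above --
 * a sketch for clarity, not part of the original header. `phi` and `vol`
 * come from the command line, `num_edges` is graph.edges / 2; the helper
 * name and the use of double precision are illustrative.
 */
#include <algorithm>
#include <cmath>

struct PrNibbleParams { double alpha; double rho; };

inline PrNibbleParams pr_nibble_params(double phi, double vol, double num_edges) {
  PrNibbleParams p;
  double log_num_edges = std::log2(num_edges);
  // alpha parameterizes the teleport probability of the local PageRank.
  p.alpha = std::pow(phi, 2) / (225.0 * std::log(100.0 * std::sqrt(num_edges)));
  // rho is the sparsity threshold: 1 / (2^min(1 + log2(vol), log2(m)) * 48 * log2(m)).
  double t = std::min(1.0 + std::log2(vol), log_num_edges);
  p.rho = 1.0 / (std::pow(2.0, t) * 48.0 * log_num_edges);
  return p;
}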
#include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __global__ void sync_conv_groups_highway() { } template <typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > Dtype(0) ? in[index] : Dtype(0); } } template <typename Dtype> __global__ void TanHForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = tanh(in[index]); } } template <typename Dtype> __global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = 1. / (1. + exp(-in[index])); } } template <typename Dtype> void CuDNNHighwayLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); Dtype* cell_state_data = cell_states[i]->mutable_gpu_data(); Dtype* transform_gate_data = transform_gate_states[i]->mutable_gpu_data(); const Dtype* cell_weight = this->blobs_[0]->gpu_data(); const Dtype* transform_weight = this->blobs_[1]->gpu_data(); size_t workspace_limit_bytes = 2 * this->kernel_h_ * this->kernel_w_ * this->channels_ * sizeof(int) + 1; // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { // pick the convolution algorithm for the block state calculation // should be exposed in proto cudnnConvolutionFwdAlgo_t algo; CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_limit_bytes, // memoryLimitInBytes, &algo)); // pick the convolution algorithm for the transform gate calculation cudnnConvolutionFwdAlgo_t trans_algo; CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], bottom_descs_[i], trans_filter_desc_, trans_conv_descs_[i], top_descs_[i], CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_limit_bytes, // memoryLimitInBytes, &trans_algo)); // get minimum size of the workspace needed for the desired algorithm size_t workspaceSizeInBytes_temp = 0; size_t trans_workspaceSizeInBytes_temp = 0; CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], algo, &workspaceSizeInBytes_temp)); CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], bottom_descs_[i], trans_filter_desc_, trans_conv_descs_[i], top_descs_[i], trans_algo, &trans_workspaceSizeInBytes_temp)); if ((workspaceSizeInBytes_temp + trans_workspaceSizeInBytes_temp) > workspaceSizeInBytes) { workspaceSizeInBytes = workspaceSizeInBytes_temp; // free the existing workspace and allocate a new (larger) one cudaFree(this->workspace); cudaError_t err = cudaMalloc(&(this->workspace), workspaceSizeInBytes); if (err != cudaSuccess) { // force zero memory path algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; trans_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; workspace = NULL; workspaceSizeInBytes = 0; } } // Cells. 
CUDNN_CHECK(cudnnConvolutionForward(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, cell_weight + weight_offset_ * g, conv_descs_[i], algo, workspace, workspaceSizeInBytes, cudnn::dataType<Dtype>::zero, top_descs_[i], cell_state_data + top_offset_ * g)); // Transform gates. CUDNN_CHECK(cudnnConvolutionForward(handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, trans_filter_desc_, transform_weight + trans_weight_offset_ * g, trans_conv_descs_[i], trans_algo, workspace, workspaceSizeInBytes, cudnn::dataType<Dtype>::zero, top_descs_[i], transform_gate_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* cell_bias_data = this->blobs_[2]->gpu_data(); const Dtype* transform_bias_data = this->blobs_[3]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[0*this->group_ + g], CUDNN_ADD_SAME_C, cudnn::dataType<Dtype>::one, bias_desc_, cell_bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], cell_state_data + top_offset_ * g)); CUDNN_CHECK(cudnnAddTensor(handle_[1*this->group_ + g], CUDNN_ADD_SAME_C, cudnn::dataType<Dtype>::one, bias_desc_, transform_bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], transform_gate_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups_highway<<<1, 1>>>(); const int count = bottom[i]->count(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, cell_state_data, cell_state_data); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, transform_gate_data, transform_gate_data); CUDA_POST_KERNEL_CHECK; // Finally, top = cell.input + bottom.(1 - input) caffe_gpu_gate_h_and_x_with_g(count, cell_state_data, bottom_data, transform_gate_data, top_data); CUDA_POST_KERNEL_CHECK; } } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * (in_data[index] > Dtype(0)); } } template <typename Dtype> __global__ void TanHBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype tanhx = out_data[index]; out_diff[index] = in_diff[index] * (1 - tanhx * tanhx); } } template <typename Dtype> __global__ void SigmoidBackward(const int n, const Dtype* in_diff, const Dtype* out_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { const Dtype sigmoid_x = out_data[index]; out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); } } template <typename Dtype> void CuDNNHighwayLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* cell_weight = NULL; Dtype* cell_weight_diff = NULL; const Dtype* transform_weight = NULL; Dtype* transform_weight_diff = NULL; if (this->param_propagate_down_[0]) { cell_weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), cell_weight_diff); transform_weight_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), transform_weight_diff); } Dtype* cell_bias_diff = 
NULL; Dtype* transform_bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { cell_bias_diff = this->blobs_[2]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[2]->count(), Dtype(0), cell_bias_diff); transform_bias_diff = this->blobs_[3]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[3]->count(), Dtype(0), transform_bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* cell_state_data = cell_states[i]->gpu_data(); const Dtype* transform_gate_data = transform_gate_states[i]->gpu_data(); Dtype* cell_state_diff = cell_states[i]->mutable_gpu_diff(); Dtype* transform_gate_diff = transform_gate_states[i]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const Dtype* top_diff = top[i]->gpu_diff(); const int count = top[i]->count(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, cell_state_data, cell_state_diff); CUDA_POST_KERNEL_CHECK; // NOLINT_NEXT_LINE(whitespace/operators) SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, transform_gate_data, transform_gate_diff); CUDA_POST_KERNEL_CHECK; caffe_gpu_dot_with_diff(count, transform_gate_diff, cell_state_data, bottom_data, transform_gate_diff); caffe_gpu_elem_multiply(count, cell_state_diff, transform_gate_data, cell_state_diff); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], cell_state_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, cell_bias_diff + bias_offset_ * g)); CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[3*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], transform_gate_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, transform_bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], cell_state_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType<Dtype>::one, filter_desc_, cell_weight_diff + weight_offset_ * g)); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[4*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], transform_gate_diff + top_offset_ * g, trans_conv_descs_[i], cudnn::dataType<Dtype>::one, trans_filter_desc_, transform_weight_diff + trans_weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (cell_weight == NULL || transform_weight == NULL) { cell_weight = this->blobs_[0]->gpu_data(); transform_weight = this->blobs_[1]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, cell_weight + weight_offset_ * g, top_descs_[i], cell_state_diff + top_offset_ * g, conv_descs_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, trans_filter_desc_, transform_weight + trans_weight_offset_ * g, top_descs_[i], transform_gate_diff + top_offset_ * g, trans_conv_descs_[i], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups_highway<<<1, 1>>>(); caffe_gpu_dot_add_one_minus_b(count, top_diff, transform_gate_data, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNHighwayLayer); } // namespace caffe #endif
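/*
 * Illustrative elementwise highway gating kernel -- a sketch, not part of the
 * original file. The forward pass above delegates this step to
 * caffe_gpu_gate_h_and_x_with_g; based on the comment
 * "top = cell.input + bottom.(1 - input)" it is assumed to compute
 *   top[i] = h[i] * t[i] + x[i] * (1 - t[i]),
 * where h is the ReLU'd cell state and t the sigmoid transform gate. The
 * kernel name and signature here are hypothetical; CUDA_KERNEL_LOOP is the
 * usual Caffe grid-stride macro used elsewhere in this file.
 */
template <typename Dtype>
__global__ void HighwayGateForward(const int n, const Dtype* h,
                                   const Dtype* x, const Dtype* t,
                                   Dtype* top) {
  CUDA_KERNEL_LOOP(index, n) {
    top[index] = h[index] * t[index] + x[index] * (Dtype(1) - t[index]);
  }
}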
#if defined(_WIN32) #include <stddef.h> #else #include <inttypes.h> #endif #define PAGE_SIZE 4096 // threshold to consider using pre-allocated pool #define PINNED_POOL_SIZE_THRESHOLD (100*1024*1024) // 8 MB for pool allocations on host & device #define PINNED_POOL_SIZE ( 100 * 1024 * 1024) // set that macro on if you want to see print info // #define AMGX_PRINT_MEMORY_INFO 1 // set that macro to print the call stack for each malloc/free (it's extensive). // #define AMGX_PRINT_MALLOC_CALL_STACK 1 // #define MULTIGPU 1 _thread_id getCurrentThreadId() { #ifdef WIN32 return GetCurrentThreadId(); #else return pthread_self(); #endif } namespace amgx { namespace memory { MemoryPool::MemoryPool(size_t max_block_size, size_t page_size, size_t max_size) : m_size(0) , m_max_size(max_size) , m_max_block_size(max_block_size) , m_page_size(page_size) , m_free_mem(0) , m_used_blocks() , m_free_blocks() , m_recently_merged(false) { //initializeCriticalSection(&m_mutex2); } MemoryPool::~MemoryPool() { #ifdef MULTIGPU int rank; MPI_Comm_rank( MPI_COMM_WORLD, &rank ); if (rank == 0) { #endif if ( !m_used_blocks.empty() ) { std::cerr << "!!! detected some memory leaks in the code: trying to free non-empty temporary device pool !!!" << std::endl; for ( MemoryBlockListIterator it = m_used_blocks.begin() ; it != m_used_blocks.end() ; ++it ) { std::cerr << "ptr: " << std::setw(18) << (void *) get_block_begin(it) << " size: " << get_block_size(it) << std::endl; } } //deleteCriticalSection(&m_mutex2); #ifdef MULTIGPU } #endif } void MemoryPool::add_memory(void *ptr, size_t size, bool managed) { if (m_max_size != 0 && managed && (size + m_size > m_max_size)) { FatalError("Memory pool limit is reached", AMGX_ERR_NO_MEMORY); } m_mutex2.lock(); m_owned_ptrs.push_back(MemoryBlock(ptr, size, true, managed)); char *aligned_ptr = (char *) ptr; if ( (size_t) aligned_ptr % m_page_size ) { aligned_ptr = (char *) ((((size_t) aligned_ptr + m_page_size - 1) / m_page_size) * m_page_size); } size_t free_size = size - (aligned_ptr - (char *) ptr); #ifdef AMGX_PRINT_MEMORY_INFO // std::cerr << "INFO: Adding memory block " << (void*) aligned_ptr << " " << free_size << std::endl; #endif m_free_blocks.push_back(MemoryBlock(aligned_ptr, free_size, true, managed)); m_size += free_size; m_free_mem += free_size; m_mutex2.unlock(); } void *MemoryPool::allocate(size_t size, size_t &allocated_size) { m_mutex2.lock(); void *ptr = NULL; // Fail if the size is 0. if ( size == 0 ) { FatalError("Allocating memory buffer of size 0!!!", AMGX_ERR_BAD_PARAMETERS); } // The memory size we are actually going to allocate. size_t aligned_size = m_page_size * ((size + m_page_size - 1) / m_page_size); // The chosen block (if any). MemoryBlockListIterator best_it = m_free_blocks.end(); // The best cost (wasted amount of memory). size_t best_cost = std::numeric_limits<size_t>::max(); // The address of the first correctly aligned region we're interested in. char *best_aligned_ptr = NULL; // Look for a large enough block. for ( MemoryBlockListIterator it = m_free_blocks.begin() ; it != m_free_blocks.end() ; ++it ) { #ifdef AMGX_PRINT_MEMORY_INFO #ifdef MULTIGPU int rank; MPI_Comm_rank( MPI_COMM_WORLD, &rank ); if (rank == 0) #endif { std::cerr << "INFO: [block " << std::setw(18) << (void *) get_block_begin(it) << " " << std::setw(12) << get_block_size(it) << std::endl; } #endif // Get an aligned pointer. char *aligned_ptr = get_block_begin(it); // Make sure alignments are fine. It shouldn't be needed but it's actually cheap to test. 
if ( (size_t) aligned_ptr & (m_page_size - 1) ) { FatalError("INTERNAL ERROR: Invalid alignment!!!", AMGX_ERR_UNKNOWN); } // If the pointer fits in that block, just keep it. if ( aligned_size > get_block_size(it) ) { continue; } // The cost. size_t cost = get_block_size(it) - aligned_size; // If the cost is better, keep it. if ( cost < best_cost ) { best_it = it; best_cost = cost; best_aligned_ptr = aligned_ptr; } } // No block found??? Fallback to regular malloc treated outside of this function. if ( best_it == m_free_blocks.end() ) { allocated_size = 0; m_mutex2.unlock(); return ptr; } // Our allocation starts at aligned_ptr. ptr = best_aligned_ptr; // Allocated size. allocated_size = aligned_size; // Store the used block. MemoryBlock used_block(best_aligned_ptr, aligned_size, is_block_first(best_it)); m_used_blocks.push_back(used_block); // Update statistics. m_free_mem -= aligned_size; // We store the pointer to the beginning of the block. char *block_begin = get_block_begin(best_it); // ... and its size. size_t block_size = get_block_size(best_it); // We use all the block. Simply remove it. if ( best_aligned_ptr == block_begin && aligned_size == block_size ) { m_free_blocks.erase(best_it); } else { set_block_begin(best_it, best_aligned_ptr + aligned_size); set_block_size (best_it, block_size - aligned_size); best_it->m_first = false; } m_mutex2.unlock(); // fallback to regular malloc treated outside of this function return ptr; } void MemoryPool::free(void *ptr, size_t &freed_size) { m_mutex2.lock(); // Find the element to remove. MemoryBlockListIterator it = m_used_blocks.begin(); for ( ; it != m_used_blocks.end() ; ++it ) if ( get_block_begin(it) == ptr ) { break; } // Sanity check. if ( it == m_used_blocks.end() ) { FatalError("INTERNAL ERROR: Invalid iterator!!!", AMGX_ERR_UNKNOWN); } // We keep the pointers sorted. So find where to insert the new block. MemoryBlockListIterator insert_it = m_free_blocks.begin(); for ( ; insert_it != m_free_blocks.end() ; ++insert_it ) { // Same pointer in used and free... That's surely a bug. if ( get_block_begin(insert_it) == get_block_begin(it) ) { FatalError("INTERNAL ERROR: Invalid memory block iterator!!! Free was called twice on same pointer.", AMGX_ERR_UNKNOWN); } if ( get_block_begin(insert_it) > get_block_begin(it) ) { break; } } m_free_blocks.insert(insert_it, *it); // We merge contiguous blocks. MemoryBlockListIterator first = m_free_blocks.begin(); MemoryBlockListIterator last = m_free_blocks.begin(); char *last_ptr = get_block_begin(first) + get_block_size(first); size_t merged_size = get_block_size(first); int num_merged_blocks = 0; for ( ++last ; last != m_free_blocks.end() ; ++last ) { if ( last_ptr != get_block_begin(last) || is_block_first(last) ) // We won't merge those two. { if ( num_merged_blocks != 0 ) // We have found the end of the block. { break; } // We have found nothing to merge... Shift the window. first = last; last_ptr = get_block_begin(first) + get_block_size(first); merged_size = get_block_size(first); } else { last_ptr = get_block_begin(last) + get_block_size(last); merged_size += get_block_size(last); num_merged_blocks++; } } #ifdef AMGX_PRINT_MEMORY_INFO #ifdef MULTIGPU int rank; MPI_Comm_rank( MPI_COMM_WORLD, &rank ); if (rank == 0) #endif { std::cerr << "INFO: Merging " << num_merged_blocks << " blocks" << std::endl; } #endif if ( num_merged_blocks != 0 ) // Do the merge. { set_block_size(first, merged_size); first++; m_free_blocks.erase(first, last); } // Remove the used block and update statistics. 
m_free_mem += get_block_size(it); m_used_blocks.erase(it); //m_recently_merged = true; m_mutex2.unlock(); } void MemoryPool::free_all() { m_mutex2.lock(); m_used_blocks.clear(); m_free_blocks.clear(); std::vector<MemoryBlock> owned_ptrs = m_owned_ptrs; m_owned_ptrs.clear(); for ( size_t i = 0 ; i < owned_ptrs.size() ; ++i ) { add_memory(owned_ptrs[i].m_begin, owned_ptrs[i].m_size, owned_ptrs[i].m_managed); } m_free_mem = m_size; m_mutex2.unlock(); } bool MemoryPool::is_allocated(void *ptr) { m_mutex2.lock(); for ( MemoryBlockListConstIterator it = m_used_blocks.begin() ; it != m_used_blocks.end() ; ++it ) if ( it->m_begin == ptr ) { m_mutex2.unlock(); return true; } m_mutex2.unlock(); return false; } PinnedMemoryPool::PinnedMemoryPool() : MemoryPool(PINNED_POOL_SIZE_THRESHOLD, 4096, 0) { void *ptr = NULL; ::cudaMallocHost(&ptr, PINNED_POOL_SIZE); if ( ptr == NULL ) { FatalError("Cannot allocate pinned memory", AMGX_ERR_NO_MEMORY); } add_memory(ptr, PINNED_POOL_SIZE); } PinnedMemoryPool::~PinnedMemoryPool() { for ( size_t i = 0 ; i < m_owned_ptrs.size() ; ++i ) if (m_owned_ptrs[i].m_managed) { ::cudaFreeHost(m_owned_ptrs[i].m_begin); } m_owned_ptrs.clear(); } DeviceMemoryPool::DeviceMemoryPool(size_t size, size_t max_block_size, size_t max_size) : MemoryPool(max_block_size, 4096, max_size) { if (max_size > 0 && size > max_size) { FatalError("Initial size for the memory pool specified is more than memory limit", AMGX_ERR_NO_MEMORY); } void *ptr = NULL; ::cudaMalloc(&ptr, size); if ( ptr == NULL ) { FatalError("Cannot allocate device memory", AMGX_ERR_NO_MEMORY); } add_memory(ptr, size); } void DeviceMemoryPool::expandPool(size_t size, size_t max_block_size) { if (this->m_max_size > 0 && (size + this->m_size) > this->m_max_size) { FatalError("Pool memory size is exceeded.", AMGX_ERR_NO_MEMORY); } void *ptr = NULL; ::cudaMalloc(&ptr, size); if ( ptr == NULL ) { FatalError("Cannot allocate device memory", AMGX_ERR_NO_MEMORY); } add_memory(ptr, size); } DeviceMemoryPool::~DeviceMemoryPool() { for ( size_t i = 0 ; i < m_owned_ptrs.size() ; ++i ) if (m_owned_ptrs[i].m_managed) { ::cudaFree(m_owned_ptrs[i].m_begin); } m_owned_ptrs.clear(); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct MemoryManager { // Get the global instance. static MemoryManager &get_instance() { static MemoryManager s_instance; return s_instance; } // Ctor. MemoryManager() : m_main_pinned_pool(NULL) , m_main_device_pool(NULL) , m_use_async_free(false) , m_use_device_pool(false) , m_alloc_scaling_factor(0) , m_alloc_scaling_threshold(16 * 1024 * 1024) { //initializeCriticalSection(&m_mutex); } // Dtor. ~MemoryManager() { //deleteCriticalSection(&m_mutex); } // Synchronize a device pool. void sync_pinned_pool(PinnedMemoryPool *pool); void sync_device_pool(DeviceMemoryPool *pool); // Scale a memory size. size_t scale(size_t size) const { size_t new_size = size; if ( size >= m_alloc_scaling_threshold ) { new_size += m_alloc_scaling_factor * (size / 100); } return new_size; } // Mutex to make functions thread-safe. std::recursive_mutex m_mutex; // Streams. typedef std::map<_thread_id, cudaStream_t> StreamMap; StreamMap m_thread_stream; cudaStream_t m_main_stream; // Items to free (async free). // typedef std::map<_thread_id, std::vector<void*> > AsyncFreeMap; // AsyncFreeMap m_thread_free; // std::vector<void*> m_main_free; // Pinned pools. 
typedef std::map<_thread_id, PinnedMemoryPool *> PinnedPoolMap; PinnedPoolMap m_thread_pinned_pools; PinnedMemoryPool *m_main_pinned_pool; // Device pools. typedef std::map<_thread_id, DeviceMemoryPool *> DevicePoolMap; DevicePoolMap m_thread_device_pools; DeviceMemoryPool *m_main_device_pool; // Registered memory blocks. typedef std::vector<std::pair<void *, void *> > RegisteredBlocks; typedef std::map<_thread_id, RegisteredBlocks> RegisteredBlocksMap; RegisteredBlocksMap m_thread_registered; RegisteredBlocks m_main_registered; // We keep a list of allocations that go through cudaMalloc. typedef std::map<void *, size_t> MemoryBlockMap; MemoryBlockMap m_allocated_blocks; // whether we want to use async free/wait or regular free. bool m_use_async_free; // whether we want to use device pool or simply do regular malloc. bool m_use_device_pool; // Scaling factor. size_t m_alloc_scaling_factor; // Scaling threshold. size_t m_alloc_scaling_threshold; }; void MemoryManager::sync_pinned_pool(PinnedMemoryPool *pool) { MemoryPool *mem_pool = (MemoryPool *) pool; assert(mem_pool); MemoryPool *main_pool = (MemoryPool *) m_main_pinned_pool; main_pool->m_used_blocks.insert(main_pool->m_used_blocks.end(), mem_pool->get_used_begin(), mem_pool->get_used_end()); mem_pool->free_all(); } void MemoryManager::sync_device_pool(DeviceMemoryPool *pool) { MemoryPool *mem_pool = (MemoryPool *) pool; assert(mem_pool); MemoryPool *main_pool = (MemoryPool *) m_main_device_pool; main_pool->m_used_blocks.insert(main_pool->m_used_blocks.end(), mem_pool->get_used_begin(), mem_pool->get_used_end()); mem_pool->free_all(); } bool hasPinnedMemoryPool() { MemoryManager &manager = MemoryManager::get_instance(); return manager.m_main_pinned_pool != NULL; } bool hasDeviceMemoryPool() { MemoryManager &manager = MemoryManager::get_instance(); return manager.m_main_device_pool != NULL; } void setPinnedMemoryPool(PinnedMemoryPool *pool) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); manager.m_main_pinned_pool = pool; manager.m_mutex.unlock(); } void setDeviceMemoryPool(DeviceMemoryPool *pool) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); manager.m_main_device_pool = pool; manager.m_mutex.unlock(); } void setPinnedMemoryPool(_thread_id thread_id, PinnedMemoryPool *pool) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); manager.m_thread_pinned_pools[thread_id] = pool; manager.m_mutex.unlock(); } void setDeviceMemoryPool(_thread_id thread_id, DeviceMemoryPool *pool) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); manager.m_thread_device_pools[thread_id] = pool; manager.m_mutex.unlock(); } void destroyPinnedMemoryPool() { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); delete manager.m_main_pinned_pool; manager.m_main_pinned_pool = NULL; manager.m_mutex.unlock(); } void destroyDeviceMemoryPool() { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); delete manager.m_main_device_pool; manager.m_main_device_pool = NULL; manager.m_mutex.unlock(); } void destroyPinnedMemoryPool(_thread_id thread_id) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id); if ( it == manager.m_thread_pinned_pools.end() ) { FatalError("INTERNAL ERROR: Invalid pinned memory pool", AMGX_ERR_UNKNOWN); } delete it->second; 
manager.m_thread_pinned_pools.erase(it); manager.m_mutex.unlock(); } void destroyDeviceMemoryPool(_thread_id thread_id) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.find(thread_id); if ( it == manager.m_thread_device_pools.end() ) { FatalError("INTERNAL ERROR: Invalid device memory pool", AMGX_ERR_UNKNOWN); } delete it->second; manager.m_thread_device_pools.erase(it); manager.m_mutex.unlock(); } void destroyAllPinnedMemoryPools() { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.begin(); for ( ; it != manager.m_thread_pinned_pools.end() ; ++it ) { delete it->second; manager.m_thread_pinned_pools.erase(it); } destroyPinnedMemoryPool(); manager.m_mutex.unlock(); } void destroyAllDeviceMemoryPools() { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.begin(); for ( ; it != manager.m_thread_device_pools.end() ; ++it ) { delete it->second; manager.m_thread_device_pools.erase(it); } destroyDeviceMemoryPool(); manager.m_mutex.unlock(); } void setAsyncFreeFlag(bool set) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_use_async_free = set; } void setDeviceMemoryPoolFlag(bool set) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_use_device_pool = set; } void setMallocScalingFactor(size_t factor) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_alloc_scaling_factor = factor; } void setMallocScalingThreshold(size_t threshold) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_alloc_scaling_threshold = threshold; } void createAsyncFreePool(_thread_id thread_id) { /* MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); manager.m_thread_free[thread_id] = std::vector<void*>(); manager.m_mutex.unlock(); */ } void setMainStream(cudaStream_t stream) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_main_stream = stream; } void setStream(_thread_id thread_id, cudaStream_t stream) { MemoryManager &manager = MemoryManager::get_instance(); manager.m_mutex.lock(); manager.m_thread_stream[thread_id] = stream; manager.m_mutex.unlock(); } cudaStream_t getStream() { MemoryManager &manager = MemoryManager::get_instance(); _thread_id thread_id = getCurrentThreadId(); MemoryManager::StreamMap::iterator it = manager.m_thread_stream.find(thread_id); if ( it != manager.m_thread_stream.end() ) { return it->second; } return manager.m_main_stream; } void cudaHostRegister(void *ptr, int size) { MemoryManager &manager = MemoryManager::get_instance(); _thread_id thread_id = getCurrentThreadId(); MemoryManager::RegisteredBlocks *blocks = &manager.m_main_registered; MemoryManager::RegisteredBlocksMap::iterator it = manager.m_thread_registered.find(thread_id); if ( it != manager.m_thread_registered.end() ) { blocks = &it->second; } bool reg = true; for ( size_t i = 0; i < blocks->size() ; ++i ) if ( blocks->at(i).first <= ptr && blocks->at(i).second >= ptr) { reg = false; break; } if ( reg ) { ::cudaHostRegister(ptr, size, 0); blocks->push_back(std::pair<void *, void *>(ptr, (char *)ptr + size)); } } cudaError_t cudaMallocHost(void **ptr, size_t size) { MemoryManager &manager = MemoryManager::get_instance(); _thread_id thread_id = getCurrentThreadId(); PinnedMemoryPool *pool = 
manager.m_main_pinned_pool; MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id); if ( it != manager.m_thread_pinned_pools.end() ) { pool = it->second; } size_t allocated_size = 0; cudaError_t error = cudaSuccess; void *new_ptr = NULL; if ( pool != NULL && size < PINNED_POOL_SIZE_THRESHOLD ) { new_ptr = pool->allocate(size, allocated_size); } if ( pool != NULL && new_ptr == NULL && size < PINNED_POOL_SIZE_THRESHOLD ) // retry with size { new_ptr = pool->allocate(size, allocated_size); } if ( new_ptr != NULL ) { *ptr = new_ptr; } else { //printf("calling cudaMallocHost, size = %lu\n",size); error = ::cudaMallocHost(ptr, size); } return error; } cudaError_t cudaFreeHost(void *ptr) { MemoryManager &manager = MemoryManager::get_instance(); _thread_id thread_id = getCurrentThreadId(); PinnedMemoryPool *pool = manager.m_main_pinned_pool; MemoryManager::PinnedPoolMap::iterator it = manager.m_thread_pinned_pools.find(thread_id); if ( it != manager.m_thread_pinned_pools.end() ) { pool = it->second; } size_t freed_size = 0; cudaError_t error = cudaSuccess; if ( pool != NULL && pool->is_allocated(ptr) ) { pool->free(ptr, freed_size); } else { //printf("calling cudaFreeHost\n"); error = ::cudaFreeHost(ptr); } return error; } cudaError_t cudaMalloc(void **ptr, size_t size) { AMGX_CPU_PROFILER("cudaMalloc"); #ifdef AMGX_PRINT_MALLOC_CALL_STACK #ifdef MULTIGPU int rank; MPI_Comm_rank( MPI_COMM_WORLD, &rank ); if (rank == 0) #endif { std::cerr << "----" << std::endl; std::cerr << "cudaMalloc call stack:" << std::endl; printStackTrace(std::cerr); } #endif MemoryManager &manager = MemoryManager::get_instance(); DeviceMemoryPool *pool = manager.m_main_device_pool; _thread_id thread_id = getCurrentThreadId(); MemoryManager::DevicePoolMap::iterator it = manager.m_thread_device_pools.find(thread_id); if ( it != manager.m_thread_device_pools.end() ) { pool = it->second; } bool use_pool = manager.m_use_device_pool; #ifdef AMGX_PRINT_MEMORY_INFO bool print_fallback = false; #endif size_t allocated_size = 0; cudaError_t error = cudaSuccess; void *new_ptr = NULL; if ( pool != NULL /*&& size < pool->get_max_block_size()*/ && use_pool ) { new_ptr = pool->allocate(size, allocated_size); } if ( new_ptr != NULL ) { *ptr = new_ptr; } else { #ifdef AMGX_PRINT_MEMORY_INFO print_fallback = true; #endif // We allocate an extra fraction here. allocated_size = manager.scale(size); // We hack the size to make it a multiple of a page size. allocated_size = PAGE_SIZE * ((allocated_size + PAGE_SIZE - 1) / PAGE_SIZE); error = ::cudaMalloc(ptr, allocated_size); // Very last attempt. Try without over allocation. 
if ( *ptr == NULL ) { allocated_size = size; error = ::cudaMalloc(ptr, allocated_size); } manager.m_mutex.lock(); manager.m_allocated_blocks[*ptr] = allocated_size; manager.m_mutex.unlock(); #ifdef AMGX_PRINT_MEMORY_INFO #ifdef MULTIGPU if (rank == 0) #endif { std::cerr << "INFO: Registered [block " << std::setw(18) << *ptr << " size: " << allocated_size << "]" << std::endl; } #endif } #ifdef AMGX_PRINT_MEMORY_INFO #ifdef MULTIGPU if (rank == 0) #endif { if ( print_fallback ) { std::cerr << "cudaMalloc "; } else { std::cerr << "pool::allocate"; } std::cerr << ";" << std::setw(18) << *ptr << ";" << std::setw(12) << size << ";" << std::setw(12) << allocated_size << ";" << std::setw(12) << pool->get_used_mem() << ";" << std::setw(12) << pool->get_free_mem(); size_t gpu_free_mem, gpu_total_mem; cudaMemGetInfo(&gpu_free_mem, &gpu_total_mem); std::cerr << ";" << std::setw(12) << gpu_free_mem << ";" << std::setw(12) << gpu_total_mem; std::cerr << std::endl; } #endif return error; } cudaError_t cudaFreeAsync(void *ptr) { AMGX_CPU_PROFILER("cudaFreeAsync"); #ifdef AMGX_PRINT_MALLOC_CALL_STACK #ifdef MULTIGPU int rank; MPI_Comm_rank( MPI_COMM_WORLD, &rank ); if (rank == 0) #endif { std::cerr << "----" << std::endl; std::cerr << "cudaFreeAsync call stack:" << std::endl; printStackTrace(std::cerr); } #endif // We accept NULL pointers and we do nothing. if ( ptr == NULL ) { return cudaSuccess; } MemoryManager &manager = MemoryManager::get_instance(); _thread_id thread_id = getCurrentThreadId(); #ifdef AMGX_PRINT_MEMORY_INFO bool print_async = false, print_fallback = false; #endif DeviceMemoryPool *pool = manager.m_main_device_pool; size_t freed_size = 0; cudaError_t status = cudaSuccess; MemoryManager::DevicePoolMap::iterator it_pool = manager.m_thread_device_pools.find(thread_id); if ( it_pool != manager.m_thread_device_pools.end() ) { pool = manager.m_thread_device_pools[thread_id]; } if ( pool != NULL && pool->is_allocated(ptr) ) { pool->free(ptr, freed_size); } else if ( pool != NULL && manager.m_use_async_free ) { #ifdef AMGX_PRINT_MEMORY_INFO #ifdef MULTIGPU if (rank == 0) #endif { print_async = true; std::cerr << "INFO: Async free [ptr " << std::setw(18) << ptr << "]" << std::endl; } #endif MemoryManager::MemoryBlockMap::iterator ptr_it = manager.m_allocated_blocks.find(ptr); if ( ptr_it == manager.m_allocated_blocks.end() ) { FatalError("INTERNAL ERROR: Invalid call to cudaFreeAsync", AMGX_ERR_UNKNOWN); } pool->add_memory(ptr, ptr_it->second); manager.m_mutex.lock(); manager.m_allocated_blocks.erase(ptr_it); manager.m_mutex.unlock(); } else { #ifdef AMGX_PRINT_MEMORY_INFO print_fallback = true; #endif status = ::cudaFree(ptr); } #ifdef AMGX_PRINT_MEMORY_INFO #ifdef MULTIGPU if (rank == 0) #endif { if ( print_fallback ) { std::cerr << "cudaFree "; } else if ( print_async ) { std::cerr << "pool::async "; } else { std::cerr << "pool::free "; } std::cerr << ";" << std::setw(18) << ptr << ";" << std::setw(12) << freed_size << ";" << std::setw(12) << pool->get_used_mem() << ";" << std::setw(12) << pool->get_free_mem(); size_t gpu_free_mem, gpu_total_mem; cudaMemGetInfo(&gpu_free_mem, &gpu_total_mem); std::cerr << ";" << std::setw(12) << gpu_free_mem << ";" << std::setw(12) << gpu_total_mem; std::cerr << std::endl; } #endif return status; } void cudaFreeWait() { } // Join device pools void joinPinnedPools() { MemoryManager &manager = MemoryManager::get_instance(); typedef MemoryManager::PinnedPoolMap::iterator Iterator; Iterator it = manager.m_thread_pinned_pools.begin(); Iterator end = 
manager.m_thread_pinned_pools.end(); for ( ; it != end ; ++it ) { manager.sync_pinned_pool(it->second); } } void joinDevicePools() { MemoryManager &manager = MemoryManager::get_instance(); typedef MemoryManager::DevicePoolMap::iterator Iterator; Iterator it = manager.m_thread_device_pools.begin(); Iterator end = manager.m_thread_device_pools.end(); for ( ; it != end ; ++it ) { manager.sync_device_pool(it->second); } } void printInfo() { // } void expandDeviceMemoryPool(size_t size, size_t max_block_size) { MemoryManager &manager = MemoryManager::get_instance(); if (manager.m_main_device_pool) { manager.m_main_device_pool->expandPool(size, max_block_size); } } } // namespace memory } // namespace amgx
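// ---------------------------------------------------------------------------
// Illustrative sketch (not part of AMGX): the allocation wrappers above all
// follow the same pattern -- look up a per-thread pool by thread id, fall back
// to the shared main pool, and only call the real CUDA allocator when the pool
// cannot satisfy the request. The minimal standalone example below mimics that
// lookup/fallback flow with hypothetical SimplePool / pooledMalloc names; the
// real MemoryManager, PinnedMemoryPool and DeviceMemoryPool types are defined
// elsewhere in AMGX and have richer interfaces (scaling, page rounding, async
// free, bookkeeping of allocated blocks).
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <cstddef>
#include <mutex>
#include <thread>
#include <unordered_map>

namespace pool_sketch {

// Hypothetical minimal pool: hands out fixed-size slots from one big block.
struct SimplePool
{
    void  *base = nullptr;
    size_t slot_size = 0, slot_count = 0, next_slot = 0;

    void *allocate(size_t size, size_t &allocated)
    {
        if (size > slot_size || next_slot == slot_count) { allocated = 0; return nullptr; }
        allocated = slot_size;
        return static_cast<char *>(base) + (next_slot++) * slot_size;
    }
};

std::unordered_map<std::thread::id, SimplePool *> g_thread_pools; // per-thread pools
SimplePool *g_main_pool = nullptr;                                // shared fallback pool
std::mutex  g_mutex;

// Same decision flow as amgx::memory::cudaMalloc above:
// thread-local pool -> main pool -> plain ::cudaMalloc.
cudaError_t pooledMalloc(void **ptr, size_t size)
{
    SimplePool *pool = g_main_pool;
    {
        std::lock_guard<std::mutex> lock(g_mutex);
        auto it = g_thread_pools.find(std::this_thread::get_id());
        if (it != g_thread_pools.end()) pool = it->second;
    }
    size_t allocated = 0;
    if (pool != nullptr)
    {
        void *p = pool->allocate(size, allocated);
        if (p != nullptr) { *ptr = p; return cudaSuccess; }
    }
    return ::cudaMalloc(ptr, size); // fall back to the driver allocation
}

} // namespace pool_sketch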
#include "common.h" #include "gptKernels_int8.h" #include "transformerKernels.h" /** @file Implemented the cuda kernel function and its launcher that required by GPT model. Currently, fp16 and fp32 versions are provided */ namespace lightseq { namespace cuda { __forceinline__ __device__ int8_t float2int8(float x, float quant_scale) { float i8_f = x * quant_scale; int32_t i8 = floorf(i8_f + 0.5); i8 = i8 < -127 ? -127 : (i8 > 127 ? 127 : i8); return int8_t(i8); } template <typename T> __global__ void ker_gpt_embedding_int8(const int8_t* token_emb, const T* pos_emb, const int* token_id, T* output, int* real_seq_len, int padding_id, int pos_offset, float dequant_scale) { int target_pos = blockIdx.x * gridDim.y + blockIdx.y; int tid = token_id[target_pos]; if (tid == padding_id) { // for padding id output[target_pos * blockDim.x + threadIdx.x] = 0.f; return; } if (threadIdx.x == 0) { atomicAdd(real_seq_len + blockIdx.x, 1); } output[target_pos * blockDim.x + threadIdx.x] = T(token_emb[tid * blockDim.x + threadIdx.x]) * dequant_scale + pos_emb[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x]; } /* fp16 version */ template <> __global__ void ker_gpt_embedding_int8<__half>( const int8_t* token_emb, const __half* pos_emb, const int* token_id, __half* output, int* real_seq_len, int padding_id, int pos_offset, float dequant_scale) { int target_pos = blockIdx.x * gridDim.y + blockIdx.y; int tid = token_id[target_pos]; half2* output_h = (half2*)output; if (tid == padding_id) { // for padding id output_h[target_pos * blockDim.x + threadIdx.x] = __float2half2_rn(0.f); return; } if (threadIdx.x == 0) { atomicAdd(real_seq_len + blockIdx.x, 1); } float2 te; char2 cte = ((const char2*)token_emb)[tid * blockDim.x + threadIdx.x]; float2 pe = __half22float2( ((const half2*) pos_emb)[(blockIdx.y + pos_offset) * blockDim.x + threadIdx.x]); te.x = float(cte.x) * dequant_scale + pe.x; te.y = float(cte.y) * dequant_scale + pe.y; output_h[target_pos * blockDim.x + threadIdx.x] = __float22half2_rn(te); } template <typename T> void ker_gpt_embedding_i8I_launcher(int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream, const int8_t* token_emb, const T* pos_emb, const int* token_id, T* output, int* real_seq_len, int padding_id, int pos_offset, float dequant_scale) { ker_gpt_embedding_int8<T> <<<dim3(batch_size, batch_seq_len), hidden_size, 0, stream>>>( token_emb, pos_emb, token_id, output, real_seq_len, padding_id, pos_offset, dequant_scale); } template <> void ker_gpt_embedding_i8I_launcher<__half>( int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream, const int8_t* token_emb, const __half* pos_emb, const int* token_id, __half* output, int* real_seq_len, int padding_id, int pos_offset, float dequant_scale) { ker_gpt_embedding_int8<__half> <<<dim3(batch_size, batch_seq_len), hidden_size / 2, 0, stream>>>( token_emb, pos_emb, token_id, output, real_seq_len, padding_id, pos_offset, dequant_scale); } template void ker_gpt_embedding_i8I_launcher<float>( int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream, const int8_t* token_emb, const float* pos_emb, const int* token_id, float* output, int* real_seq_len, int padding_id, int pos_offset, float dequant_scale); template void ker_gpt_embedding_i8I_launcher<__half>( int batch_size, int batch_seq_len, int hidden_size, cudaStream_t stream, const int8_t* token_emb, const __half* pos_emb, const int* token_id, __half* output, int* real_seq_len, int padding_id, int pos_offset, float dequant_scale); __global__ void 
ker_ppl_i8I(const int8_t* logits, const int* input_ids, const int* real_seq_len, float* ppl, int vocab_size, float dequant_scale, bool in_col32) { int seq_len = real_seq_len[blockIdx.x]; // remove "eos" if (blockIdx.y >= seq_len - 1) { // will not contribute to ppl return; } int token_idx_in_batch = blockIdx.x * gridDim.y + blockIdx.y; int left_logit_idx = token_idx_in_batch * vocab_size + threadIdx.x; int right_logit_idx = (token_idx_in_batch + 1) * vocab_size; /* step 1. find max logit over the whole vocab */ float max_logit = CUDA_FLOAT_INF_NEG; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = token_idx_in_batch; int col_id = idx - token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * gridDim.y, vocab_size); } else { logits_idx = idx; } max_logit = fmaxf(max_logit, (float)logits[logits_idx] * dequant_scale); } max_logit = blockReduceMax(max_logit); __shared__ float s_max_logit; if (threadIdx.x == 0) { s_max_logit = max_logit; } __syncthreads(); /* step 2. compute the log probability for the given token, add it to the sequence's ppl */ float sum_exp_logit = 0.f; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = token_idx_in_batch; int col_id = idx - token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * gridDim.y, vocab_size); } else { logits_idx = idx; } float lgt = fmaxf((float)logits[logits_idx] * dequant_scale - s_max_logit, logit_thresh_min); sum_exp_logit += expf(lgt); } sum_exp_logit = blockReduceSum(sum_exp_logit); if (threadIdx.x == 0) { int token_id = input_ids[token_idx_in_batch + 1]; int logits_idx; if (in_col32) { int row_id = token_idx_in_batch; int col_id = token_id; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * gridDim.y, vocab_size); } else { logits_idx = token_idx_in_batch * vocab_size + token_id; } float log_prob = ((float)logits[logits_idx] * dequant_scale - s_max_logit - logf(sum_exp_logit)) / (float)(seq_len - 1); atomicAdd(ppl + blockIdx.x, -log_prob); } } void ker_ppl_i8I_launcher(int batch_size, int batch_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t* logits, const int* input_ids, const int* real_seq_len, float* ppl, int vocab_size, float dequant_scale, bool in_col32) { ker_ppl_i8I<<<dim3(batch_size, batch_seq_len), max_thread_per_block, 0, stream>>>(logits, input_ids, real_seq_len, ppl, vocab_size, dequant_scale, in_col32); } template <typename T> __global__ void ker_correlation_softmax_gpt_i32I( int32_t* correlation, T* output, const int* real_seq_len, const int batch_seq_len, float attn_scale, float dequant_scale) { int query_token_pos = blockIdx.y % batch_seq_len; if (query_token_pos >= real_seq_len[blockIdx.x]) { return; } int mask = 0; // can see the token when mask=0 if (threadIdx.x > query_token_pos || threadIdx.x >= batch_seq_len) { mask = 1; // Can only see the token on the left side of it } int idx = (blockIdx.x * gridDim.y + blockIdx.y) * batch_seq_len + threadIdx.x; float val = threadIdx.x < batch_seq_len ? ((float)correlation[idx] * attn_scale * dequant_scale * dequant_scale) : CUDA_FLOAT_INF_NEG; float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val); __shared__ float smax; if (threadIdx.x == 0) smax = max_val; __syncthreads(); val = mask ? 
0.f : expf(val - smax); float rsum = blockReduceSum<float>(val); __shared__ float ssum; if (threadIdx.x == 0) ssum = rsum; __syncthreads(); if (threadIdx.x < batch_seq_len) output[idx] = (T)(val / ssum); } template <typename T> void ker_correlation_softmax_gpt_i32I_launcher( int batch_size, int batch_seq_len, int head_num, cudaStream_t stream, int32_t* correlation, T* output, const int* real_seq_len, float attn_scale, float dequant_scale) { int block_dim = batch_seq_len; if (batch_seq_len < 1024) { block_dim = (batch_seq_len + 31) >> 5; block_dim *= 32; } ker_correlation_softmax_gpt_i32I<T> <<<dim3(batch_size, head_num * batch_seq_len), block_dim, 0, stream>>>( correlation, output, real_seq_len, batch_seq_len, attn_scale, dequant_scale); } template void ker_correlation_softmax_gpt_i32I_launcher<float>( int batch_size, int batch_seq_len, int head_num, cudaStream_t stream, int32_t* correlation, float* output, const int* real_seq_len, float attn_scale, float dequant_scale); template void ker_correlation_softmax_gpt_i32I_launcher<__half>( int batch_size, int batch_seq_len, int head_num, cudaStream_t stream, int32_t* correlation, __half* output, const int* real_seq_len, float attn_scale, float dequant_scale); template <int k> __global__ void ker_topk_sample_i8I(const int8_t* logits, int* old_input_ids, int* new_input_ids, const int* real_seq_len, const int vocab_size, const int batch_seq_len, int logits_seq_len, int* unfinished, curandState* curandstate, int eos_id, float dequant_scale, bool in_col32) { int last_token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1; /* add EOS to end if last token is EOS */ if (old_input_ids[last_token_idx_in_batch] == eos_id) { int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x; int right_token_idx = (blockIdx.x + 1) * batch_seq_len; for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) { int new_idx = idx + blockIdx.x; new_input_ids[new_idx] = old_input_ids[idx]; } if (threadIdx.x == 0) { // blockIdx.x * (batch_seq_len+1) + batch_seq_len new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id; old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id; } return; } int logits_token_idx_in_batch = blockIdx.x * logits_seq_len + logits_seq_len - 1; int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x; int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size; /* step1. 
find max logit and rough Kth logit over the whole vocab */ __shared__ float s_max_logit, s_topk_logit; float rough_top_kth_logit = CUDA_FLOAT_INF_NEG; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } rough_top_kth_logit = fmaxf(rough_top_kth_logit, (float)logits[logits_idx] * dequant_scale); } float max_logit = blockReduceMax(rough_top_kth_logit); rough_top_kth_logit = blockRoughTopK<float, k>(rough_top_kth_logit); if (threadIdx.x == 0) { s_topk_logit = rough_top_kth_logit; s_max_logit = max_logit; } __syncthreads(); __shared__ int s_tid; if (k != 1) { /* step2 hold one logit per thread which larger than Kth logit and sample * from them */ float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG; int topk_tid = vocab_size; int test_num = 0; __shared__ float s_topk_exp_sum; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32( row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } float logit = (float)logits[logits_idx] * dequant_scale; float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min)); if (logit >= s_topk_logit) test_num++; if (logit >= s_topk_logit && logit_exp > topk_exp) { topk_exp = logit_exp; topk_tid = idx - left_logit_idx + threadIdx.x; } } test_num = blockReduceSum(test_num); if (topk_tid == vocab_size) topk_exp = 0; topk_exp_sum = blockReduceSum(topk_exp); if (threadIdx.x == 0) { s_topk_exp_sum = topk_exp_sum; } __syncthreads(); /* calculate cumulative probability */ float topk_prob = topk_exp / s_topk_exp_sum; float prefix_sum_prob; typedef cub::BlockScan<float, 1024> BlockScan; __shared__ typename BlockScan::TempStorage temp_storage; BlockScan(temp_storage).InclusiveSum(topk_prob, prefix_sum_prob); __shared__ float random_x; if (threadIdx.x == 0) { random_x = curand_uniform(curandstate + blockIdx.x); } __syncthreads(); if (threadIdx.x == 0) { s_tid = vocab_size; } __syncthreads(); int threadID = threadIdx.x; __shared__ int s_threadID; __shared__ float s_max_prob; if (random_x > prefix_sum_prob) threadID = blockDim.x; threadID = blockReduceMin(threadID); float max_prob = blockReduceMax(topk_prob); if (threadIdx.x == 0) { s_threadID = threadID; s_max_prob = max_prob; } __syncthreads(); if (threadIdx.x == s_threadID) { s_tid = topk_tid; } __syncthreads(); if (s_tid == vocab_size && topk_prob == s_max_prob) { s_tid = topk_tid; } __syncthreads(); } else { s_tid = vocab_size; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32( row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } float logit = (float)logits[logits_idx] * dequant_scale; if (logit == s_max_logit) { s_tid = idx - left_logit_idx + threadIdx.x; } } __syncthreads(); } /* if new sampled tid is not EOS, set unfinish TRUE */ if (threadIdx.x == 0) { if (s_tid != eos_id) unfinished[0] = 1; } /* step3 copy old_input_ids to new_input_ids and add new sampled ids */ int left_token_idx = blockIdx.x * batch_seq_len + 
threadIdx.x; int right_token_idx = (blockIdx.x + 1) * batch_seq_len; for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) { int new_idx = idx + blockIdx.x; new_input_ids[new_idx] = old_input_ids[idx]; } if (threadIdx.x == 0) { new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid; // save the newly sampled ids to old_input_ids for next step inputs old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid; } } void ker_topk_sample_i8I_launcher(int batch_size, int batch_seq_len, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t* logits, int* old_input_ids, int* new_input_ids, const int* real_seq_len, const int vocab_size, const int k, int* unfinished, curandState* curandstate, int eos_id, float dequant_scale, bool in_col32) { if (k == 1) ker_topk_sample_i8I<1><<<batch_size, max_thread_per_block, 0, stream>>>( logits, old_input_ids, new_input_ids, real_seq_len, vocab_size, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 2) ker_topk_sample_i8I<2><<<batch_size, max_thread_per_block, 0, stream>>>( logits, old_input_ids, new_input_ids, real_seq_len, vocab_size, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 4) ker_topk_sample_i8I<4><<<batch_size, max_thread_per_block, 0, stream>>>( logits, old_input_ids, new_input_ids, real_seq_len, vocab_size, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 8) ker_topk_sample_i8I<8><<<batch_size, max_thread_per_block, 0, stream>>>( logits, old_input_ids, new_input_ids, real_seq_len, vocab_size, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 16) ker_topk_sample_i8I<16><<<batch_size, max_thread_per_block, 0, stream>>>( logits, old_input_ids, new_input_ids, real_seq_len, vocab_size, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else if (k == 32) ker_topk_sample_i8I<32><<<batch_size, max_thread_per_block, 0, stream>>>( logits, old_input_ids, new_input_ids, real_seq_len, vocab_size, batch_seq_len, logits_seq_len, unfinished, curandstate, eos_id, dequant_scale, in_col32); else { throw std::invalid_argument("topk argument should be in [1,2,4,8,16,32]"); } } __global__ void ker_topp_sample_i8I(const int8_t* logits, int* old_input_ids, int* new_input_ids, const int* real_seq_len, const int vocab_size, const int batch_seq_len, int logits_seq_len, int* unfinished, float p, curandState* curandstate, int eos_id, float dequant_scale, bool in_col32) { int token_idx_in_batch = blockIdx.x * batch_seq_len + batch_seq_len - 1; /* add EOS to end if last token is EOS */ if (old_input_ids[token_idx_in_batch] == eos_id) { int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x; int right_token_idx = (blockIdx.x + 1) * batch_seq_len; for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) { int new_idx = idx + blockIdx.x; new_input_ids[new_idx] = old_input_ids[idx]; } if (threadIdx.x == 0) { new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = eos_id; old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = eos_id; } return; } int logits_token_idx_in_batch = blockIdx.x * logits_seq_len + logits_seq_len - 1; int left_logit_idx = logits_token_idx_in_batch * vocab_size + threadIdx.x; int right_logit_idx = (logits_token_idx_in_batch + 1) * vocab_size; /* step1. 
find max logit in each thread and sample from these probs with nucleus sampling */ __shared__ float s_max_logit; float max_logit = CUDA_FLOAT_INF_NEG; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } max_logit = fmaxf(max_logit, (float)logits[logits_idx] * dequant_scale); } float max_logit_array[1]; max_logit_array[0] = max_logit; typedef cub::BlockRadixSort<float, 1024, 1> BlockRadixSort; __shared__ typename BlockRadixSort::TempStorage sort_temp_storage; BlockRadixSort(sort_temp_storage).SortDescending(max_logit_array); float presum_max_logit_exp; max_logit = max_logit_array[0]; float block_max_logit = blockReduceMax(max_logit); if (threadIdx.x == 0) { s_max_logit = block_max_logit; } __syncthreads(); float biased_logit_exp = expf(fmaxf(max_logit - s_max_logit, logit_thresh_min)); typedef cub::BlockScan<float, 1024> BlockScan; __shared__ typename BlockScan::TempStorage presum_temp_storage; BlockScan(presum_temp_storage) .InclusiveSum(biased_logit_exp, presum_max_logit_exp); float topp_exp_threshold; if (threadIdx.x == blockDim.x - 1) { topp_exp_threshold = p * presum_max_logit_exp; } __shared__ float s_presum_logit_exp_threshold; if (presum_max_logit_exp > topp_exp_threshold) { presum_max_logit_exp = CUDA_FLOAT_INF_NEG; } float logit_exp_threshold = blockReduceMax(presum_max_logit_exp); if (threadIdx.x == 0) { s_presum_logit_exp_threshold = logit_exp_threshold; } __syncthreads(); __shared__ float s_logit_threshold; if (presum_max_logit_exp == s_presum_logit_exp_threshold) { s_logit_threshold = max_logit; } __syncthreads(); /* step2 hold one logit per thread and sample * from them */ float topk_exp_sum, topk_exp = CUDA_FLOAT_INF_NEG; int topk_tid = vocab_size; int test_num = 0; __shared__ float s_topk_exp_sum; for (int idx = left_logit_idx; idx < right_logit_idx; idx += blockDim.x) { int logits_idx; if (in_col32) { int row_id = logits_token_idx_in_batch; int col_id = idx - logits_token_idx_in_batch * vocab_size; logits_idx = row_major2flat_col32(row_id, col_id, gridDim.x * logits_seq_len, vocab_size); } else { logits_idx = idx; } float logit = (float)logits[logits_idx] * dequant_scale; float logit_exp = expf(fmaxf(logit - s_max_logit, logit_thresh_min)); if (logit >= s_logit_threshold) test_num++; if (logit >= s_logit_threshold && logit_exp > topk_exp) { topk_exp = logit_exp; topk_tid = idx - left_logit_idx + threadIdx.x; } } test_num = blockReduceSum(test_num); if (topk_tid == vocab_size) topk_exp = 0; topk_exp_sum = blockReduceSum(topk_exp); if (threadIdx.x == 0) { s_topk_exp_sum = topk_exp_sum; } __syncthreads(); /* calculate cumulative probability */ float topk_prob = topk_exp / s_topk_exp_sum; float prefix_sum_prob; BlockScan(presum_temp_storage).InclusiveSum(topk_prob, prefix_sum_prob); __shared__ float random_x; if (threadIdx.x == 0) { random_x = curand_uniform(curandstate + blockIdx.x); } __syncthreads(); __shared__ int s_tid; if (threadIdx.x == 0) { s_tid = vocab_size; } __syncthreads(); int threadID = threadIdx.x; __shared__ int s_threadID; __shared__ float s_max_prob; if (random_x > prefix_sum_prob) threadID = blockDim.x; threadID = blockReduceMin(threadID); float max_prob = blockReduceMax(topk_prob); if (threadIdx.x == 0) { s_threadID = threadID; s_max_prob = max_prob; } __syncthreads(); if (threadIdx.x == 
s_threadID) { s_tid = topk_tid; } __syncthreads(); if (s_tid == vocab_size && topk_prob == s_max_prob) { s_tid = topk_tid; } __syncthreads(); /* if new sampled tid is not EOS, set unfinish TRUE */ if (threadIdx.x == 0) { if (s_tid != eos_id) unfinished[0] = 1; } /* step3 copy old_input_ids to new_input_ids and add new sampled ids */ int left_token_idx = blockIdx.x * batch_seq_len + threadIdx.x; int right_token_idx = (blockIdx.x + 1) * batch_seq_len; for (int idx = left_token_idx; idx < right_token_idx; idx += blockDim.x) { int new_idx = idx + blockIdx.x; new_input_ids[new_idx] = old_input_ids[idx]; } if (threadIdx.x == 0) { new_input_ids[(blockIdx.x + 1) * (batch_seq_len + 1) - 1] = s_tid; // save the newly sampled ids to old_input_ids for next step inputs old_input_ids[gridDim.x * batch_seq_len + blockIdx.x] = s_tid; } } void ker_topp_sample_i8I_launcher(int batch_size, int batch_seq_len, int logits_seq_len, int max_thread_per_block, cudaStream_t stream, const int8_t* logits, int* old_input_ids, int* new_input_ids, const int* real_seq_len, const int vocab_size, const float p, int* unfinished, curandState* curandstate, int eos_id, float dequant_scale, bool in_col32) { ker_topp_sample_i8I<<<batch_size, max_thread_per_block, 0, stream>>>( logits, old_input_ids, new_input_ids, real_seq_len, vocab_size, batch_seq_len, logits_seq_len, unfinished, p, curandstate, eos_id, dequant_scale, in_col32); } template <typename T> __global__ void ker_arrange_qkv_with_cache_i8I_i8O( const int8_t* ori_qkv, const T* qkv_bias, int8_t* new_q, int8_t* new_k, int8_t* k_cache, int8_t* new_v, int8_t* v_cache, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32) { int hidden_size = head_num * dim_per_head; int batch_size = gridDim.x / batch_seq_len; int batch_id = blockIdx.x / batch_seq_len; int token_id = blockIdx.x % batch_seq_len; int head_id = threadIdx.x / dim_per_head; int dim_id = threadIdx.x % dim_per_head; int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len, dim_per_head); int8_t new_val; if (token_id < batch_seq_len - 1) { int old_target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len - 1, dim_per_head); if (blockIdx.y == 0) return; if (blockIdx.y == 1) new_val = k_cache[old_target_id]; if (blockIdx.y == 2) new_val = v_cache[old_target_id]; } else { int qkv_index; if (in_col32) { int row_id = batch_id; int col_id = blockIdx.y * hidden_size + threadIdx.x; qkv_index = row_major2flat_col32(row_id, col_id, batch_size, gridDim.y * hidden_size); } else { qkv_index = (batch_id * gridDim.y + blockIdx.y) * hidden_size + threadIdx.x; } float tmp_val = float(ori_qkv[qkv_index]) * dequant_scale + __ldg(&qkv_bias[blockIdx.y * hidden_size + threadIdx.x]); new_val = float2int8(tmp_val, quant_scale); if (blockIdx.y == 0) { target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1, dim_per_head); } } if (blockIdx.y == 0) new_q[target_id] = new_val; if (blockIdx.y == 1) new_k[target_id] = new_val; if (blockIdx.y == 2) { new_v[target_id] = new_val; } } template <> __global__ void ker_arrange_qkv_with_cache_i8I_i8O<__half>( const int8_t* ori_qkv, const __half* qkv_bias, int8_t* new_q, int8_t* new_k, int8_t* k_cache, int8_t* new_v, int8_t* v_cache, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32) { int hidden_size = head_num * dim_per_head; int batch_size = gridDim.x / batch_seq_len; int batch_id = blockIdx.x / batch_seq_len; int token_id 
= blockIdx.x % batch_seq_len; int head_id = threadIdx.x / dim_per_head; int dim_id = threadIdx.x % dim_per_head; int target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len, dim_per_head); int2 new_val; int8_t* p_new_val = (int8_t*)(&new_val); const int2* p_ori_qkv = (const int2*)ori_qkv; const float4* p_bias = (const float4*)qkv_bias; const int2* p_k_cache = (const int2*)k_cache; const int2* p_v_cache = (const int2*)v_cache; int2* p_new_q = (int2*)new_q; int2* p_new_k = (int2*)new_k; int2* p_new_v = (int2*)new_v; if (token_id < batch_seq_len - 1) { int old_target_id = targetid_4dim(batch_id, head_id, token_id, dim_id, head_num, batch_seq_len - 1, dim_per_head); if (blockIdx.y == 0) return; if (blockIdx.y == 1) new_val = p_k_cache[old_target_id]; if (blockIdx.y == 2) new_val = p_v_cache[old_target_id]; } else { int qkv_index; if (in_col32) { int row_id = batch_id; int col_id = (blockIdx.y * hidden_size + threadIdx.x) << 3; qkv_index = row_major2flat_col32(row_id, col_id, batch_size, (gridDim.y * hidden_size) << 3) >> 3; } else { qkv_index = (batch_id * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; } int2 ori_qkv8 = p_ori_qkv[qkv_index]; float4 bias8 = __ldg(&p_bias[blockIdx.y * blockDim.x + threadIdx.x]); int8_t* p_ori_qkv8 = (int8_t*)(&ori_qkv8); __half* p_bias8 = (__half*)(&bias8); #pragma unroll for (int i = 0; i < 8; ++i) { p_new_val[i] = float2int8(float(p_ori_qkv8[i]) * dequant_scale + float(p_bias8[i]), quant_scale); } if (blockIdx.y == 0) { target_id = targetid_4dim(batch_id, head_id, 0, dim_id, head_num, 1, dim_per_head); } } if (blockIdx.y == 0) p_new_q[target_id] = new_val; if (blockIdx.y == 1) p_new_k[target_id] = new_val; if (blockIdx.y == 2) p_new_v[target_id] = new_val; } template <typename T> void ker_arrange_qkv_with_cache_i8I_i8O_launcher( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t* ori_qkv, const T* qkv_bias, int8_t* new_q, int8_t* new_k, int8_t* k_cache, int8_t* new_v, int8_t* v_cache, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32) { ker_arrange_qkv_with_cache_i8I_i8O<T> <<<dim3(batch_token_num, 3), hidden_size, 0, stream>>>( ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache, batch_seq_len, dim_per_head, head_num, dequant_scale, quant_scale, in_col32); } template <> void ker_arrange_qkv_with_cache_i8I_i8O_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t* ori_qkv, const __half* qkv_bias, int8_t* new_q, int8_t* new_k, int8_t* k_cache, int8_t* new_v, int8_t* v_cache, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32) { ker_arrange_qkv_with_cache_i8I_i8O<__half> <<<dim3(batch_token_num, 3), hidden_size / 8, 0, stream>>>( ori_qkv, qkv_bias, new_q, new_k, k_cache, new_v, v_cache, batch_seq_len, dim_per_head / 8, head_num, dequant_scale, quant_scale, in_col32); } template void ker_arrange_qkv_with_cache_i8I_i8O_launcher<float>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t* ori_qkv, const float* qkv_bias, int8_t* new_q, int8_t* new_k, int8_t* k_cache, int8_t* new_v, int8_t* v_cache, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32); template void ker_arrange_qkv_with_cache_i8I_i8O_launcher<__half>( int batch_token_num, int hidden_size, cudaStream_t stream, const int8_t* ori_qkv, const __half* qkv_bias, int8_t* new_q, int8_t* new_k, int8_t* k_cache, int8_t* new_v, int8_t* 
v_cache, int batch_seq_len, int dim_per_head, int head_num, float dequant_scale, float quant_scale, bool in_col32); template <typename T> __global__ void ker_attention_mask_weights_i32I( int32_t* correlation, T* output, const int* real_seq_len, int dst_seq_len, int src_seq_len, float attn_scale, float dequant_scale) { int query_token_pos = blockIdx.y % dst_seq_len + src_seq_len - dst_seq_len; if (query_token_pos >= real_seq_len[blockIdx.x]) { return; } int mask = 0; // can see the token when mask=0 if (threadIdx.x > query_token_pos) { mask = 1; // Can only see the token on the left side of it } int idx = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; float val = (float)correlation[idx] * attn_scale * dequant_scale * dequant_scale; float max_val = blockReduceMax<float>(mask ? CUDA_FLOAT_INF_NEG : val); __shared__ float smax; if (threadIdx.x == 0) smax = max_val; __syncthreads(); val = mask ? 0.f : expf(fmaxf(logit_thresh_min, val - smax)); float rsum = blockReduceSum<float>(val); __shared__ float ssum; if (threadIdx.x == 0) ssum = rsum; __syncthreads(); output[idx] = (T)(val / (ssum + epsilon)); } template <typename T> void ker_attention_mask_weights_i32I_launcher( int batch_size, int dst_seq_len, int src_seq_len, int head_num, cudaStream_t stream, int32_t* correlation, T* output, const int* real_seq_len, float attn_scale, float dequant_scale) { ker_attention_mask_weights_i32I<T> <<<dim3(batch_size, head_num * dst_seq_len), src_seq_len, 0, stream>>>( correlation, output, real_seq_len, dst_seq_len, src_seq_len, attn_scale, dequant_scale); } template void ker_attention_mask_weights_i32I_launcher<float>( int batch_size, int dst_seq_len, int src_seq_len, int head_num, cudaStream_t stream, int32_t* correlation, float* output, const int* real_seq_len, float attn_scale, float dequant_scale); template void ker_attention_mask_weights_i32I_launcher<__half>( int batch_size, int dst_seq_len, int src_seq_len, int head_num, cudaStream_t stream, int32_t* correlation, __half* output, const int* real_seq_len, float attn_scale, float dequant_scale); } // namespace cuda } // namespace lightseq
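// ---------------------------------------------------------------------------
// Illustrative sketch (not part of LightSeq): the kernels above use a simple
// symmetric int8 scheme -- quantize with round(x * quant_scale) clamped to
// [-127, 127] (see float2int8), and dequantize by multiplying the int8 value
// by dequant_scale. The host-side helpers below reproduce that round trip so
// the scale convention is easy to check; quantize_scale_from_absmax is a
// hypothetical helper name chosen for this sketch, not a LightSeq API.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdint>
#include <cstdio>

namespace int8_sketch {

// Host mirror of the device float2int8 above: round-to-nearest, clamp to +/-127.
inline int8_t quantize(float x, float quant_scale)
{
    int32_t q = static_cast<int32_t>(std::floor(x * quant_scale + 0.5f));
    q = q < -127 ? -127 : (q > 127 ? 127 : q);
    return static_cast<int8_t>(q);
}

// One common way to pick the scale: map the largest magnitude to 127.
inline float quantize_scale_from_absmax(float absmax) { return 127.f / absmax; }

// Example round trip: quant_scale and dequant_scale are reciprocals of each
// other, matching how the launchers above pass dequant_scale to the kernels.
inline void round_trip_demo()
{
    const float absmax        = 4.0f;
    const float quant_scale   = quantize_scale_from_absmax(absmax); // 31.75
    const float dequant_scale = 1.f / quant_scale;

    float  x = 1.3f;
    int8_t q = quantize(x, quant_scale);
    std::printf("x=%f quantized=%d reconstructed=%f\n",
                x, int(q), float(q) * dequant_scale);
}

} // namespace int8_sketch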
#define GAMMA 1.4f #define iterations 2000 #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif double get_time() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //self-defined user type typedef struct{ float x; float y; float z; } Float3; __device__ inline void compute_velocity(float density, Float3 momentum, Float3* velocity){ velocity->x = momentum.x / density; velocity->y = momentum.y / density; velocity->z = momentum.z / density; } __device__ inline float compute_speed_sqd(Float3 velocity){ return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float density, float density_energy, float speed_sqd){ return ((float)(GAMMA) - (float)(1.0f))*(density_energy - (float)(0.5f)*density*speed_sqd); } // sqrt is a device function __device__ inline float compute_speed_of_sound(float density, float pressure){ return sqrt((float)(GAMMA)*pressure/density); } __device__ __host__ inline void compute_flux_contribution(float density, Float3 momentum, float density_energy, float pressure, Float3 velocity, Float3* fc_momentum_x, Float3* fc_momentum_y, Float3* fc_momentum_z, Float3* fc_density_energy) { fc_momentum_x->x = velocity.x*momentum.x + pressure; fc_momentum_x->y = velocity.x*momentum.y; fc_momentum_x->z = velocity.x*momentum.z; fc_momentum_y->x = fc_momentum_x->y; fc_momentum_y->y = velocity.y*momentum.y + pressure; fc_momentum_y->z = velocity.y*momentum.z; fc_momentum_z->x = fc_momentum_x->z; fc_momentum_z->y = fc_momentum_y->z; fc_momentum_z->z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy->x = velocity.x*de_p; fc_density_energy->y = velocity.y*de_p; fc_density_energy->z = velocity.z*de_p; } void copy(float* dst, const float* src, int N){ hipMemcpy(dst, src, N*sizeof(float), hipMemcpyDeviceToDevice); } void dump(float *h_variables, int nel, int nelr){ { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } } __global__ void initialize_buffer(float *d, const float val, const int nelr) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); if (i < nelr) d[i] = val; } __global__ void initialize_variables(const int nelr, float* variables, const float* ff_variable) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } __global__ void compute_step_factor(const int nelr, float* variables, float* areas, float* step_factors){ const int i = (blockDim.x*blockIdx.x + threadIdx.x); if( i >= nelr) return; float density = variables[i + VAR_DENSITY*nelr]; Float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + 
(VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; Float3 velocity; compute_velocity(density, momentum, &velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); step_factors[i] = (float)(0.5f) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound)); } __global__ void compute_flux( int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* ff_variable, float* fluxes, Float3* ff_flux_contribution_density_energy, Float3* ff_flux_contribution_momentum_x, Float3* ff_flux_contribution_momentum_y, Float3* ff_flux_contribution_momentum_z){ const int i = (blockDim.x*blockIdx.x + threadIdx.x); if( i >= nelr) return; const float smoothing_coefficient = (float)(0.2f); int j, nb; Float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; Float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; Float3 velocity_i; compute_velocity(density_i, momentum_i, &velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); //float speed_sqd_i; //compute_speed_sqd(velocity_i, speed_sqd_i); float speed_i = sqrt(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); Float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; Float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, &flux_contribution_i_momentum_x, &flux_contribution_i_momentum_y, &flux_contribution_i_momentum_z, &flux_contribution_i_density_energy); float flux_i_density = (float)(0.0f); Float3 flux_i_momentum; flux_i_momentum.x = (float)(0.0f); flux_i_momentum.y = (float)(0.0f); flux_i_momentum.z = (float)(0.0f); float flux_i_density_energy = (float)(0.0f); Float3 velocity_nb; float density_nb, density_energy_nb; Float3 momentum_nb; Float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; Float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, &velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, &flux_contribution_nb_momentum_x, &flux_contribution_nb_momentum_y, 
&flux_contribution_nb_momentum_z, &flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*(float)(0.5f)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = (float)(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = (float)(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = (float)(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = (float)(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = (float)(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = (float)(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += 
factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } __global__ void time_step(int j, int nelr, const float* old_variables, float* variables, const float* step_factors, const float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); if( i >= nelr) return; float factor = step_factors[i]/(float)(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } /* * Main function */ int main(int argc, char** argv){ printf("WG size of kernel:initialize = %d\nWG size of kernel:compute_step_factor = %d\nWG size of kernel:compute_flux = %d\nWG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4); if (argc < 2){ std::cout << "Please specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; float h_ff_variable[NVAR]; // set far field conditions and load them into constant memory on the gpu //{ const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; Float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); Float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); Float3 h_ff_flux_contribution_momentum_x; Float3 h_ff_flux_contribution_momentum_y; Float3 h_ff_flux_contribution_momentum_z; Float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, &h_ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_z, &h_ff_flux_contribution_density_energy); int nel; int nelr; std::ifstream file(data_file_name, std::ifstream::in); if(!file.good()){ throw(std::string("can not find/open file! 
")+data_file_name); } file >> nel; nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length)); std::cout<<"--cambine: nel="<<nel<<", nelr="<<nelr<<std::endl; float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } float* h_variables = new float[nelr*NVAR]; #ifdef DEBUG float* h_step_factors = new float[nelr]; #endif double offload_start = get_time(); float *d_ff_variable; Float3 *d_ff_flux_contribution_momentum_x; Float3 *d_ff_flux_contribution_momentum_y; Float3 *d_ff_flux_contribution_momentum_z; Float3 *d_ff_flux_contribution_density_energy; hipMalloc((void**)&d_ff_variable, sizeof(float)*NVAR); hipMemcpy(d_ff_variable, h_ff_variable, sizeof(float)*NVAR, hipMemcpyHostToDevice); hipMalloc((void**)&d_ff_flux_contribution_momentum_x, sizeof(Float3)); hipMemcpy(d_ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(Float3), hipMemcpyHostToDevice); hipMalloc((void**)&d_ff_flux_contribution_momentum_y, sizeof(Float3)); hipMemcpy(d_ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(Float3), hipMemcpyHostToDevice); hipMalloc((void**)&d_ff_flux_contribution_momentum_z, sizeof(Float3)); hipMemcpy(d_ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(Float3), hipMemcpyHostToDevice); hipMalloc((void**)&d_ff_flux_contribution_density_energy, sizeof(Float3)); hipMemcpy(d_ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(Float3), hipMemcpyHostToDevice); float* d_areas; hipMalloc((void**)&d_areas, sizeof(float)*nelr); hipMemcpy(d_areas, h_areas, sizeof(float)*nelr, hipMemcpyHostToDevice); float* d_normals; hipMalloc((void**)&d_normals, sizeof(float)*nelr*NDIM*NNB); hipMemcpy(d_normals, h_normals, sizeof(float)*nelr*NDIM*NNB, hipMemcpyHostToDevice); int* d_elements_surrounding_elements; hipMalloc((void**)&d_elements_surrounding_elements, sizeof(int)*nelr*NNB); hipMemcpy(d_elements_surrounding_elements, h_elements_surrounding_elements, sizeof(int)*nelr*NNB, hipMemcpyHostToDevice); // Create arrays and set initial conditions float* d_variables; hipMalloc((void**)&d_variables, sizeof(float)*nelr*NVAR); float* d_old_variables; hipMalloc((void**)&d_old_variables, sizeof(float)*nelr*NVAR); float* d_fluxes; hipMalloc((void**)&d_fluxes, sizeof(float)*nelr*NVAR); float* d_step_factors; hipMalloc((void**)&d_step_factors, sizeof(float)*nelr); dim3 gridDim1 ((nelr + BLOCK_SIZE_1 - 1)/BLOCK_SIZE_1); dim3 gridDim2 ((nelr + BLOCK_SIZE_2 - 1)/BLOCK_SIZE_2); dim3 gridDim3 ((nelr + BLOCK_SIZE_3 - 1)/BLOCK_SIZE_3); dim3 gridDim4 ((nelr + BLOCK_SIZE_4 - 1)/BLOCK_SIZE_4); 
hipLaunchKernelGGL(initialize_variables, gridDim1, BLOCK_SIZE_1, 0, 0, nelr, d_variables, d_ff_variable); hipLaunchKernelGGL(initialize_variables, gridDim1, BLOCK_SIZE_1, 0, 0, nelr, d_old_variables, d_ff_variable); hipLaunchKernelGGL(initialize_variables, gridDim1, BLOCK_SIZE_1, 0, 0, nelr, d_fluxes, d_ff_variable); hipLaunchKernelGGL(initialize_buffer, gridDim1, BLOCK_SIZE_1, 0, 0, d_step_factors, 0, nelr); // Begin iterations for(int n = 0; n < iterations; n++){ copy(d_old_variables, d_variables, nelr*NVAR); // for the first iteration we compute the time step hipLaunchKernelGGL(compute_step_factor, gridDim2, BLOCK_SIZE_2, 0, 0, nelr, d_variables, d_areas, d_step_factors); #ifdef DEBUG hipMemcpy(h_step_factors, d_step_factors, sizeof(float)*nelr, cudaMemDeviceToHost); for (int i = 0; i < 16; i++) printf("step factor: i=%d %f\n", i, h_step_factors[i]); #endif for(int j = 0; j < RK; j++){ hipLaunchKernelGGL(compute_flux, gridDim3, BLOCK_SIZE_3, 0, 0, nelr, d_elements_surrounding_elements, d_normals, d_variables, d_ff_variable, d_fluxes, d_ff_flux_contribution_density_energy, \ d_ff_flux_contribution_momentum_x, d_ff_flux_contribution_momentum_y, d_ff_flux_contribution_momentum_z); hipLaunchKernelGGL(time_step, gridDim4, BLOCK_SIZE_4, 0, 0, j, nelr, d_old_variables, d_variables, d_step_factors, d_fluxes); } } hipMemcpy(h_variables, d_variables, sizeof(float)*nelr*NVAR, hipMemcpyDeviceToHost); hipFree(d_ff_variable); hipFree(d_ff_flux_contribution_momentum_x); hipFree(d_ff_flux_contribution_momentum_y); hipFree(d_ff_flux_contribution_momentum_z); hipFree(d_ff_flux_contribution_density_energy); hipFree(d_areas); hipFree(d_normals); hipFree(d_elements_surrounding_elements); hipFree(d_variables); hipFree(d_old_variables); hipFree(d_fluxes); hipFree(d_step_factors); double offload_end = get_time(); printf("Device offloading time = %lf(s)\n", offload_end - offload_start); #ifdef OUTPUT std::cout << "Saving solution..." << std::endl; dump(h_variables, nel, nelr); #endif delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; delete[] h_variables; #ifdef DEBUG delete[] h_step_factors; #endif std::cout << "Done..." << std::endl; return 0; }
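// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the solver above): the host code pads the
// element count nel up to nelr, the next multiple of block_length, so every
// thread block works on a full tile and the tail elements simply duplicate the
// last real element. The helpers below restate that arithmetic; the names
// round_up_to_multiple / blocks_for are illustrative only.
// ---------------------------------------------------------------------------
#include <cassert>

// nelr = block_length * ((nel / block_length) + min(1, nel % block_length))
// is the usual round-up-to-a-multiple computed via ceiling division.
inline int round_up_to_multiple(int n, int block)
{
    return ((n + block - 1) / block) * block;
}

// Grid size used for the kernel launches above: one block per block_size elements.
inline int blocks_for(int nelr, int block_size)
{
    return (nelr + block_size - 1) / block_size;
}

inline void padding_sketch_check()
{
    // e.g. 1000 elements with block_length 192 pad up to 1152 = 6 * 192.
    assert(round_up_to_multiple(1000, 192) == 1152);
    assert(blocks_for(1152, 192) == 6);
}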
#ifdef __INTELLISENSE__ /* just for vstudio code colors */ #define __CUDA_ARCH__ 520 #endif #define TPB52 32 #define TPB50 16 #define Nrow 8 #define Ncol 8 #define memshift 3 __constant__ const uint2x4 blake2b_IV[2] = { 0xf3bcc908lu, 0x6a09e667lu, 0x84caa73blu, 0xbb67ae85lu, 0xfe94f82blu, 0x3c6ef372lu, 0x5f1d36f1lu, 0xa54ff53alu, 0xade682d1lu, 0x510e527flu, 0x2b3e6c1flu, 0x9b05688clu, 0xfb41bd6blu, 0x1f83d9ablu, 0x137e2179lu, 0x5be0cd19lu }; //#include "cuda_lyra2_sm2.cuh" #include "cuda_lyra2_sm5.cuh" #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ > 500 #include "cuda_vectors.h" #ifdef __INTELLISENSE__ /* just for vstudio code colors */ __device__ uint32_t __shfl(uint32_t a, uint32_t b, uint32_t c); #endif __device__ uint2 *DMatrix; __device__ void LD4S(uint2 res[3], const uint32_t row, const uint32_t col, const uint2* shared_mem) { const uint32_t s0 = (Ncol * row + col) * memshift; #pragma unroll 3 for (uint32_t j = 0; j < 3; j++) res[j] = shared_mem[((s0 + j) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x]; } __device__ void ST4S(const uint32_t row, const uint32_t col, const uint2 data[3], uint2* shared_mem) { const uint32_t s0 = (Ncol * row + col) * memshift; #pragma unroll 3 for (uint32_t j = 0; j < 3; j++) shared_mem[((s0 + j) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x] = data[j]; } __device__ __forceinline__ uint2 __shfl(uint2 a, uint32_t b, uint32_t c) { return make_uint2(__shfl(a.x, b, c), __shfl(a.y, b, c)); } __device__ __forceinline__ void __shfl3(uint2 &a1, uint2 &a2, uint2 &a3, uint32_t b1, uint32_t b2, uint32_t b3, uint32_t c) { a1 = __shfl(a1, b1, c); a2 = __shfl(a2, b2, c); a3 = __shfl(a3, b3, c); } static __device__ void Gfunc(uint2 &a, uint2 &b, uint2 &c, uint2 &d) { a+= b; d = SWAPUINT2(d^a); c+= d; b = ROR24(b^c); a+= b; d = ROR16(d^a); c+= d; b = ROR2(b^c, 63); } static __device__ void round_lyra(uint2 s[4]){ Gfunc(s[0], s[1], s[2], s[3]); __shfl3(s[1], s[2], s[3], threadIdx.x + 1, threadIdx.x + 2, threadIdx.x + 3, 4); Gfunc(s[0], s[1], s[2], s[3]); __shfl3(s[1], s[2], s[3], threadIdx.x + 3, threadIdx.x + 2, threadIdx.x + 1, 4); } static __device__ void round_lyra(uint2x4 *const __restrict__ s) { Gfunc(s[0].x, s[1].x, s[2].x, s[3].x); Gfunc(s[0].y, s[1].y, s[2].y, s[3].y); Gfunc(s[0].z, s[1].z, s[2].z, s[3].z); Gfunc(s[0].w, s[1].w, s[2].w, s[3].w); Gfunc(s[0].x, s[1].y, s[2].z, s[3].w); Gfunc(s[0].y, s[1].z, s[2].w, s[3].x); Gfunc(s[0].z, s[1].w, s[2].x, s[3].y); Gfunc(s[0].w, s[1].x, s[2].y, s[3].z); } static __device__ void reduceDuplex(uint2 state[4], uint2* shared) { uint2 state1[3]; #pragma unroll 8 for (int i = 0; i < Nrow; i++) { ST4S(0, Ncol - i - 1, state, shared); round_lyra(state); } #pragma unroll 8 for (int i = 0; i < Nrow; i++) { LD4S(state1, 0, i, shared); #pragma unroll 3 for (int j = 0; j < 3; j++) state[j] ^= state1[j]; round_lyra(state); #pragma unroll 3 for (int j = 0; j < 3; j++) state1[j] ^= state[j]; ST4S(1, Ncol - i - 1, state1, shared); } } static __device__ void reduceDuplexRowSetup(const int rowIn, const int rowInOut, const int rowOut, uint2 state[4], uint2* shared) { uint2 state1[3], state2[3]; #pragma unroll 1 for (uint32_t i = 0; i < Nrow; i++) { LD4S(state1, rowIn, i, shared); LD4S(state2, rowInOut, i, shared); for (int j = 0; j < 3; j++) state[j] ^= state1[j] + state2[j]; round_lyra(state); #pragma unroll 3 for (int j = 0; j < 3; j++) state1[j] ^= state[j]; ST4S(rowOut, Ncol - i - 1, state1, shared); //一個手前のスレッドからデータを貰う(同時に一個先のスレッドにデータを送る) uint2 Data0 = state[0]; uint2 Data1 = state[1]; uint2 Data2 = state[2]; 
__shfl3(Data0, Data1, Data2, threadIdx.x - 1, threadIdx.x - 1, threadIdx.x - 1, 4); if (threadIdx.x == 0) { state2[0] ^= Data2; state2[1] ^= Data0; state2[2] ^= Data1; } else { state2[0] ^= Data0; state2[1] ^= Data1; state2[2] ^= Data2; } ST4S(rowInOut, i, state2, shared); } } static __device__ void reduceDuplexRowt(const int rowIn, const int rowInOut, const int rowOut, uint2 state[4], uint2* shared) { for (uint32_t i = 0; i < Nrow; i++) { uint2 state1[3], state2[3]; LD4S(state1, rowIn, i, shared); LD4S(state2, rowInOut, i, shared); #pragma unroll for (int j = 0; j < 3; j++) state[j] ^= state1[j] + state2[j]; round_lyra(state); //一個手前のスレッドからデータを貰う(同時に一個先のスレッドにデータを送る) uint2 Data0 = state[0]; uint2 Data1 = state[1]; uint2 Data2 = state[2]; __shfl3(Data0, Data1, Data2, threadIdx.x - 1, threadIdx.x - 1, threadIdx.x - 1, 4); if (threadIdx.x == 0) { state2[0] ^= Data2; state2[1] ^= Data0; state2[2] ^= Data1; } else { state2[0] ^= Data0; state2[1] ^= Data1; state2[2] ^= Data2; } ST4S(rowInOut, i, state2, shared); LD4S(state1, rowOut, i, shared); #pragma unroll for (int j = 0; j < 3; j++) state1[j] ^= state[j]; ST4S(rowOut, i, state1, shared); } } static __device__ void reduceDuplexRowt_8(const int rowInOut, uint2* state, uint2* shared) { uint2 state1[3], state2[3], last[3]; LD4S(state1, 2, 0, shared); LD4S(last, rowInOut, 0, shared); #pragma unroll 3 for (int j = 0; j < 3; j++) state[j] ^= state1[j] + last[j]; round_lyra(state); //一個手前のスレッドからデータを貰う(同時に一個先のスレッドにデータを送る) uint2 Data0 = state[0]; uint2 Data1 = state[1]; uint2 Data2 = state[2]; __shfl3(Data0, Data1, Data2, threadIdx.x - 1, threadIdx.x - 1, threadIdx.x - 1, 4); if (threadIdx.x == 0) { last[0] ^= Data2; last[1] ^= Data0; last[2] ^= Data1; } else { last[0] ^= Data0; last[1] ^= Data1; last[2] ^= Data2; } if (rowInOut == 5) { #pragma unroll 3 for (int j = 0; j < 3; j++) last[j] ^= state[j]; } #pragma unroll 1 for (uint32_t i = 1; i < Nrow; i++) { LD4S(state1, 2, i, shared); LD4S(state2, rowInOut, i, shared); #pragma unroll 3 for (int j = 0; j < 3; j++) state[j] ^= state1[j] + state2[j]; round_lyra(state); } #pragma unroll 3 for (int j = 0; j < 3; j++) state[j] ^= last[j]; } __global__ __launch_bounds__(512, 1) void lyra2_gpu_hash_32_1(uint32_t threads,const uint2* __restrict__ g_hash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); if (thread < threads) { uint2x4 state[4]; state[0].x = state[1].x = __ldg(&g_hash[thread + threads * 0]); state[0].y = state[1].y = __ldg(&g_hash[thread + threads * 1]); state[0].z = state[1].z = __ldg(&g_hash[thread + threads * 2]); state[0].w = state[1].w = __ldg(&g_hash[thread + threads * 3]); state[2] = blake2b_IV[0]; state[3] = blake2b_IV[1]; for (uint32_t i = 0; i<24; i++) round_lyra(state); //because 12 is not enough ((uint2x4*)DMatrix)[threads * 0 + thread] = state[0]; ((uint2x4*)DMatrix)[threads * 1 + thread] = state[1]; ((uint2x4*)DMatrix)[threads * 2 + thread] = state[2]; ((uint2x4*)DMatrix)[threads * 3 + thread] = state[3]; } } __global__ __launch_bounds__(TPB52, 1) void lyra2_gpu_hash_32_2(uint32_t threads) { const uint32_t thread = blockDim.y * blockIdx.x + threadIdx.y; __shared__ uint2 shared[192*TPB52]; if (thread < threads) { uint2 state[4]; state[0] = __ldg(&DMatrix[(0 * threads + thread) * blockDim.x + threadIdx.x]); state[1] = __ldg(&DMatrix[(1 * threads + thread) * blockDim.x + threadIdx.x]); state[2] = __ldg(&DMatrix[(2 * threads + thread) * blockDim.x + threadIdx.x]); state[3] = __ldg(&DMatrix[(3 * threads + thread) * blockDim.x + threadIdx.x]); 
reduceDuplex(state,shared); reduceDuplexRowSetup(1, 0, 2, state,shared); reduceDuplexRowSetup(2, 1, 3, state,shared); for(int i=3;i<7;i++){ reduceDuplexRowSetup(i, 8%(i+1), i+1, state,shared); } uint32_t rowa = __shfl(state[0].x, 0, 4) & 7; uint32_t prev = 7; for(int i=0;i<21;i+=3){ reduceDuplexRowt(prev, rowa, i&7, state,shared); prev = i&7; rowa = __shfl(state[0].x, 0, 4) & 7; } reduceDuplexRowt_8(rowa, state,shared); DMatrix[(0 * threads + thread) * blockDim.x + threadIdx.x] = state[0]; DMatrix[(1 * threads + thread) * blockDim.x + threadIdx.x] = state[1]; DMatrix[(2 * threads + thread) * blockDim.x + threadIdx.x] = state[2]; DMatrix[(3 * threads + thread) * blockDim.x + threadIdx.x] = state[3]; } } __global__ __launch_bounds__(512, 1) void lyra2_gpu_hash_32_3(uint32_t threads, uint2 *g_hash) { const uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x; uint2x4 state[4]; if (thread < threads) { state[0] = __ldg4(&((uint2x4*)DMatrix)[threads * 0 + thread]); state[1] = __ldg4(&((uint2x4*)DMatrix)[threads * 1 + thread]); state[2] = __ldg4(&((uint2x4*)DMatrix)[threads * 2 + thread]); state[3] = __ldg4(&((uint2x4*)DMatrix)[threads * 3 + thread]); for (uint32_t i = 0; i < 12; i++) round_lyra(state); g_hash[thread + threads * 0] = state[0].x; g_hash[thread + threads * 1] = state[0].y; g_hash[thread + threads * 2] = state[0].z; g_hash[thread + threads * 3] = state[0].w; } //thread } #else #if __CUDA_ARCH__ < 500 /* for unsupported SM arch */ __device__ void* DMatrix; #endif __global__ void lyra2_gpu_hash_32_1(uint32_t threads, const uint2* __restrict__ g_hash) {} __global__ void lyra2_gpu_hash_32_2(uint32_t threads) {} __global__ void lyra2_gpu_hash_32_3(uint32_t threads, uint2 *g_hash) {} #endif __host__ void lyra2_cpu_init(int thr_id, uint32_t threads, uint2* d_matrix) { // just assign the device pointer allocated in main loop cudaMemcpyToSymbol(DMatrix, &d_matrix, sizeof(uint64_t*), 0, cudaMemcpyHostToDevice); } __host__ void lyra2_cpu_hash_32(int thr_id, uint32_t threads, uint2* d_hash) { int dev_id = device_map[thr_id]; if (device_sm[dev_id] >= 520) { dim3 grid1((threads * 4 + TPB52 - 1) / TPB52); dim3 block1(4, TPB52 >> 2); dim3 grid2((threads + 512 - 1) / 64); dim3 block2(512); lyra2_gpu_hash_32_1 << <grid2, block2 >> > (threads, d_hash); lyra2_gpu_hash_32_2 << <grid1, block1>> > (threads); lyra2_gpu_hash_32_3 << <grid2, block2 >> > (threads, d_hash); } else{ dim3 grid((threads + TPB50 - 1) / TPB50); dim3 block(TPB50); lyra2_gpu_hash_32 <<< grid, block >>> (threads, d_hash); } }
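/* Reference note (added, not part of the original file): Gfunc() above is the Blake2b G
 * quarter-round without message injection, applied to 64-bit words stored as uint2 halves;
 * SWAPUINT2, ROR24, ROR16 and ROR2(x, 63) correspond to rotations by 32, 24, 16 and 63 bits.
 * A minimal host-side sketch of the same quarter-round on plain uint64_t values: */
#include <cstdint>

static inline uint64_t rotr64(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }

static inline void blake2b_g(uint64_t &a, uint64_t &b, uint64_t &c, uint64_t &d)
{
	a += b; d = rotr64(d ^ a, 32);
	c += d; b = rotr64(b ^ c, 24);
	a += b; d = rotr64(d ^ a, 16);
	c += d; b = rotr64(b ^ c, 63);
}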
/** \addtogroup cudpp_app * */ /** @name Segmented Scan Functions * @{ */ #include <cstdlib> #include <cstdio> #include <assert.h> #include "cuda_util.h" #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_plan.h" #include "cudpp_manager.h" #include "kernel/segmented_scan_kernel.cuh" #include "kernel/vector_kernel.cuh" /** @brief Perform recursive scan on arbitrary size arrays * * This is the CPU-side workhorse function of the segmented scan * engine. This function invokes the CUDA kernels which perform the * segmented scan on individual blocks. * * Scans of large arrays must be split (possibly recursively) into a * hierarchy of block scans, where each block is scanned by a single * CUDA thread block. At each recursive level of the * segmentedScanArrayRecursive first invokes a kernel to scan all blocks of * that level, and if the level has more than one block, it calls * itself recursively. On returning from each recursive level, the * total sum of each block from the level below is added to all * elements of the first segment of the corresponding block in this * level. * * Template parameter T is the data type of the input data. * Template parameter op is the binary operator of the segmented scan. * Template parameter isBackward specifies whether the direction is backward * (not implemented). It is forward if it is false. * Template parameter isExclusive specifies whether the segmented scan * is exclusive (true) or inclusive (false). * * @param[out] d_out The output array for the segmented scan results * @param[in] d_idata The input array to be scanned * @param[in] d_iflags The input flags vector which specifies the * segments. The first element of a segment is marked by a 1 in the * corresponding position in d_iflags vector. All other elements of * d_iflags is 0. * @param[out] d_blockSums Array of arrays of per-block sums (one * array per recursive level, allocated * by allocScanStorage()) * @param[out] d_blockFlags Array of arrays of per-block OR-reductions * of flags (one array per recursive level, allocated by * allocScanStorage()) * @param[out] d_blockIndices Array of arrays of per-block * min-reductions of indices (one array per recursive level, allocated * by allocSegmentedScanStorage()). An index for a particular position \c i in * a block is calculated as - if \c d_iflags[i] is set then it is the * 1-based index of that position (i.e if \c d_iflags[10] is set then * index is \c 11) otherwise the index is \c INT_MAX (the identity * element of a min operator) * @param[in] numElements The number of elements in the array to scan * @param[in] level The current recursive level of the scan * @param[in] sm12OrBetterHw True if running on sm_12 or higher GPU, false otherwise */ template <typename T, class Op, bool isBackward, bool isExclusive, bool doShiftFlagsLeft> void segmentedScanArrayRecursive(T *d_out, const T *d_idata, const unsigned int *d_iflags, T **d_blockSums, unsigned int **d_blockFlags, unsigned int **d_blockIndices, int numElements, int level, bool sm12OrBetterHw) { unsigned int numBlocks = max(1, (int)ceil((double)numElements / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); // This is the number of elements per block that the // CTA level API is aware of unsigned int numEltsPerBlock = SCAN_CTA_SIZE * 2; // Space to store flags - we need two sets. 
One gets modified and the // other doesn't unsigned int flagSpace = numEltsPerBlock * sizeof(unsigned int); // Space to store indices unsigned int idxSpace = numEltsPerBlock * sizeof(unsigned int); // Total shared memory space unsigned int sharedMemSize = sizeof(T) * (numEltsPerBlock) + idxSpace + flagSpace; // setup execution parameters dim3 grid(max(1, numBlocks), 1, 1); dim3 threads(SCAN_CTA_SIZE, 1, 1); // make sure there are no CUDA errors before we start CUDA_CHECK_ERROR("segmentedScanArrayRecursive before kernels"); bool fullBlock = (numElements == (numBlocks * SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE)); unsigned int traitsCode = 0; if (numBlocks > 1) traitsCode |= 1; if (fullBlock) traitsCode |= 2; if (sm12OrBetterHw) traitsCode |= 4; switch(traitsCode) { case 0: // single block, single row, non-full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, false, false, false> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, 0, 0, 0); break; case 1: // multi block, single row, non-full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, false, true, false> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, d_blockSums[level], d_blockFlags[level], d_blockIndices[level]); break; case 2: // single block, single row, full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, true, false, false> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, 0, 0, 0); break; case 3: // multi block, single row, full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, true, true, false> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, d_blockSums[level], d_blockFlags[level], d_blockIndices[level]); break; case 4: // single block, single row, non-full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, false, false, true> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, 0, 0, 0); break; case 5: // multi block, single row, non-full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, false, true, true> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, d_blockSums[level], d_blockFlags[level], d_blockIndices[level]); break; case 6: // single block, single row, full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, true, false, true> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, 0, 0, 0); break; case 7: // multi block, single row, full last block segmentedScan4<T, SegmentedScanTraits<T, Op, isBackward, isExclusive, doShiftFlagsLeft, true, true, true> > <<< grid, threads, sharedMemSize >>> (d_out, d_idata, d_iflags, numElements, d_blockSums[level], d_blockFlags[level], d_blockIndices[level]); break; } CUDA_CHECK_ERROR("segmentedScanArrayRecursive after block level scans"); if (numBlocks > 1) { // After scanning all the sub-blocks, we are mostly done. But // now we need to take all of the last values of the // sub-blocks and segment scan those. This will give us a new value // that must be sdded to the first segment of each block to get // the final results. 
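        // The recursive call below scans the per-block sums/flags arrays themselves; the
        // recursion bottoms out once a level fits in a single block, after which the block
        // totals are added back into the first segment of each block by the
        // vectorSegmentedAddUniform* kernels.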
segmentedScanArrayRecursive<T, Op, isBackward, false, false> ((T*)d_blockSums[level], (const T*)d_blockSums[level], d_blockFlags[level], (T **)d_blockSums, d_blockFlags, d_blockIndices, numBlocks, level + 1, sm12OrBetterHw); if (isBackward) { if (fullBlock) vectorSegmentedAddUniformToRight4<T, Op, true><<<grid, threads>>> (d_out, d_blockSums[level], d_blockIndices[level], numElements, 0, 0); else vectorSegmentedAddUniformToRight4<T, Op, false><<<grid, threads>>> (d_out, d_blockSums[level], d_blockIndices[level], numElements, 0, 0); } else { if (fullBlock) vectorSegmentedAddUniform4<T, Op, true><<<grid, threads>>> (d_out, d_blockSums[level], d_blockIndices[level], numElements, 0, 0); else vectorSegmentedAddUniform4<T, Op, false><<<grid, threads>>> (d_out, d_blockSums[level], d_blockIndices[level], numElements, 0, 0); } CUDA_CHECK_ERROR("vectorSegmentedAddUniform4"); } } #ifdef __cplusplus extern "C" { #endif // file scope /** @brief Allocate intermediate block sums, block flags and block * indices arrays in a CUDPPSegmentedScanPlan class. * * Segmented scans of large arrays must be split (possibly * recursively) into a hierarchy of block segmented scans, where each * block is scanned by a single CUDA thread block. At each recursive * level of the scan, we need an array in which to store the total * sums of all blocks in that level. Also at this level we have two * more arrays - one which contains the OR-reductions of flags of all * blocks at that level and the second which contains the * min-reductions of indices of all blocks at that levels This * function computes the amount of storage needed and allocates it. * * @param[in] plan Pointer to CUDPPSegmentedScanPlan object containing segmented scan * options and number of elements, which is used to compute storage * requirements. 
*/ void allocSegmentedScanStorage(CUDPPSegmentedScanPlan *plan) { plan->m_numEltsAllocated = plan->m_numElements; size_t numElts = plan->m_numElements; size_t level = 0; do { size_t numBlocks = max(1, (unsigned int)ceil ((double)numElts / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); if (numBlocks > 1) { level++; } numElts = numBlocks; } while (numElts > 1); size_t elementSize = 0; switch(plan->m_config.datatype) { case CUDPP_INT: plan->m_blockSums = (void**) malloc(level * sizeof(int*)); elementSize = sizeof(int); break; case CUDPP_UINT: plan->m_blockSums = (void**) malloc(level * sizeof(unsigned int*)); elementSize = sizeof(unsigned int); break; case CUDPP_FLOAT: plan->m_blockSums = (void**) malloc(level * sizeof(float*)); elementSize = sizeof(float); break; case CUDPP_DOUBLE: plan->m_blockSums = (void**) malloc(level * sizeof(double*)); elementSize = sizeof(double); break; case CUDPP_LONGLONG: plan->m_blockSums = (void**) malloc(level * sizeof(long long*)); elementSize = sizeof(long long); break; case CUDPP_ULONGLONG: plan->m_blockSums = (void**) malloc(level * sizeof(unsigned long long*)); elementSize = sizeof(unsigned long long); break; default: break; } plan->m_blockFlags = (unsigned int**) malloc(level * sizeof(unsigned int*)); plan->m_blockIndices = (unsigned int**) malloc(level * sizeof(unsigned int*)); plan->m_numLevelsAllocated = level; numElts = plan->m_numElements; level = 0; do { size_t numBlocks = max(1, (unsigned int)ceil((double)numElts / ((double)SEGSCAN_ELTS_PER_THREAD * SCAN_CTA_SIZE))); if (numBlocks > 1) { CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_blockSums[level]), numBlocks * elementSize)); CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_blockFlags[level]), numBlocks * sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void**) &(plan->m_blockIndices[level]), numBlocks * sizeof(unsigned int))); level++; } numElts = numBlocks; } while (numElts > 1); CUDA_CHECK_ERROR("allocSegmentedScanStorage"); } /** @brief Deallocate intermediate block sums, block flags and block * indices arrays in a CUDPPSegmentedScanPlan class. * * These arrays must have been allocated by allocSegmentedScanStorage(), * which is called by the constructor of CUDPPSegmentedScanPlan. * * @param[in] plan CUDPPSegmentedScanPlan class initialized by its constructor. 
*/ void freeSegmentedScanStorage(CUDPPSegmentedScanPlan *plan) { for (unsigned int i = 0; i < plan->m_numLevelsAllocated; i++) { cudaFree(plan->m_blockSums[i]); cudaFree(plan->m_blockFlags[i]); cudaFree(plan->m_blockIndices[i]); } CUDA_CHECK_ERROR("freeSegmentedScanStorage"); free((void**)plan->m_blockSums); free((void**)plan->m_blockFlags); free((void**)plan->m_blockIndices); plan->m_blockSums = 0; plan->m_blockFlags = 0; plan->m_blockIndices = 0; plan->m_numEltsAllocated = 0; plan->m_numLevelsAllocated = 0; } #ifdef __cplusplus } #endif template <typename T, bool isBackward, bool isExclusive> void cudppSegmentedScanDispatchOperator(void *d_out, const void *d_in, const unsigned int *d_iflags, int numElements, const CUDPPSegmentedScanPlan *plan ) { cudaDeviceProp deviceProp; plan->m_planManager->getDeviceProps(deviceProp); bool sm12OrBetterHw = false; if ((deviceProp.major * 10 + deviceProp.minor) >= 12) sm12OrBetterHw = true; switch(plan->m_config.op) { case CUDPP_MAX: segmentedScanArrayRecursive<T, OperatorMax<T>, isBackward, isExclusive, isBackward> ((T *)d_out, (const T *)d_in, d_iflags, (T **)plan->m_blockSums, plan->m_blockFlags, plan->m_blockIndices, numElements, 0, sm12OrBetterHw); break; case CUDPP_ADD: segmentedScanArrayRecursive<T, OperatorAdd<T>, isBackward, isExclusive, isBackward> ((T *)d_out, (const T *)d_in, d_iflags, (T **)plan->m_blockSums, plan->m_blockFlags, plan->m_blockIndices, numElements, 0, sm12OrBetterHw); break; case CUDPP_MULTIPLY: segmentedScanArrayRecursive<T, OperatorMultiply<T>, isBackward, isExclusive, isBackward> ((T *)d_out, (const T *)d_in, d_iflags, (T **)plan->m_blockSums, plan->m_blockFlags, plan->m_blockIndices, numElements, 0, sm12OrBetterHw); break; case CUDPP_MIN: segmentedScanArrayRecursive<T, OperatorMin<T>, isBackward, isExclusive, isBackward> ((T *)d_out, (const T *)d_in, d_iflags, (T **)plan->m_blockSums, plan->m_blockFlags, plan->m_blockIndices, numElements, 0, sm12OrBetterHw); break; default: break; } } template <bool isBackward, bool isExclusive> void cudppSegmentedScanDispatchType(void *d_out, const void *d_in, const unsigned int *d_iflags, int numElements, const CUDPPSegmentedScanPlan *plan ) { switch(plan->m_config.datatype) { case CUDPP_INT: cudppSegmentedScanDispatchOperator<int, isBackward, isExclusive> (d_out, d_in, d_iflags, numElements, plan); break; case CUDPP_UINT: cudppSegmentedScanDispatchOperator<unsigned int, isBackward, isExclusive> (d_out, d_in, d_iflags, numElements, plan); break; case CUDPP_FLOAT: cudppSegmentedScanDispatchOperator<float, isBackward, isExclusive> (d_out, d_in, d_iflags, numElements, plan); break; case CUDPP_DOUBLE: cudppSegmentedScanDispatchOperator<double, isBackward, isExclusive> (d_out, d_in, d_iflags, numElements, plan); break; case CUDPP_LONGLONG: cudppSegmentedScanDispatchOperator<long long, isBackward, isExclusive> (d_out, d_in, d_iflags, numElements, plan); break; case CUDPP_ULONGLONG: cudppSegmentedScanDispatchOperator<unsigned long long, isBackward, isExclusive> (d_out, d_in, d_iflags, numElements, plan); break; default: break; } } #ifdef __cplusplus extern "C" { #endif /** @brief Dispatch function to perform a scan (prefix sum) on an * array with the specified configuration. * * This is the dispatch routine which calls segmentedScanArrayRecursive() with * appropriate template parameters and arguments to achieve the scan as * specified in \a plan. 
* * @param[in] numElements The number of elements to scan * @param[in] plan Segmented Scan configuration (plan), initialized * by CUDPPSegmentedScanPlan constructor * @param[in] d_in The input array * @param[in] d_iflags The input flags array * @param[out] d_out The output array of segmented scan results */ void cudppSegmentedScanDispatch (void *d_out, const void *d_in, const unsigned int *d_iflags, int numElements, const CUDPPSegmentedScanPlan *plan ) { if (CUDPP_OPTION_EXCLUSIVE & plan->m_config.options) { if (CUDPP_OPTION_BACKWARD & plan->m_config.options) { cudppSegmentedScanDispatchType<true, true>(d_out, d_in, d_iflags, numElements, plan); } else { cudppSegmentedScanDispatchType<false, true>(d_out, d_in, d_iflags, numElements, plan); } } else { if (CUDPP_OPTION_BACKWARD & plan->m_config.options) { cudppSegmentedScanDispatchType<true, false>(d_out, d_in, d_iflags, numElements, plan); } else { cudppSegmentedScanDispatchType<false, false>(d_out, d_in, d_iflags, numElements, plan); } } } #ifdef __cplusplus } #endif /** @} */ // end segmented scan functions /** @} */ // end cudpp_app
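/* Reference sketch (added, not part of CUDPP): the semantics of the forward inclusive
 * segmented add-scan that cudppSegmentedScanDispatch() configures above. A set flag marks
 * the first element of a segment and restarts the running total. */
#include <vector>

template <typename T>
std::vector<T> segmented_inclusive_add_scan(const std::vector<T> &in,
                                            const std::vector<unsigned int> &flags)
{
	std::vector<T> out(in.size());
	T running = T();
	for (size_t i = 0; i < in.size(); ++i)
	{
		running = flags[i] ? in[i] : running + in[i];
		out[i] = running;
	}
	return out;
}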
#include <sobfu/cuda/utils.hpp> /* cuda includes */ #include <curand.h> #include <curand_kernel.h> /* thrust includes */ #include <thrust/device_vector.h> #include <thrust/extrema.h> using namespace kfusion::device; /////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Volume initialization namespace kfusion { namespace device { __global__ void clear_volume_kernel(TsdfVolume tsdf) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < tsdf.dims.x && y < tsdf.dims.y) { float2* beg = tsdf.beg(x, y); float2* end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z; for (float2* pos = beg; pos != end; pos = tsdf.zstep(pos)) *pos = make_float2(0.f, 0.f); } } } // namespace device } // namespace kfusion void kfusion::device::clear_volume(TsdfVolume& volume) { dim3 block(64, 16); dim3 grid(1, 1, 1); grid.x = divUp(volume.dims.x, block.x); grid.y = divUp(volume.dims.y, block.y); clear_volume_kernel<<<grid, block>>>(volume); cudaSafeCall(cudaGetLastError()); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Volume integration namespace kfusion { namespace device { texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat)); struct TsdfIntegrator { Projector proj; Aff3f vol2cam; int2 dists_size; __kf_device__ void operator()(TsdfVolume& volume) const { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume.dims.x || y >= volume.dims.y) { return; } float3 vc = make_float3(x * volume.voxel_size.x + volume.voxel_size.x / 2.f, y * volume.voxel_size.y + volume.voxel_size.y / 2.f, volume.voxel_size.z / 2.f); float3 vc_cam = vol2cam * vc; /* transfrom from volume coos to camera coos */ float3 zstep = make_float3(0.f, 0.f, volume.voxel_size.z); float2* vptr = volume.beg(x, y); for (int i = 0; i <= volume.dims.z - 1; ++i, vc_cam += zstep, vptr = volume.zstep(vptr)) { /* project the voxel centre onto the depth map */ float2 coo = proj(vc_cam); if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y) { continue; } float Dp = tex2D(dists_tex, coo.x, coo.y); if (Dp <= 0.f || vc_cam.z <= 0) { continue; } /* get the psdf value */ float psdf = Dp - vc_cam.z; /* get the weight */ float weight = (psdf > -volume.eta) ? 
1.f : 0.f; if (psdf >= volume.trunc_dist) { *vptr = make_float2(1.f, weight); } else if (psdf <= -volume.trunc_dist) { *vptr = make_float2(-1.f, weight); } else { *vptr = make_float2(__fdividef(psdf, volume.trunc_dist), weight); } } } __kf_device__ void operator()(TsdfVolume& phi_global, TsdfVolume& phi_n_psi) const { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= phi_global.dims.x || y >= phi_global.dims.y) { return; } float2* pos_global = phi_global.beg(x, y); float2* pos_n_psi = phi_n_psi.beg(x, y); for (int i = 0; i <= phi_global.dims.z - 1; ++i, pos_global = phi_global.zstep(pos_global), pos_n_psi = phi_n_psi.zstep(pos_n_psi)) { float2 tsdf = *pos_n_psi; if (tsdf.y == 0.f || (tsdf.y == 1.f && (tsdf.x == 0.f || tsdf.x == -1.f))) { continue; } float2 tsdf_prev = *pos_global; float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev.y, tsdf_prev.x, tsdf.x), tsdf_prev.y + 1.f); float weight_new = fminf(tsdf_prev.y + 1.f, (float) phi_global.max_weight); /* pack and write */ *pos_global = make_float2(tsdf_new, weight_new); } } }; // namespace device __global__ void integrate_kernel(const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); } __global__ void integrate_kernel(const TsdfIntegrator integrator, TsdfVolume phi_global, TsdfVolume phi_n_psi) { integrator(phi_global, phi_n_psi); }; } // namespace device } // namespace kfusion void kfusion::device::integrate(const PtrStepSz<float>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj) { /* init tsdf */ TsdfIntegrator ti; ti.dists_size = make_int2(dists.cols, dists.rows); ti.vol2cam = aff; ti.proj = proj; dists_tex.filterMode = cudaFilterModePoint; dists_tex.addressMode[0] = cudaAddressModeBorder; dists_tex.addressMode[1] = cudaAddressModeBorder; dists_tex.addressMode[2] = cudaAddressModeBorder; TextureBinder binder(dists, dists_tex, cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat)); (void) binder; dim3 block(64, 16); dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y)); integrate_kernel<<<grid, block>>>(ti, volume); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } void kfusion::device::integrate(TsdfVolume& phi_global, TsdfVolume& phi_n_psi) { TsdfIntegrator ti; dim3 block(64, 16); dim3 grid(divUp(phi_global.dims.x, block.x), divUp(phi_global.dims.y, block.y)); integrate_kernel<<<grid, block>>>(ti, phi_global, phi_n_psi); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } namespace kfusion { namespace device { //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// signed distance fields for various primitives __global__ void init_box_kernel(TsdfVolume volume, const float3 b) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume.dims.x || y >= volume.dims.y) return; /* centering */ float3 c = make_float3(volume.dims.x / 2.f * volume.voxel_size.x, volume.dims.y / 2.f * volume.voxel_size.y, volume.dims.z / 2.f * volume.voxel_size.z); float3 vc = make_float3(x * volume.voxel_size.x + volume.voxel_size.x / 2.f, y * volume.voxel_size.y + volume.voxel_size.y / 2.f, volume.voxel_size.z / 2.f) - c; float3 zstep = make_float3(0.f, 0.f, volume.voxel_size.z); float2* vptr = volume.beg(x, y); for (int i = 0; i < volume.dims.z; vc += zstep, vptr = volume.zstep(vptr), ++i) { float3 d = make_float3(fabs(vc.x), fabs(vc.y), fabs(vc.z)) - b; float sdf = fmin(fmax(d.x, fmax(d.y, 
d.z)), 0.f) + norm(make_float3(fmax(d.x, 0.f), fmax(d.y, 0.f), fmax(d.z, 0.f))); float weight = 1.f; if (sdf >= volume.trunc_dist) { *vptr = make_float2(1.f, weight); } else if (sdf <= -volume.trunc_dist) { *vptr = make_float2(-1.f, weight); } else { *vptr = make_float2(__fdividef(sdf, volume.trunc_dist), weight); } } } __global__ void init_ellipsoid_kernel(TsdfVolume volume, const float3 r) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume.dims.x || y >= volume.dims.y) return; /* centering */ float3 c = make_float3(volume.dims.x / 2.f * volume.voxel_size.x, volume.dims.y / 2.f * volume.voxel_size.y, volume.dims.z / 2.f * volume.voxel_size.z); float3 vc = make_float3(x * volume.voxel_size.x + volume.voxel_size.x / 2.f, y * volume.voxel_size.y + volume.voxel_size.y / 2.f, volume.voxel_size.z / 2.f) - c; float3 zstep = make_float3(0.f, 0.f, volume.voxel_size.z); float2* vptr = volume.beg(x, y); for (int i = 0; i < volume.dims.z; vc += zstep, vptr = volume.zstep(vptr), ++i) { float k0 = norm(make_float3(vc.x / r.x, vc.y / r.y, vc.z / r.z)); float k1 = norm(make_float3(vc.x / (r.x * r.x), vc.y / (r.y * r.y), vc.z / (r.z * r.z))); float sdf = k0 * (k0 - 1.f) / k1; float weight = 1.f; if (sdf >= volume.trunc_dist) { *vptr = make_float2(1.f, weight); } else if (sdf <= -volume.trunc_dist) { *vptr = make_float2(-1.f, weight); } else { *vptr = make_float2(__fdividef(sdf, volume.trunc_dist), weight); } } } __global__ void init_sphere_kernel(TsdfVolume volume, float3 centre, float radius) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume.dims.x || y >= volume.dims.y) return; float3 vc = make_float3(x * volume.voxel_size.x + volume.voxel_size.x / 2.f, y * volume.voxel_size.y + volume.voxel_size.y / 2.f, volume.voxel_size.z / 2.f); float3 zstep = make_float3(0.f, 0.f, volume.voxel_size.z); float2* vptr = volume.beg(x, y); for (int i = 0; i < volume.dims.z; vc += zstep, vptr = volume.zstep(vptr), ++i) { float d = sqrtf(powf(vc.x - centre.x, 2) + powf(vc.y - centre.y, 2) + powf(vc.z - centre.z, 2)); float sdf = d - radius; float weight = (sdf > -volume.eta) ? 
1.f : 0.f; if (sdf >= volume.trunc_dist) { *vptr = make_float2(1.f, weight); } else if (sdf <= -volume.trunc_dist) { *vptr = make_float2(-1.f, weight); } else { *vptr = make_float2(__fdividef(sdf, volume.trunc_dist), weight); } } } __global__ void init_plane_kernel(TsdfVolume volume, float z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume.dims.x || y >= volume.dims.y) return; float3 vc = make_float3(x * volume.voxel_size.x + volume.voxel_size.x / 2.f, y * volume.voxel_size.y + volume.voxel_size.y / 2.f, volume.voxel_size.z / 2.f); float3 zstep = make_float3(0.f, 0.f, volume.voxel_size.z); float2* vptr = volume.beg(x, y); for (int i = 0; i < volume.dims.z; vc += zstep, vptr = volume.zstep(vptr), ++i) { float sdf = vc.z - z; float weight = 1.f; if (sdf >= volume.trunc_dist) { *vptr = make_float2(1.f, weight); } else if (sdf <= -volume.trunc_dist) { *vptr = make_float2(-1.f, weight); } else { *vptr = make_float2(__fdividef(sdf, volume.trunc_dist), weight); } } } __global__ void init_torus_kernel(TsdfVolume volume, const float2 t) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= volume.dims.x || y >= volume.dims.y) return; /* centering */ float3 c = make_float3(volume.dims.x / 2.f * volume.voxel_size.x, volume.dims.y / 2.f * volume.voxel_size.y, volume.dims.z / 2.f * volume.voxel_size.z); float3 vc = make_float3(x * volume.voxel_size.x + volume.voxel_size.x / 2.f, y * volume.voxel_size.y + volume.voxel_size.y / 2.f, volume.voxel_size.z / 2.f) - c; float3 zstep = make_float3(0.f, 0.f, volume.voxel_size.z); float2* vptr = volume.beg(x, y); for (int i = 0; i < volume.dims.z; vc += zstep, vptr = volume.zstep(vptr), ++i) { float2 q = make_float2(norm(make_float2(vc.x, vc.z)) - t.x, vc.y); float sdf = norm(q) - t.y; float weight = 1.f; if (sdf >= volume.trunc_dist) { *vptr = make_float2(1.f, weight); } else if (sdf <= -volume.trunc_dist) { *vptr = make_float2(-1.f, weight); } else { *vptr = make_float2(__fdividef(sdf, volume.trunc_dist), weight); } } } } // namespace device } // namespace kfusion void kfusion::device::init_box(TsdfVolume& volume, const float3& b) { dim3 block(64, 16); dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y)); init_box_kernel<<<grid, block>>>(volume, b); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } void kfusion::device::init_ellipsoid(TsdfVolume& volume, const float3& r) { dim3 block(64, 16); dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y)); init_ellipsoid_kernel<<<grid, block>>>(volume, r); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } void kfusion::device::init_plane(TsdfVolume& volume, const float& z) { dim3 block(64, 16); dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y)); init_plane_kernel<<<grid, block>>>(volume, z); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } void kfusion::device::init_sphere(TsdfVolume& volume, const float3& centre, const float& radius) { dim3 block(64, 16); dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y)); init_sphere_kernel<<<grid, block>>>(volume, centre, radius); cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); } void kfusion::device::init_torus(TsdfVolume& volume, const float2& t) { dim3 block(64, 16); dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y)); init_torus_kernel<<<grid, block>>>(volume, t); 
cudaSafeCall(cudaGetLastError()); cudaSafeCall(cudaDeviceSynchronize()); }
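/* Note (added): every init_* and integration kernel above writes voxels with the same
 * truncation rule: the signed distance is clamped to [-trunc_dist, trunc_dist] and
 * normalised to [-1, 1] before being stored next to the weight. As a standalone sketch: */
inline float truncate_sdf(float sdf, float trunc_dist)
{
	if (sdf >= trunc_dist)  return  1.f;
	if (sdf <= -trunc_dist) return -1.f;
	return sdf / trunc_dist; // mirrors __fdividef(sdf, volume.trunc_dist) on the device
}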
#include "cudapoa_structs.cuh" #include <claraparabricks/genomeworks/utils/cudautils.hpp> #include <claraparabricks/genomeworks/utils/limits.cuh> #include <stdio.h> namespace claraparabricks { namespace genomeworks { namespace cudapoa { template <typename ScoreT> __device__ __forceinline__ ScoreT* get_score_ptr_tb(ScoreT* scores, int32_t score_row, int32_t column, int32_t band_start, int32_t band_width) { column = column == -1 ? 0 : column - band_start; int64_t score_index = static_cast<int64_t>(column) + static_cast<int64_t>(score_row) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); return &scores[score_index]; } template <typename ScoreT> __device__ __forceinline__ void set_score_tb(ScoreT* scores, int32_t row, int32_t column, int32_t score_matrix_height, int32_t value, int32_t band_start, int32_t band_width) { int32_t col_idx; if (column == -1) { col_idx = band_start; } else { col_idx = column - band_start; } // in NW with traceback buffer, score matrix is stored partially, hence row is mapped to [0, score_matrix_height) span row = row % score_matrix_height; int64_t score_index = static_cast<int64_t>(col_idx) + static_cast<int64_t>(row) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); scores[score_index] = value; } __device__ __forceinline__ int32_t get_band_start_for_row_tb(int32_t row, float gradient, int32_t band_width, int32_t band_shift, int32_t max_column) { int32_t diagonal_index = int32_t(row * gradient); int32_t start_pos = max(0, diagonal_index - band_shift); if (max_column < start_pos + band_width) { start_pos = max(0, max_column - band_width + CUDAPOA_CELLS_PER_THREAD); } start_pos = start_pos - (start_pos % CUDAPOA_CELLS_PER_THREAD); return start_pos; } template <typename ScoreT> __device__ __forceinline__ void initialize_band_tb(ScoreT* scores, int32_t row, int32_t score_matrix_height, int32_t min_score_value, int32_t band_start, int32_t band_width, int32_t lane_idx) { int32_t band_end = band_start + band_width; band_start = max(1, band_start); set_score_tb(scores, row, band_start, score_matrix_height, min_score_value, band_start, band_width); // note: as long as CUDAPOA_BANDED_MATRIX_RIGHT_PADDING < WARP_SIZE, no need for a for loop for (int32_t j = lane_idx + band_end; j < band_end + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING; j += WARP_SIZE) { set_score_tb(scores, row, j, score_matrix_height, min_score_value, band_start, band_width); } } template <typename TraceT> __device__ __forceinline__ TraceT get_trace(TraceT* traceback, int32_t row, int32_t column, int32_t band_start, int32_t band_width) { int64_t trace_index = static_cast<int64_t>(column - band_start) + static_cast<int64_t>(row) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); return traceback[trace_index]; } template <typename ScoreT> __device__ __forceinline__ ScoreT get_score_tb(ScoreT* scores, int32_t row, int32_t column, int32_t score_matrix_height, int32_t band_width, int32_t band_shift, float gradient, int32_t max_column, const ScoreT min_score_value) { int32_t band_start = get_band_start_for_row_tb(row, gradient, band_width, band_shift, max_column); int32_t band_end = band_start + band_width; band_end = min(band_end, max_column); if (((column > band_end) || (column < band_start)) && column != -1) { return min_score_value; } else { // row is mapped to [0, score_matrix_height) span return *get_score_ptr_tb(scores, row % score_matrix_height, column, band_start, band_width); } } template <typename SeqT, typename ScoreT, typename TraceT> 
__device__ __forceinline__ void get_scores_tb(ScoreT* scores, int32_t pred_node, int32_t current_node, int32_t column, int32_t score_matrix_height, int32_t band_width, int32_t band_shift, float gradient, int32_t max_column, int32_t gap_score, int32_t match_score, int32_t mismatch_score, SeqT4<SeqT> read4, SeqT graph_base, ScoreT4<ScoreT>& score, TraceT4<TraceT>& trace) { // The load instructions typically load data in 4B or 8B chunks. // If data is 16b (2B), then a 4B load chunk is loaded into register // and the necessary bits are extracted before returning. This wastes cycles // as each read of 16b issues a separate load command. // Instead it is better to load a 4B or 8B chunk into a register // using a single load inst, and then extracting necessary part of // of the data using bit arithmetic. Also reduces register count. int32_t band_start = get_band_start_for_row_tb(pred_node, gradient, band_width, band_shift, max_column); // subtract by CELLS_PER_THREAD to ensure score4_next is not pointing out of the corresponding band bounds int32_t band_end = band_start + band_width - CUDAPOA_CELLS_PER_THREAD; band_end = min(band_end, max_column); if ((column > band_end || column < band_start) && column != -1) { return; } else { // row is mapped to [0, score_matrix_height) span ScoreT4<ScoreT>* pred_scores = (ScoreT4<ScoreT>*)get_score_ptr_tb(scores, pred_node % score_matrix_height, column, band_start, band_width); // loads 8/16 consecutive bytes (4 ScoreTs) ScoreT4<ScoreT> score4 = pred_scores[0]; // need to load the next chunk of memory as well ScoreT4<ScoreT> score4_next = pred_scores[1]; int32_t char_profile = (graph_base == read4.r0 ? match_score : mismatch_score); // if trace is diogonal, its value is positive and if vertical, negative // update score.s0, trace.t0 ---------- if ((score4.s0 + char_profile) >= (score4.s1 + gap_score)) { if ((score4.s0 + char_profile) > score.s0) { score.s0 = score4.s0 + char_profile; trace.t0 = current_node - pred_node; } } else { if ((score4.s1 + gap_score) > score.s0) { score.s0 = score4.s1 + gap_score; trace.t0 = -(current_node - pred_node); } } // update score.s1, trace.t1 ---------- char_profile = (graph_base == read4.r1 ? match_score : mismatch_score); if ((score4.s1 + char_profile) >= (score4.s2 + gap_score)) { if ((score4.s1 + char_profile) > score.s1) { score.s1 = score4.s1 + char_profile; trace.t1 = current_node - pred_node; } } else { if ((score4.s2 + gap_score) > score.s1) { score.s1 = score4.s2 + gap_score; trace.t1 = -(current_node - pred_node); } } // update score.s2, trace.t2 ---------- char_profile = (graph_base == read4.r2 ? match_score : mismatch_score); if ((score4.s2 + char_profile) >= (score4.s3 + gap_score)) { if ((score4.s2 + char_profile) > score.s2) { score.s2 = score4.s2 + char_profile; trace.t2 = current_node - pred_node; } } else { if ((score4.s3 + gap_score) > score.s2) { score.s2 = score4.s3 + gap_score; trace.t2 = -(current_node - pred_node); } } // update score.s3, trace.t3 ---------- char_profile = (graph_base == read4.r3 ? 
match_score : mismatch_score); if ((score4.s3 + char_profile) >= (score4_next.s0 + gap_score)) { if ((score4.s3 + char_profile) > score.s3) { score.s3 = score4.s3 + char_profile; trace.t3 = current_node - pred_node; } } else { if ((score4_next.s0 + gap_score) > score.s3) { score.s3 = score4_next.s0 + gap_score; trace.t3 = -(current_node - pred_node); } } } } template <typename SeqT, typename ScoreT, typename SizeT, typename TraceT, bool Adaptive = true> __device__ __forceinline__ int32_t needlemanWunschBandedTraceback(SeqT* nodes, SizeT* graph, SizeT* node_id_to_pos, int32_t graph_count, uint16_t* incoming_edge_count, SizeT* incoming_edges, uint16_t* outgoing_edge_count, SeqT* read, int32_t read_length, ScoreT* scores, TraceT* traceback, float max_buffer_size, SizeT* alignment_graph, SizeT* alignment_read, int32_t band_width, int32_t score_matrix_height, int32_t gap_score, int32_t mismatch_score, int32_t match_score, int32_t rerun) { const ScoreT min_score_value = numeric_limits<ScoreT>::min() / 2; int32_t lane_idx = threadIdx.x % WARP_SIZE; //Calculate aspect ratio for the scores matrix float gradient = float(read_length + 1) / float(graph_count + 1); int32_t max_column = read_length + 1; // Set band-width based on scores matrix aspect ratio //--------------------------------------------------------- // band_shift defines distance of band_start from the scores matrix diagonal, ad-hoc rule 4 int32_t band_shift = band_width / 2; if (Adaptive) { // rerun code is defined in backtracking loop from previous alignment try // SHIFT_ADAPTIVE_BAND_TO_LEFT means traceback path was too close to the left bound of band // SHIFT_ADAPTIVE_BAND_TO_RIGHT means traceback path was too close to the right bound of band // Therefore we rerun alignment of the same read, but this time with double band-width and band_shift further to // the left for rerun == SHIFT_ADAPTIVE_BAND_TO_LEFT, and further to the right for rerun == SHIFT_ADAPTIVE_BAND_TO_RIGHT. if (rerun == CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_LEFT && band_width <= CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH / 2) { // ad-hoc rule 5 band_width *= 2; band_shift *= 2.5; } if (rerun == CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_RIGHT && band_width <= CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH / 2) { // ad-hoc rule 6 band_width *= 2; band_shift *= 1.5; } // check required memory and return error if exceeding max_buffer_size // using float to avoid 64-bit float required_buffer_size = static_cast<float>(graph_count) * static_cast<float>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); if (required_buffer_size > max_buffer_size) { return CUDAPOA_KERNEL_NW_ADAPTIVE_STORAGE_FAILED; } } //--------------------------------------------------------- // Initialise the horizontal boundary of the score matrix, initialising of the vertical boundary is done within the main for loop for (int32_t j = lane_idx; j < band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING; j += WARP_SIZE) { set_score_tb(scores, 0, j, score_matrix_height, j * gap_score, 0, band_width); } if (lane_idx == 0) { #ifdef NW_VERBOSE_PRINT printf("graph %d, read %d\n", graph_count, read_length); #endif } __syncwarp(); // compute vertical and diagonal values in parallel. 
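	// Within each row, every lane updates the band in chunks of four consecutive cells:
	// the diagonal (match/mismatch) and vertical-gap candidates come from the predecessor
	// rows via get_scores_tb(), while horizontal gaps are resolved afterwards by the
	// __shfl_up_sync correction loop.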
for (int32_t graph_pos = 0; graph_pos < graph_count; graph_pos++) { int32_t node_id = graph[graph_pos]; int32_t score_gIdx = graph_pos + 1; int32_t band_start = get_band_start_for_row_tb(score_gIdx, gradient, band_width, band_shift, max_column); int32_t pred_node_id = incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES]; initialize_band_tb(scores, score_gIdx, score_matrix_height, min_score_value, band_start, band_width, lane_idx); int32_t first_element_prev_score = 0; uint16_t pred_count = 0; int32_t pred_idx = 0; if (lane_idx == 0) { // Initialise the vertical boundary of the score matrix int32_t penalty; pred_count = incoming_edge_count[node_id]; if (pred_count == 0) { // row is mapped to [0, score_matrix_height) span int64_t index = static_cast<int64_t>(score_gIdx % score_matrix_height) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); scores[index] = gap_score; index = static_cast<int64_t>(score_gIdx) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); traceback[index] = -score_gIdx; } else { int64_t index = static_cast<int64_t>(score_gIdx) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); pred_idx = node_id_to_pos[pred_node_id] + 1; // only predecessors that are less than score_matrix_height distant can be taken into account if ((graph_pos - pred_idx) < score_matrix_height) { // fill in first column of traceback buffer traceback[index] = -(score_gIdx - pred_idx); if (band_start > CUDAPOA_CELLS_PER_THREAD && pred_count == 1) { first_element_prev_score = min_score_value + gap_score; } else { penalty = max(min_score_value, get_score_tb(scores, pred_idx, -1, score_matrix_height, band_width, band_shift, gradient, max_column, min_score_value)); // if pred_num > 1 keep checking to find max score as penalty for (int32_t p = 1; p < pred_count; p++) { pred_node_id = incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]; int32_t pred_idx_tmp = node_id_to_pos[pred_node_id] + 1; // only predecessors that are less than score_matrix_height distant can be taken into account if ((score_gIdx - pred_idx_tmp) < score_matrix_height) { int32_t trace_tmp = -(score_gIdx - pred_idx_tmp); int32_t score_tmp = get_score_tb(scores, pred_idx_tmp, -1, score_matrix_height, band_width, band_shift, gradient, max_column, min_score_value); if (penalty < score_tmp) { penalty = score_tmp; traceback[index] = trace_tmp; } } } first_element_prev_score = penalty + gap_score; set_score_tb(scores, score_gIdx, -1, score_matrix_height, first_element_prev_score, band_start, band_width); } } else { penalty = min_score_value; // look for a predecessor which is within score_matrix_height limit for (int32_t p = 1; p < pred_count; p++) { pred_node_id = incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]; int32_t pred_idx_tmp = node_id_to_pos[pred_node_id] + 1; // only predecessors that are less than score_matrix_height distant can be taken into account if ((score_gIdx - pred_idx_tmp) < score_matrix_height) { int32_t trace_tmp = -(score_gIdx - pred_idx_tmp); int32_t score_tmp = get_score_tb(scores, pred_idx_tmp, -1, score_matrix_height, band_width, band_shift, gradient, max_column, min_score_value); if (penalty < score_tmp) { penalty = score_tmp; traceback[index] = trace_tmp; } } } first_element_prev_score = penalty + gap_score; set_score_tb(scores, score_gIdx, -1, score_matrix_height, first_element_prev_score, band_start, band_width); } } } pred_count = __shfl_sync(FULL_MASK, pred_count, 0); pred_idx = __shfl_sync(FULL_MASK, pred_idx, 0); 
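		// pred_count and pred_idx were computed by lane 0 only; the __shfl_sync broadcasts
		// above make them visible to every lane before the banded cell updates start.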
//------------------------------------------------------------- SeqT graph_base = nodes[node_id]; for (int32_t read_pos = lane_idx * CUDAPOA_CELLS_PER_THREAD + band_start; read_pos < band_start + band_width; read_pos += CUDAPOA_MIN_BAND_WIDTH) { SeqT4<SeqT>* d_read4 = (SeqT4<SeqT>*)read; SeqT4<SeqT> read4 = d_read4[read_pos / CUDAPOA_CELLS_PER_THREAD]; TraceT4<TraceT> trace; ScoreT4<ScoreT> score = {min_score_value, min_score_value, min_score_value, min_score_value}; // note that whenever accessing a score matrix row, the row needs to be mapped to [0, score_matrix_height) get_scores_tb(scores, pred_idx, score_gIdx, read_pos, score_matrix_height, band_width, band_shift, gradient, max_column, gap_score, match_score, mismatch_score, read4, graph_base, score, trace); // Perform same score updates as above, but for rest of predecessors. for (int32_t p = 1; p < pred_count; p++) { int32_t pred_idx_tmp = node_id_to_pos[incoming_edges[node_id * CUDAPOA_MAX_NODE_EDGES + p]] + 1; if ((score_gIdx - pred_idx_tmp) < score_matrix_height) { get_scores_tb(scores, pred_idx_tmp, score_gIdx, read_pos, score_matrix_height, band_width, band_shift, gradient, max_column, gap_score, match_score, mismatch_score, read4, graph_base, score, trace); } } // While there are changes to the horizontal score values, keep updating the matrix. // So loop will only run the number of time there are corrections in the matrix. // The any_sync warp primitive lets us easily check if any of the threads had an update. bool loop = true; while (__any_sync(FULL_MASK, loop)) { loop = false; // Note: computation of s3 depends on s2, s2 depends on s1 and s1 on s0. // If we reverse the order of computation in this loop from s3 to s0, it will increase // ILP. However, in longer reads where indels are more frequent, this reverse computations // results in larger number of iterations. Since if s0 is changed, value of s1, s2 and s3 which // already have been computed in parallel need to be updated again. // The shfl_up lets us grab a value from the lane below. int32_t last_score = __shfl_up_sync(FULL_MASK, score.s3, 1); if (lane_idx == 0) { last_score = first_element_prev_score; } if (score.s0 < last_score + gap_score) { score.s0 = last_score + gap_score; trace.t0 = 0; } if (score.s1 < score.s0 + gap_score) { score.s1 = score.s0 + gap_score; trace.t1 = 0; } if (score.s2 < score.s1 + gap_score) { score.s2 = score.s1 + gap_score; trace.t2 = 0; } int32_t tscore = max(score.s2 + gap_score, score.s3); if (tscore > score.s3) { score.s3 = tscore; trace.t3 = 0; loop = true; } } // Copy over the last element score of the last lane into a register of first lane // which can be used to compute the first cell of the next warp. 
first_element_prev_score = __shfl_sync(FULL_MASK, score.s3, WARP_SIZE - 1); // row is mapped to [0, score_matrix_height) span int64_t index = static_cast<int64_t>(read_pos + 1 - band_start) + static_cast<int64_t>(score_gIdx % score_matrix_height) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); scores[index] = score.s0; scores[index + 1L] = score.s1; scores[index + 2L] = score.s2; scores[index + 3L] = score.s3; index = static_cast<int64_t>(read_pos + 1 - band_start) + static_cast<int64_t>(score_gIdx) * static_cast<int64_t>(band_width + CUDAPOA_BANDED_MATRIX_RIGHT_PADDING); traceback[index] = trace.t0; traceback[index + 1L] = trace.t1; traceback[index + 2L] = trace.t2; traceback[index + 3L] = trace.t3; __syncwarp(); } } int32_t aligned_nodes = 0; if (lane_idx == 0) { // Find location of the maximum score in the matrix. int32_t i = 0; int32_t j = read_length; int32_t mscore = min_score_value; for (int32_t idx = 1; idx <= graph_count; idx++) { if (outgoing_edge_count[graph[idx - 1]] == 0) { if ((graph_count - idx) < score_matrix_height) { int32_t s = get_score_tb(scores, idx, j, score_matrix_height, band_width, band_shift, gradient, max_column, min_score_value); if (mscore < s) { mscore = s; i = idx; } } } } // if i was not set, throw an error indicating selected score_matrix_height (i.e. max predecessor distance) is too small if (i == 0) { j = 0; aligned_nodes = CUDAPOA_KERNEL_NW_TRACEBACK_BUFFER_FAILED; } //------------------------------------------------------------------------ // Fill in traceback int32_t loop_count = 0; while (!(i == 0 && j == 0) && loop_count < static_cast<int32_t>(read_length + graph_count + 2)) { loop_count++; int32_t band_start = get_band_start_for_row_tb(i, gradient, band_width, band_shift, max_column); TraceT trace = get_trace(traceback, i, j, band_start, band_width); if (trace == 0) { // horizontal path (indel) alignment_graph[aligned_nodes] = -1; alignment_read[aligned_nodes] = j - 1; j--; } else if (trace < 0) { // vertical path (indel) alignment_graph[aligned_nodes] = graph[i - 1]; alignment_read[aligned_nodes] = -1; i += trace; } else { // diagonal path (match/mismatch) alignment_graph[aligned_nodes] = graph[i - 1]; alignment_read[aligned_nodes] = j - 1; i -= trace; j--; if (Adaptive) { // no need to request rerun if (a) it's not the first run, (b) band_width == CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH already if (rerun == 0 && band_width < CUDAPOA_MAX_ADAPTIVE_BAND_WIDTH) { // check if traceback gets too close or hits the band limits, if so stop and rerun with extended band-width // threshold for proximity to band limits works better if defined proportionate to the sequence length int32_t threshold = max(1, max_column / 1024); // ad-hoc rule 7 if (j > threshold && j < max_column - threshold) { int32_t band_start = get_band_start_for_row(i, gradient, band_width, band_shift, max_column); if (j <= band_start + threshold) // ad-hoc rule 8-a, too close to left bound { aligned_nodes = CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_LEFT; break; } if (j >= (band_start + band_width - threshold)) // ad-hoc rule 8-b, too close to right bound { aligned_nodes = CUDAPOA_SHIFT_ADAPTIVE_BAND_TO_RIGHT; break; } } } } } aligned_nodes++; } if (loop_count >= (read_length + graph_count + 2)) { aligned_nodes = CUDAPOA_KERNEL_NW_BACKTRACKING_LOOP_FAILED; } #ifdef NW_VERBOSE_PRINT printf("aligned nodes %d, loop count %d\n", aligned_nodes, loop_count); #endif } aligned_nodes = __shfl_sync(FULL_MASK, aligned_nodes, 0); return aligned_nodes; } // global kernel used in testing, hence 
uses int16_t for SizeT and ScoreT, // may need to change if test inputs change to long reads template <typename SizeT> __global__ void runNeedlemanWunschBandedTBKernel(uint8_t* nodes, SizeT* graph, SizeT* node_id_to_pos, int32_t graph_count, uint16_t* incoming_edge_count, SizeT* incoming_edges, uint16_t* outgoing_edge_count, uint8_t* read, int32_t read_length, int16_t* scores, int16_t* traceback, int32_t scores_width, int32_t max_nodes_per_graph, SizeT* alignment_graph, SizeT* alignment_read, int32_t band_width, int32_t score_matrix_height, int32_t gap_score, int32_t mismatch_score, int32_t match_score, SizeT* aligned_nodes, bool adaptive) { static_assert(std::is_same<SizeT, int16_t>::value, "This function only accepts int16_t as SizeT."); float banded_buffer_size = static_cast<float>(max_nodes_per_graph) * static_cast<float>(scores_width); if (adaptive) { *aligned_nodes = needlemanWunschBandedTraceback<uint8_t, int16_t, int16_t, int16_t, true>(nodes, graph, node_id_to_pos, graph_count, incoming_edge_count, incoming_edges, outgoing_edge_count, read, read_length, scores, traceback, banded_buffer_size, alignment_graph, alignment_read, band_width, score_matrix_height, gap_score, mismatch_score, match_score, 0); } else { *aligned_nodes = needlemanWunschBandedTraceback<uint8_t, int16_t, int16_t, int16_t, false>(nodes, graph, node_id_to_pos, graph_count, incoming_edge_count, incoming_edges, outgoing_edge_count, read, read_length, scores, traceback, banded_buffer_size, alignment_graph, alignment_read, band_width, score_matrix_height, gap_score, mismatch_score, match_score, 0); } } // Host function that calls the kernel template <typename SizeT> void runNWbandedTB(uint8_t* nodes, SizeT* graph, SizeT* node_id_to_pos, int32_t graph_count, uint16_t* incoming_edge_count, SizeT* incoming_edges, uint16_t* outgoing_edge_count, uint8_t* read, int32_t read_length, int16_t* scores, int16_t* traceback, int32_t scores_width, int32_t max_nodes_per_graph, SizeT* alignment_graph, SizeT* alignment_read, int32_t band_width, int32_t score_matrix_height, int32_t gap_score, int32_t mismatch_score, int32_t match_score, SizeT* aligned_nodes, bool adaptive) { runNeedlemanWunschBandedTBKernel<<<1, CUDAPOA_BANDED_THREADS_PER_BLOCK>>>(nodes, graph, node_id_to_pos, graph_count, incoming_edge_count, incoming_edges, outgoing_edge_count, read, read_length, scores, traceback, scores_width, max_nodes_per_graph, alignment_graph, alignment_read, band_width, score_matrix_height, gap_score, mismatch_score, match_score, aligned_nodes, adaptive); GW_CU_CHECK_ERR(cudaPeekAtLastError()); } } // namespace cudapoa } // namespace genomeworks } // namespace claraparabricks
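/* Reference sketch (added, not part of the original file) of the band placement rule used
 * by get_band_start_for_row_tb() above: the band for a row tracks the matrix diagonal,
 * shifted left by band_shift, clamped so it never runs past the last column, and aligned
 * down to a multiple of the per-thread cell count (4 here, matching ScoreT4/TraceT4). */
static int band_start_for_row(int row, float gradient, int band_width, int band_shift,
                              int max_column, int cells_per_thread = 4)
{
	int start_pos = static_cast<int>(row * gradient) - band_shift; // follow the diagonal
	if (start_pos < 0) start_pos = 0;
	if (max_column < start_pos + band_width)                       // clamp to the last full band
		start_pos = max_column - band_width + cells_per_thread;
	if (start_pos < 0) start_pos = 0;
	return start_pos - (start_pos % cells_per_thread);             // align to whole cell chunks
}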
#define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 512; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double dmcn_im2col_bilinear(const double *bottom_data, const int data_width, const int height, const int width, double h, double w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; double lh = h - h_low; double lw = w - w_low; double hh = 1 - lh, hw = 1 - lw; double v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; double v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; double v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; double v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; double w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; double val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __device__ double dmcn_get_gradient_weight(double argmax_h, double argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; double weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } __device__ double dmcn_get_coordinate_weight(double argmax_h, double argmax_w, const int height, const int width, const double *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; double weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * 
data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const double *data_im, const double *data_offset, const double *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, double *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; double *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const double* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const double *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const double *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const double *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const double offset_h = data_offset_ptr[data_offset_h_ptr]; const double offset_w = data_offset_ptr[data_offset_w_ptr]; const double mask = data_mask_ptr[data_mask_hw_ptr]; double val = static_cast<double>(0); const double h_im = h_in + i * dilation_h + offset_h; const double w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const double map_h = i * dilation_h + offset_h; //const double map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } } __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const double *data_col, const double *data_offset, const double *data_mask, const 
int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, double *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const double *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const double *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const double offset_h = data_offset_ptr[data_offset_h_ptr]; const double offset_w = data_offset_ptr[data_offset_w_ptr]; const double mask = data_mask_ptr[data_mask_hw_ptr]; const double cur_inv_h_data = h_in + i * dilation_h + offset_h; const double cur_inv_w_data = w_in + j * dilation_w + offset_w; const double cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; double weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const double *data_col, const double *data_im, const double *data_offset, const double *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, double *grad_offset, double *grad_mask) { CUDA_KERNEL_LOOP(index, n) { double val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const double *data_col_ptr = data_col + deformable_group_index * 
channel_per_deformable_group * batch_size * width_col * height_col; const double *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const double *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const double *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const double offset_h = data_offset_ptr[data_offset_h_ptr]; const double offset_w = data_offset_ptr[data_offset_w_ptr]; const double mask = data_mask_ptr[data_mask_hw_ptr]; double inv_h = h_in + i * dilation_h + offset_h; double inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const double weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda(cudaStream_t stream, const double *data_im, const double *data_offset, const double *data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double *data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col); cudaError_t err = cudaGetLastError(); if (err != 
cudaSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_cuda(cudaStream_t stream, const double *data_col, const double *data_offset, const double *data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double *grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda(cudaStream_t stream, const double *data_col, const double *data_im, const double *data_offset, const double *data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, double *grad_offset, double *grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset, grad_mask); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } }
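// ---------------------------------------------------------------------------
// A minimal host-side sketch (not from the original file) of two pieces the
// launchers above rely on: the zero-padded bilinear sampling performed by
// dmcn_im2col_bilinear, and the output-size arithmetic that height_col /
// width_col are assumed to follow. The `ref_` helpers are hypothetical names
// introduced here for illustration only.
// ---------------------------------------------------------------------------
#include <cmath>

// Reference bilinear sample of a single-channel, row-major image at (h, w).
// Taps that fall outside the image contribute zero, matching the device code.
static double ref_bilinear(const double *img, const int data_width,
                           const int height, const int width,
                           const double h, const double w) {
  const int h_low = static_cast<int>(std::floor(h));
  const int w_low = static_cast<int>(std::floor(w));
  const int h_high = h_low + 1, w_high = w_low + 1;
  const double lh = h - h_low, lw = w - w_low;
  const double hh = 1.0 - lh, hw = 1.0 - lw;
  double v1 = 0, v2 = 0, v3 = 0, v4 = 0;
  if (h_low >= 0 && w_low >= 0) v1 = img[h_low * data_width + w_low];
  if (h_low >= 0 && w_high <= width - 1) v2 = img[h_low * data_width + w_high];
  if (h_high <= height - 1 && w_low >= 0) v3 = img[h_high * data_width + w_low];
  if (h_high <= height - 1 && w_high <= width - 1) v4 = img[h_high * data_width + w_high];
  return hh * hw * v1 + hh * lw * v2 + lh * hw * v3 + lh * lw * v4;
}

// Spatial extent of the column buffer, assuming the usual convolution output
// formula; height_col = ref_out_size(height_im, pad_h, dilation_h, kernel_h,
// stride_h), and likewise for width_col.
static int ref_out_size(const int in, const int pad, const int dilation,
                        const int kernel, const int stride) {
  return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}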
// Params --------------------------------------------------------------------- struct Params { int device; int n_gpu_threads; int n_gpu_blocks; int n_threads; int n_warmup; int n_reps; const char *file_name; const char *comparison_file; int switching_limit; Params(int argc, char **argv) { device = 0; n_gpu_threads = 256; n_gpu_blocks = 8; n_threads = 2; n_warmup = 1; n_reps = 1; file_name = "input/NYR_input.dat"; comparison_file = "output/NYR_bfs.out"; switching_limit = 128; int opt; while((opt = getopt(argc, argv, "hd:i:g:t:w:r:f:c:l:")) >= 0) { switch(opt) { case 'h': usage(); exit(0); break; case 'd': device = atoi(optarg); break; case 'i': n_gpu_threads = atoi(optarg); break; case 'g': n_gpu_blocks = atoi(optarg); break; case 't': n_threads = atoi(optarg); break; case 'w': n_warmup = atoi(optarg); break; case 'r': n_reps = atoi(optarg); break; case 'f': file_name = optarg; break; case 'c': comparison_file = optarg; break; case 'l': switching_limit = atoi(optarg); break; default: fprintf(stderr, "\nUnrecognized option!\n"); usage(); exit(0); } } assert(n_gpu_threads > 0 && "Invalid # of device threads!"); assert(n_gpu_blocks > 0 && "Invalid # of device blocks!"); assert(n_threads > 0 && "Invalid # of host threads!"); } void usage() { fprintf(stderr, "\nUsage: ./sssp [options]" "\n" "\nGeneral options:" "\n -h help" "\n -d <D> CUDA device ID (default=0)" "\n -i <I> # of device threads per block (default=256)" "\n -g <G> # of device blocks (default=8)" "\n -t <T> # of host threads (default=2)" "\n -w <W> # of untimed warmup iterations (default=1)" "\n -r <R> # of timed repetition iterations (default=1)" "\n" "\nBenchmark-specific options:" "\n -f <F> name of input file with control points (default=input/NYR_input.dat)" "\n -c <C> comparison file (default=output/NYR_bfs_BFS.out)" "\n -l <L> switching limit (default=128)" "\n"); } }; // Input Data ----------------------------------------------------------------- void read_input_size(int &n_nodes, int &n_edges, const Params &p) { FILE *fp = fopen(p.file_name, "r"); fscanf(fp, "%d", &n_nodes); fscanf(fp, "%d", &n_edges); if(fp) fclose(fp); } void read_input(int &source, Node *&h_nodes, Edge *&h_edges, const Params &p) { int start, edgeno; int n_nodes, n_edges; int id, cost; FILE *fp = fopen(p.file_name, "r"); fscanf(fp, "%d", &n_nodes); fscanf(fp, "%d", &n_edges); fscanf(fp, "%d", &source); printf("Number of nodes = %d\t", n_nodes); printf("Number of edges = %d\t", n_edges); // initalize the memory: Nodes for(int i = 0; i < n_nodes; i++) { fscanf(fp, "%d %d", &start, &edgeno); h_nodes[i].x = start; h_nodes[i].y = edgeno; } #if PRINT_ALL for(int i = 0; i < n_nodes; i++) { printf("%d, %d\n", h_nodes[i].x, h_nodes[i].y); } #endif // initalize the memory: Edges for(int i = 0; i < n_edges; i++) { fscanf(fp, "%d", &id); fscanf(fp, "%d", &cost); h_edges[i].x = id; h_edges[i].y = -cost; } if(fp) fclose(fp); } // CUDA kernel ------------------------------------------------------------------------------------------ __global__ void SSSP_gpu(Node *graph_nodes_av, Edge *graph_edges_av, int *cost, int *color, int *q1, int *q2, int *n_t, int *head, int *tail, int *threads_end, int *threads_run, int *overflow, int *gray_shade, int *iter, int LIMIT, const int CPU) { __shared__ int l_mem[W_QUEUE_SIZE+2]; __shared__ int tail_bin; int* l_q2 = (int*)&l_mem[0]; int* shift = (int*)&l_mem[W_QUEUE_SIZE]; int* base = (int*)&l_mem[W_QUEUE_SIZE+1]; const int tid = threadIdx.x; const int gtid = blockIdx.x * blockDim.x + threadIdx.x; const int MAXWG = gridDim.x; const 
int WG_SIZE = blockDim.x; int iter_local = atomicAdd(&iter[0], 0); int n_t_local = atomicAdd(n_t, 0); int gray_shade_local = atomicAdd(&gray_shade[0], 0); if(tid == 0) { // Reset queue tail_bin = 0; } // Fetch frontier elements from the queue if(tid == 0) *base = atomicAdd(&head[0], WG_SIZE); __syncthreads(); int my_base = *base; while(my_base < n_t_local) { // If local queue might overflow if(tail_bin >= W_QUEUE_SIZE / 2) { if(tid == 0) { // Add local tail_bin to tail *shift = atomicAdd(&tail[0], tail_bin); } __syncthreads(); int local_shift = tid; while(local_shift < tail_bin) { q2[*shift + local_shift] = l_q2[local_shift]; // Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration local_shift += WG_SIZE; } __syncthreads(); if(tid == 0) { // Reset local queue tail_bin = 0; } __syncthreads(); } if(my_base + tid < n_t_local && *overflow == 0) { // Visit a node from the current frontier int pid = q1[my_base + tid]; //////////////// Visit node /////////////////////////// atomicExch(&color[pid], BLACK); // Node visited int cur_cost = atomicAdd(&cost[pid], 0); // Look up shortest-path distance to this node Node cur_node; cur_node.x = graph_nodes_av[pid].x; cur_node.y = graph_nodes_av[pid].y; Edge cur_edge; // For each outgoing edge for(int i = cur_node.x; i < cur_node.y + cur_node.x; i++) { cur_edge.x = graph_edges_av[i].x; cur_edge.y = graph_edges_av[i].y; int id = cur_edge.x; int cost_local = cur_edge.y; cost_local += cur_cost; int orig_cost = atomicMax(&cost[id], cost_local); if(orig_cost < cost_local) { int old_color = atomicMax(&color[id], gray_shade_local); if(old_color != gray_shade_local) { // Push to the queue int tail_index = atomicAdd(&tail_bin, 1); if(tail_index >= W_QUEUE_SIZE) { *overflow = 1; } else l_q2[tail_index] = id; } } } } if(tid == 0) *base = atomicAdd(&head[0], WG_SIZE); // Fetch more frontier elements from the queue __syncthreads(); my_base = *base; } ///////////////////////////////////////////////////////// // Compute size of the output and allocate space in the global queue if(tid == 0) { *shift = atomicAdd(&tail[0], tail_bin); } __syncthreads(); ///////////////////// CONCATENATE INTO GLOBAL MEMORY ///////////////////// int local_shift = tid; while(local_shift < tail_bin) { q2[*shift + local_shift] = l_q2[local_shift]; // Multiple threads are copying elements at the same time, so we shift by multiple elements for next iteration local_shift += WG_SIZE; } ////////////////////////////////////////////////////////////////////////// if(gtid == 0) { atomicAdd(&iter[0], 1); } } // Main ------------------------------------------------------------------------------------------ int main(int argc, char **argv) { const Params p(argc, argv); Timer timer; // Allocate int n_nodes, n_edges; read_input_size(n_nodes, n_edges, p); timer.start("Allocation"); Node * h_nodes = (Node *)malloc(sizeof(Node) * n_nodes); Node * d_nodes; hipMalloc((void**)&d_nodes, sizeof(Node) * n_nodes); Edge * h_edges = (Edge *)malloc(sizeof(Edge) * n_edges); Edge * d_edges; hipMalloc((void**)&d_edges, sizeof(Edge) * n_edges); std::atomic_int *h_color = (std::atomic_int *)malloc(sizeof(std::atomic_int) * n_nodes); int * d_color; hipMalloc((void**)&d_color, sizeof(int) * n_nodes); std::atomic_int *h_cost = (std::atomic_int *)malloc(sizeof(std::atomic_int) * n_nodes); int * d_cost; hipMalloc((void**)&d_cost, sizeof(int) * n_nodes); int * h_q1 = (int *)malloc(n_nodes * sizeof(int)); int * d_q1; hipMalloc((void**)&d_q1, sizeof(int) * n_nodes); int * h_q2 = (int 
*)malloc(n_nodes * sizeof(int)); int * d_q2; hipMalloc((void**)&d_q2, sizeof(int) * n_nodes); std::atomic_int h_head[1]; int * d_head; hipMalloc((void**)&d_head, sizeof(int)); std::atomic_int h_tail[1]; int * d_tail; hipMalloc((void**)&d_tail, sizeof(int)); std::atomic_int h_threads_end[1]; int * d_threads_end; hipMalloc((void**)&d_threads_end, sizeof(int)); std::atomic_int h_threads_run[1]; int * d_threads_run; hipMalloc((void**)&d_threads_run, sizeof(int)); int h_num_t[1]; int * d_num_t; hipMalloc((void**)&d_num_t, sizeof(int)); int h_overflow[1]; int * d_overflow; hipMalloc((void**)&d_overflow, sizeof(int)); std::atomic_int h_gray_shade[1]; int * d_gray_shade; hipMalloc((void**)&d_gray_shade, sizeof(int)); std::atomic_int h_iter[1]; int * d_iter; hipMalloc((void**)&d_iter, sizeof(int)); hipDeviceSynchronize(); //CUDA_ERR(); //ALLOC_ERR(h_nodes, h_edges, h_color, h_cost, h_q1, h_q2); timer.stop("Allocation"); // Initialize timer.start("Initialization"); const int max_gpu_threads = 256; int source; read_input(source, h_nodes, h_edges, p); for(int i = 0; i < n_nodes; i++) { h_cost[i].store(INF); } h_cost[source].store(0); for(int i = 0; i < n_nodes; i++) { h_color[i].store(WHITE); } h_tail[0].store(0); h_head[0].store(0); h_threads_end[0].store(0); h_threads_run[0].store(0); h_q1[0] = source; h_iter[0].store(0); h_overflow[0] = 0; h_gray_shade[0].store(GRAY0); timer.stop("Initialization"); timer.print("Initialization", 1); // Copy to device timer.start("Copy To Device"); hipMemcpy(d_nodes, h_nodes, sizeof(Node) * n_nodes, hipMemcpyHostToDevice); hipMemcpy(d_edges, h_edges, sizeof(Edge) * n_edges, hipMemcpyHostToDevice); hipDeviceSynchronize(); //CUDA_ERR(); timer.stop("Copy To Device"); for(int rep = 0; rep < p.n_reps + p.n_warmup; rep++) { // Reset for(int i = 0; i < n_nodes; i++) { h_cost[i].store(INF); } h_cost[source].store(0); for(int i = 0; i < n_nodes; i++) { h_color[i].store(WHITE); } h_tail[0].store(0); h_head[0].store(0); h_threads_end[0].store(0); h_threads_run[0].store(0); h_q1[0] = source; h_iter[0].store(0); h_overflow[0] = 0; h_gray_shade[0].store(GRAY0); if(rep >= p.n_warmup) timer.start("Kernel"); // Run first iteration in master CPU thread h_num_t[0] = 1; int pid; int index_i, index_o; for(index_i = 0; index_i < h_num_t[0]; index_i++) { pid = h_q1[index_i]; h_color[pid].store(BLACK); int cur_cost = h_cost[pid].load(); for(int i = h_nodes[pid].x; i < (h_nodes[pid].y + h_nodes[pid].x); i++) { int id = h_edges[i].x; int cost = h_edges[i].y; cost += cur_cost; h_cost[id].store(cost); h_color[id].store(GRAY0); index_o = h_tail[0].fetch_add(1); h_q2[index_o] = id; } } h_num_t[0] = h_tail[0].load(); h_tail[0].store(0); h_threads_run[0].fetch_add(1); h_gray_shade[0].store(GRAY1); h_iter[0].fetch_add(1); if(rep >= p.n_warmup) timer.stop("Kernel"); // Pointers to input and output queues int * h_qin = h_q2; int * h_qout = h_q1; int * d_qin; // = d_q2; int * d_qout; // = d_q1; const int CPU_EXEC = (p.n_threads > 0) ? 1 : 0; const int GPU_EXEC = (p.n_gpu_blocks > 0 && p.n_gpu_threads > 0) ? 
1 : 0; // Run subsequent iterations on CPU or GPU until number of input queue elements is 0 while(*h_num_t != 0) { if((*h_num_t < p.switching_limit || GPU_EXEC == 0) && CPU_EXEC == 1) { // If the number of input queue elements is lower than switching_limit if(rep >= p.n_warmup) timer.start("Kernel"); // Continue until switching_limit condition is not satisfied while((*h_num_t != 0) && (*h_num_t < p.switching_limit || GPU_EXEC == 0) && CPU_EXEC == 1) { // Swap queues if(h_iter[0] % 2 == 0) { h_qin = h_q1; h_qout = h_q2; } else { h_qin = h_q2; h_qout = h_q1; } std::thread main_thread(run_cpu_threads, h_nodes, h_edges, h_cost, h_color, h_qin, h_qout, h_num_t, h_head, h_tail, h_threads_end, h_threads_run, h_gray_shade, h_iter, p.n_threads, p.switching_limit, GPU_EXEC); main_thread.join(); h_num_t[0] = h_tail[0].load(); // Number of elements in output queue h_tail[0].store(0); h_head[0].store(0); if(h_iter[0].load() % 2 == 0) h_gray_shade[0].store(GRAY0); else h_gray_shade[0].store(GRAY1); } if(rep >= p.n_warmup) timer.stop("Kernel"); } else if((*h_num_t >= p.switching_limit || CPU_EXEC == 0) && GPU_EXEC == 1) { // If the number of input queue elements is higher than or equal to switching_limit if(rep >= p.n_warmup) timer.start("Copy To Device"); hipMemcpy( d_cost, h_cost, sizeof(int) * n_nodes, hipMemcpyHostToDevice); hipMemcpy( d_color, h_color, sizeof(int) * n_nodes, hipMemcpyHostToDevice); hipMemcpy( d_threads_run, h_threads_run, sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_threads_end, h_threads_end, sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_overflow, h_overflow, sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_q1, h_q1, sizeof(int) * n_nodes, hipMemcpyHostToDevice); hipMemcpy( d_q2, h_q2, sizeof(int) * n_nodes, hipMemcpyHostToDevice); hipMemcpy( d_iter, h_iter, sizeof(int), hipMemcpyHostToDevice); hipDeviceSynchronize(); //CUDA_ERR(); if(rep >= p.n_warmup) timer.stop("Copy To Device"); // Continue until switching_limit condition is not satisfied while((*h_num_t != 0) && (*h_num_t >= p.switching_limit || CPU_EXEC == 0) && GPU_EXEC == 1) { //printf("h_iter %d\n", h_iter[0].load()); // Swap queues if(h_iter[0] % 2 == 0) { d_qin = d_q1; d_qout = d_q2; } else { d_qin = d_q2; d_qout = d_q1; } if(rep >= p.n_warmup) timer.start("Copy To Device"); hipMemcpy( d_num_t, h_num_t, sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_tail, h_tail, sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_head, h_head, sizeof(int), hipMemcpyHostToDevice); hipMemcpy( d_gray_shade, h_gray_shade, sizeof(int), hipMemcpyHostToDevice); hipDeviceSynchronize(); //CUDA_ERR(); if(rep >= p.n_warmup) timer.stop("Copy To Device"); if(rep >= p.n_warmup) timer.start("Kernel"); assert(p.n_gpu_threads <= max_gpu_threads && "The thread block size is greater than the maximum thread block size that can be used on this device"); dim3 dimGrid(p.n_gpu_blocks); dim3 dimBlock(p.n_gpu_threads); hipLaunchKernelGGL(SSSP_gpu, dimGrid, dimBlock, 0, 0, d_nodes, d_edges, d_cost, d_color, d_qin, d_qout, d_num_t, d_head, d_tail, d_threads_end, d_threads_run, d_overflow, d_gray_shade, d_iter, p.switching_limit, CPU_EXEC); hipDeviceSynchronize(); //CUDA_ERR(); if(rep >= p.n_warmup) timer.stop("Kernel"); if(rep >= p.n_warmup) timer.start("Copy Back and Merge"); hipMemcpy( h_tail, d_tail, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( h_iter, d_iter, sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); //CUDA_ERR(); if(rep >= p.n_warmup) timer.stop("Copy Back and Merge"); h_num_t[0] = h_tail[0].load(); // Number of elements in 
output queue h_tail[0].store(0); h_head[0].store(0); if(h_iter[0].load() % 2 == 0) h_gray_shade[0].store(GRAY0); else h_gray_shade[0].store(GRAY1); } if(rep >= p.n_warmup) timer.start("Copy Back and Merge"); hipMemcpy( h_cost, d_cost, sizeof(int) * n_nodes, hipMemcpyDeviceToHost); hipMemcpy( h_color, d_color, sizeof(int) * n_nodes, hipMemcpyDeviceToHost); hipMemcpy( h_threads_run, d_threads_run, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( h_threads_end, d_threads_end, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( h_overflow, d_overflow, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy( h_q1, d_q1, sizeof(int) * n_nodes, hipMemcpyDeviceToHost); hipMemcpy( h_q2, d_q2, sizeof(int) * n_nodes, hipMemcpyDeviceToHost); hipDeviceSynchronize(); //CUDA_ERR(); if(rep >= p.n_warmup) timer.stop("Copy Back and Merge"); } } } // end of iteration timer.print("Allocation", 1); timer.print("Copy To Device", p.n_reps); timer.print("Kernel", p.n_reps); timer.print("Copy Back and Merge", p.n_reps); // Verify answer bool ok = verify(h_cost, n_nodes, p.comparison_file); // Free memory timer.start("Deallocation"); free(h_nodes); free(h_edges); free(h_color); free(h_cost); free(h_q1); free(h_q2); hipFree(d_nodes); hipFree(d_edges); hipFree(d_cost); hipFree(d_color); hipFree(d_q1); hipFree(d_q2); hipFree(d_num_t); hipFree(d_head); hipFree(d_tail); hipFree(d_threads_end); hipFree(d_threads_run); hipFree(d_overflow); hipFree(d_iter); hipFree(d_gray_shade); //CUDA_ERR(); timer.stop("Deallocation"); timer.print("Deallocation", 1); if (ok) printf("Test Passed\n"); return 0; }
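// ---------------------------------------------------------------------------
// A standalone sketch (illustrative names, not part of the benchmark) of the
// block-local queue pattern used by SSSP_gpu above: each block buffers its
// outputs in shared memory, reserves a contiguous slice of the global queue
// with a single atomicAdd on the tail counter, and then copies the local
// buffer into that slice cooperatively.
// ---------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <cstdio>

#define LQ_SIZE 256

__global__ void enqueue_even(const int *in, int n, int *out, int *tail) {
  __shared__ int l_q[LQ_SIZE];  // block-local queue
  __shared__ int l_count;       // elements buffered by this block
  __shared__ int l_base;        // start of this block's slice of the global queue
  if (threadIdx.x == 0) l_count = 0;
  __syncthreads();

  const int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < n && (in[gid] & 1) == 0) {
    const int slot = atomicAdd(&l_count, 1);  // cheap shared-memory atomic
    if (slot < LQ_SIZE) l_q[slot] = in[gid];
  }
  __syncthreads();

  if (threadIdx.x == 0)                       // one global atomic per block
    l_base = atomicAdd(tail, l_count < LQ_SIZE ? l_count : LQ_SIZE);
  __syncthreads();

  const int count = l_count < LQ_SIZE ? l_count : LQ_SIZE;
  for (int i = threadIdx.x; i < count; i += blockDim.x)
    out[l_base + i] = l_q[i];                 // concatenate into global memory
}

int main() {
  const int n = 1 << 10;
  int *h_in = new int[n];
  for (int i = 0; i < n; ++i) h_in[i] = i;
  int *d_in, *d_out, *d_tail;
  hipMalloc((void**)&d_in, n * sizeof(int));
  hipMalloc((void**)&d_out, n * sizeof(int));
  hipMalloc((void**)&d_tail, sizeof(int));
  hipMemset(d_tail, 0, sizeof(int));
  hipMemcpy(d_in, h_in, n * sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(enqueue_even, dim3((n + 255) / 256), dim3(256), 0, 0,
                     d_in, n, d_out, d_tail);
  int h_tail = 0;
  hipMemcpy(&h_tail, d_tail, sizeof(int), hipMemcpyDeviceToHost);
  printf("enqueued %d even values\n", h_tail);
  delete[] h_in;
  hipFree(d_in); hipFree(d_out); hipFree(d_tail);
  return 0;
}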
// ConnectRegionNew.cu // 实现图像的连通区域操作 #include "ConnectRegionNew.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; // 宏:DEF_BLOCK_H // 定义了默认的一个线程块处理图像的行数。 #define DEF_BLOCK_H 4 // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 4 #define DEF_BLOCK_Y 2 // 宏:CONNREGION_INI_IFI // 定义了一个无穷大 #define CONNREGION_INI_IFI 0x7fffffff // 宏:CONNREGION_INI_SMA // 定义了一个无穷小 #define CONNREGION_INI_SMA 0 // 宏:BEFORESCAN // 定义开始扫描前的线程块状态 #define BEFORESCAN 0 // 宏:AFTERFIRSTSCAN // 定义进行第一次扫描后的线程块状态 #define AFTERFIRSTSCAN 1 // 宏:AFTERUNIFIED // 定义统一标记后的线程块状态 #define AFTERUNIFIED 2 // 宏:CONNREGION_PACK_LEVEL // 定义了一个线程中计算的像素点个数,若该值为4,则在一个线程中计算2 ^ 4 = 16 // 个像素点 #define CONNREGION_PACK_LEVEL 4 #define CONNREGION_PACK_NUM (1 << CONNREGION_PACK_LEVEL) #define CONNREGION_PACK_MASK (CONNREGION_PACK_LEVEL - 1) #if (CONNREGION_PACK_LEVEL < 1 || CONNREGION_PACK_LEVEL > 5) # error Unsupport CONNREGION_PACK_LEVEL Value!!! #endif // Device 子程序:_findMinLabel // 将标记值设为连通数组中的值 static __device__ int // 返回值:根标记,即最小标记值 _findMinLabel( int *connector, // 连通数组 int blockx, // 线程块索引在本程序中代表处理灰度数组的索引 int pitch, // 等距步长 int label // 标记值 ); // Device 子程序:_spin // 保证各线程块进行同步 static __device__ void // 无返回值 _spin( int n, // 线程块要同步的状态标记 int m, // 线程块的数量 int *state // 线程块标记数组 ); // Kernel 函数: _labelingConnectRegionKer (标记连通区域) // 标记连通区域。该过程主要分为三个部分,首先,扫描第一遍,从第一个图像的标记值 // 为其在源图像中的索引值,其后标记值为对其邻域对比后得到的标记值,如果存在领域, // 更新为最小的邻域标记值,否则标记值加1,然后,把每个线程块的标记结果统一,最后 // 通过找到根标记,实现最终标记。 // Kernel 函数:_labelingConnectRegionKer (标记连通区域) static __global__ void // 无返回值 _labelingConnectRegionKer(ImageCuda inimg, // 输入图像 int *devIndGray, // 需要处理的灰度范围数组 int indGrayNum, // 需要处理的灰度组数 LabelMaps *devLabelM, // 输出区域集 int *lastLineLabelofBlock, // 存储每一个处理线程块的 // 最后一行标记值 int *connector, // 连通数组 int *bState // 线程块状态数组 ); // Kernel 函数:_computeAreaKer // 计算连通区域的面积 static __global__ void // 无返回值 _computeAreaKer( ImageCuda inimg, // 输入图像 LabelMaps *devLabelM // 输出区域集 ); // Kernel 函数:_filterAreaKer (筛选出面积大小符合要求的连通区域) static __global__ void // Kernel 函数无返回值 _filterAreaKer( ImageCuda inimg, // 输入图像 LabelMaps * devLabelM, // 输入区域集 int *devIndGray, // 需要处理的灰度范围数组 int *frSize, // 各区域集中连通区域的个数数组 int minArea, //区域面积的最小值 int maxArea //区域面积的最大值 ); // Kernel 函数:_getFilteredRegionKer (得到符合要求的面积区域的相关信息) static __global__ void _getFilteredRegionKer( ImageCuda inimg, // 输入图像 LabelMaps * devLabelM, // 输入区域集 int *devIndGray, // 需要处理的灰度范围数组 int *frSize, // 各区域集中连通区域的个数数组 int minArea, //区域面积的最小值 int maxArea, //区域面积的最大值 int indGrayNum // 需要处理的灰度组数 ); // Device 子程序:_findMinLabel(中继处理) static __device__ int _findMinLabel(int* connector, int blockx, int pitch, int label) { int lab1 = label,lab2; // 循环查找并查集的根,即最小标记值。 while ((lab2 = connector[blockx * pitch + lab1]) >= 0) { lab1 = lab2; } return lab1; } // Device 子程序:_spin (线程块同步) static __device__ void _spin(int n, int m, int *state) { int counter = 0; do { for (int i = 0; i < m; i++) { if (state[i] >= n) counter++; } } while (counter < m); } // Kernel 函数:_labelingConnectRegionKer (标记连通区域) static __global__ void _labelingConnectRegionKer(ImageCuda inimg, int *devIndGray, int indGrayNum, LabelMaps *devLabelM, int *lastLineLabelofBlock, int *connector, int *bState) { // 用来表示线程块的大小。 int bSize = DEF_BLOCK_H * inimg.imgMeta.width; // 动态申请共享内存,使用时共享内存大小为线程块的大小,用于保存扫描后的标记 // 值,并且数组的第一个元素初始化为所处理的第一个像素在图像中的索引值。 extern __shared__ int bLabel[]; // 每个线程块中的每一个元素对应的索引值。 int bMinLabel = blockIdx.y * (bSize - inimg.imgMeta.width); int min = devIndGray[2 * blockIdx.x]; int max = devIndGray[2 * blockIdx.x + 1]; int pitch 
= inimg.imgMeta.width * inimg.imgMeta.height; // 记录连通数目,初始为所处理的第一个像素在图像中的索引值。 int labelCounter = bMinLabel + 1; // 标记数组的第一个元素初始化为所处理的第一个像素在图像中的索引值。 bLabel[0] = 0; if (inimg.imgMeta.imgData[blockIdx.y * (DEF_BLOCK_H - 1) * inimg.pitchBytes] >= min && inimg.imgMeta.imgData[blockIdx.y * (DEF_BLOCK_H - 1) * inimg.pitchBytes] <= max ) bLabel[0] = labelCounter++; // 线程块数。 int countofBlock = gridDim.x * gridDim.y; // 标记线程块的状态,便于同步 bState[blockIdx.y * gridDim.x + blockIdx.x] = BEFORESCAN; // 实现各线程块同步。 _spin(BEFORESCAN, countofBlock, bState); int i; // 当前处理图像的像素的下标。 int curidx; // 标记图像位置的列坐标和行坐标。 int cur_x, cur_y; // 从图像的第 blockIdx.y BLOCK的第一行的第二个PIXEL开始,由左向右进行扫描。 for (i = 1; i < inimg.imgMeta.width; i++) { cur_x = i; cur_y = blockIdx.y * (DEF_BLOCK_H - 1); curidx = cur_y * inimg.pitchBytes + cur_x; bLabel[i] = 0; // 如果该图像像素和左侧像素点连通,则将本线程块中对就位置的标记值设为左 // 侧像素点对应位置的标记值。 if (inimg.imgMeta.imgData[curidx] >= min && inimg.imgMeta.imgData[curidx] <= max) { if (inimg.imgMeta.imgData[curidx - 1] >= min && inimg.imgMeta.imgData[curidx - 1] <= max) { bLabel[i] = bLabel[i - 1]; }else bLabel[i] = labelCounter++; } } // 从对应图像BLOCK的第二行的第一个PIXEL开始(最左), // 由左向右,由上向下地进行扫描。 for (; i < bSize; i++) { // 得到对应像素在图中的位置索引值。 cur_x = i % inimg.imgMeta.width; cur_y = i / inimg.imgMeta.width + blockIdx.y * (DEF_BLOCK_H - 1); curidx = cur_y * inimg.pitchBytes + cur_x; // 对应的正上方的像素的索引值。 int upidx = i - inimg.imgMeta.width; // 初始化标志值以区分该像素是否已被处理过。 bLabel[i] = 0; // 如果该位置的像素在所要处理的灰度范围内,则对该像素进行处理并进行标记 // 标记原则为:从左侧的对应像素开始处理,比对这个左侧像素是否也在所要处 // 理的灰度范围内,如果也在,将该像素位置的对应的标记值更新为较小的标记 // 值,并更新连通数组。右上方和正上方以及左上方的处理方式类似。 if (inimg.imgMeta.imgData[curidx] >= min && inimg.imgMeta.imgData[curidx] <= max) { // 先处理左方像素。 int leftLabel; if (cur_x > 0 && inimg.imgMeta.imgData[curidx - 1] >= min && inimg.imgMeta.imgData[curidx - 1] <= max) { leftLabel = bLabel[i - 1]; }else leftLabel = -2; // 依次处理右上方,正上方,左上方的像素,并检查是否也在所要处理的 // 灰度范围内判断是否和该像素连通。 for (int times = 0;times < 4;times++) { // 处理右上方的像素,检查是否也在所要处理的灰度范围内以判断是否 // 和该像素连通。 if (times == 0 && cur_x < inimg.imgMeta.width - 1) { if (inimg.imgMeta.imgData[curidx - inimg.pitchBytes + 1] >= min && inimg.imgMeta.imgData[curidx - inimg.pitchBytes + 1] <= max) bLabel[i] = bLabel[upidx + 1]; } // 处理正上方的像素,检查是否也在所要处理的灰度范围内以判断是否 // 和该像素连通。 if (times == 1) { if (inimg.imgMeta.imgData[curidx - inimg.pitchBytes] >= min && inimg.imgMeta.imgData[curidx - inimg.pitchBytes] <= max) { if (bLabel[i] != 0) { int a = _findMinLabel(connector, blockIdx.x, pitch, bLabel[i]); int b = _findMinLabel(connector, blockIdx.x, pitch, bLabel[upidx]); if (a != b) { int c = (a > b) ? a : b; connector[blockIdx.x * pitch + c] = a + b - c; } }else bLabel[i] = bLabel[upidx]; } } // 处理左上方的像素,检查是否也在所要处理的灰度范围内以判断是否 // 和该像素连通。 if (times == 2 && cur_x > 0) { if (inimg.imgMeta.imgData[curidx - inimg.pitchBytes - 1] >= min && inimg.imgMeta.imgData[curidx - inimg.pitchBytes - 1] <= max) { if (bLabel[i] != 0) { int a = _findMinLabel(connector, blockIdx.x, pitch, bLabel[i]); int b = _findMinLabel(connector, blockIdx.x, pitch, bLabel[upidx - 1]); if (a != b) { int c = (a > b) ? a : b; connector[blockIdx.x * pitch + c] = a + b - c; } }else bLabel[i] = bLabel[upidx - 1]; } } // 如果该像素的左侧像素和右上方像素都连通,则更新连通区域使左侧 // 像素对应位置的标志值与右上方保持一致。左侧像素与正上方像素及 // 左上方像素和右上方像素,左侧像素与正上方像素同理保持一致。 if (times == 3) { if (leftLabel != -2) { if (bLabel[i] != 0) { int a = _findMinLabel(connector, blockIdx.x, pitch, bLabel[i]); int b = _findMinLabel(connector, blockIdx.x, pitch, leftLabel); if (a != b) { int c = (a > b) ? 
a : b; connector[blockIdx.x * pitch + c] = a + b - c; } }else bLabel[i] = leftLabel; } } } if (bLabel[i] == 0) bLabel[i] = (leftLabel != -2) ? leftLabel : labelCounter++; } } // 进行bh行扫描后,将bLabel的最后一行的内容copy到 // lastLineLabelofBlock [blockIdx.y][]中。 for (i = 0; i < inimg.imgMeta.width; i++) { if (blockIdx.y < gridDim.y - 1) { lastLineLabelofBlock[blockIdx.x * inimg.imgMeta.width * (gridDim.y - 1) + blockIdx.y * inimg.imgMeta.width + i] = bLabel[bSize - inimg.imgMeta.width + i]; } } // 更新各大线程块的状态。 bState[blockIdx.y * gridDim.x + blockIdx.x] = AFTERFIRSTSCAN; // 实现各线程块同步。 _spin(AFTERFIRSTSCAN, countofBlock, bState); // 根据连通数组更新标记值实现不同的线程块处理的同一行的像素值对应的标记值相 // 同。 if (blockIdx.y > 0) { for (i = 0; i < inimg.imgMeta.width; i++) { if (bLabel[i] != 0) { int a = _findMinLabel(connector, blockIdx.x, pitch, bLabel[i]); int b = _findMinLabel(connector, blockIdx.x, pitch, lastLineLabelofBlock[blockIdx.x * inimg.imgMeta.width * (gridDim.y - 1) + (blockIdx.y - 1) * inimg.imgMeta.width + i]); if (a != b) { int c = (a > b) ? a : b; connector[blockIdx.x * pitch + c] = a + b - c; } } } } // 更新各大线程块的状态。 bState[blockIdx.y * gridDim.x + blockIdx.x] = AFTERUNIFIED; // 实现各线程块同步。 _spin(AFTERUNIFIED, countofBlock, bState); // 将最终结果输出到输出标记值数组中。 int gbMinLabel = blockIdx.y * (DEF_BLOCK_H - 1) * inimg.imgMeta.width; for (i = 0; i < bSize; i++) { // 找到最小的标记值。 int trueLabel = _findMinLabel(connector, blockIdx.x, pitch, bLabel[i]); devLabelM[blockIdx.x].gLabel[gbMinLabel + i] = trueLabel; } } // Kernel 函数:_computeAreaKer (计算连通区域的面积) static __global__ void _computeAreaKer(ImageCuda inimg, LabelMaps *devLabelM) { // 所要处理的图们大小对应的宽度。 int width = inimg.imgMeta.width; // 所要处理的图们大小对应的高度。 int height = inimg.imgMeta.height; // 初始化区域个数。 devLabelM[blockIdx.z].regionCount = 0; // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。由于我们采用了并 // 行度缩减的策略,默认令一个线程处理 16 个输出像素,这四个像素位于统一列 // 的相邻 16 行上,因此,对于 r 需要进行右移计算。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) << CONNREGION_PACK_LEVEL; int inidx = r * width + c; int curlabel, nexlabel; int cursum = 0; do { // 线程中处理第一个点。 // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资 // 源,一方面防止由于段错误导致的程序崩溃。 if (r >= height || c >= width) break; // 得到第一个输入坐标点对应的标记值。 curlabel = devLabelM[blockIdx.z].gLabel[inidx]; // if (blockIdx.z == 1) //printf("curlabel = %d\n", curlabel); cursum = 1; // 处理第二个点。 // 此后的像素点,每个像素点都在前一个的下一行,而 x 分量保持不变。因 // 此,需要检查这个像素点是否越界。检查只针对 y 分量即可,x 分量在各点 // 之间没有变化,故不用检查。 if (++r >= height) break; // 得到第二个点的像素值。 // 根据上一个像素点,计算当前像素点的对应的输出图像的下标。由于只有 y // 分量增加 1,所以下标只需要加上一个 pitch 即可,不需要在进行乘法计算。 inidx += width; nexlabel = devLabelM[blockIdx.z].gLabel[inidx]; // 若当前第二个点的标记值不等于前一个,把当前临时变量 cursum 中的统计结 // 果增加到共享内存中的相应区域;若该值等于前一个点的标记值,则临时变量 // cursum 加 1,继续检查下一个像素点。 if (curlabel != nexlabel) { atomicAdd(&devLabelM[blockIdx.z].area[curlabel], cursum); curlabel = nexlabel; } else { cursum++; } // 宏:CONNREGION_KERNEL_MAIN_PHASE // 定义计算下一个像素点的程序片段。使用这个宏可以实现获取下一个点的像素 // 值,并累加到共享内存,并且简化编码量 #define CONNREGION_KERNEL_MAIN_PHASE \ if (++r >= height) \ break; \ inidx += width; \ nexlabel = devLabelM[blockIdx.z].gLabel[inidx]; \ if (curlabel != nexlabel) { \ atomicAdd(&devLabelM[blockIdx.z].area[curlabel], cursum); \ curlabel = nexlabel; \ cursum = 1; \ } else { \ cursum++; \ } #define CONNREGION_KERNEL_MAIN_PHASEx2 \ CONNREGION_KERNEL_MAIN_PHASE \ CONNREGION_KERNEL_MAIN_PHASE #define CONNREGION_KERNEL_MAIN_PHASEx4 \ CONNREGION_KERNEL_MAIN_PHASEx2 \ CONNREGION_KERNEL_MAIN_PHASEx2 #define CONNREGION_KERNEL_MAIN_PHASEx8 \ 
CONNREGION_KERNEL_MAIN_PHASEx4 \ CONNREGION_KERNEL_MAIN_PHASEx4 #define CONNREGION_KERNEL_MAIN_PHASEx16 \ CONNREGION_KERNEL_MAIN_PHASEx8 \ CONNREGION_KERNEL_MAIN_PHASEx8 // 对于不同的 CONNREGION_PACK_LEVEL ,定义不同的执行次数,从而使一个线程内部 // 实现对多个点的像素值的统计。 #if (CONNREGION_PACK_LEVEL >= 2) CONNREGION_KERNEL_MAIN_PHASEx2 # if (CONNREGION_PACK_LEVEL >= 3) CONNREGION_KERNEL_MAIN_PHASEx4 # if (CONNREGION_PACK_LEVEL >= 4) CONNREGION_KERNEL_MAIN_PHASEx8 # if (CONNREGION_PACK_LEVEL >= 5) CONNREGION_KERNEL_MAIN_PHASEx16 # endif # endif # endif #endif // 取消前面的宏定义。 #undef CONNREGION_KERNEL_MAIN_PHASEx16 #undef CONNREGION_KERNEL_MAIN_PHASEx8 #undef CONNREGION_KERNEL_MAIN_PHASEx4 #undef CONNREGION_KERNEL_MAIN_PHASEx2 #undef CONNREGION_KERNEL_MAIN_PHASE } while (0); // 使用原子操作来保证操作的正确性 if (cursum != 0) atomicAdd(&devLabelM[blockIdx.z].area[curlabel], cursum); } // Kernel 函数:_filterAreaKer (筛选出面积大小符合要求的连通区域) static __global__ void _filterAreaKer(ImageCuda inimg, LabelMaps * devLabelM, int *devIndGray, int *frSize, int minArea, int maxArea) { int imgSize = inimg.imgMeta.width * inimg.imgMeta.height; // 初始化 regionCount 的大小为0. devLabelM[blockIdx.x].regionCount = 0; for (int i = 0; i < imgSize; i++) { // 遍历每一个标记值对应的连通区域对应的面积的大小,如果面积大小在要求范 // 围内, regionCount 的大小加1,否则,将面积值置为0,得到区域的个数 if (devLabelM[blockIdx.x].area[i] > minArea && devLabelM[blockIdx.x].area[i] < maxArea) devLabelM[blockIdx.x].regionCount++; else devLabelM[blockIdx.x].area[i] = 0; } // 把 regionCount (区域个数) 赋给 frSize 数组,用于开辟保存区域的具体信息的结 // 构体的空间 frSize[blockIdx.x] = devLabelM[blockIdx.x].regionCount; } // Kernel 函数:_getFilteredRegionKer (得到符合要求的面积区域的相关信息) static __global__ void _getFilteredRegionKer(ImageCuda inimg, LabelMaps * devLabelM, int *devIndGray, int *frSize, int minArea, int maxArea, int indGrayNum) { // 图像大小。 int imgSize = inimg.imgMeta.width * inimg.imgMeta.height; int currentLabel; // 要筛选的连通区域的最小值。 int min = devIndGray[2 * blockIdx.x]; // 要筛选的连通区域的最大值。 int max = devIndGray[2 * blockIdx.x + 1]; // 对应的 LABEL MEMORY 号(BLOCK列index)。 int index = (min + max) / 2; // 初始化每个连通区域的索引值和标记值,索引值即为对应的 LABEL MEMORY 号。 for (int i = 0,k = 0; k < frSize[blockIdx.x]; i++) if (devLabelM[blockIdx.x].area[i] != 0) { devLabelM[blockIdx.x].fr[k].index = index; devLabelM[blockIdx.x].fr[k++].labelMapNum = i; } // 初始化每个连通区域的左上角坐标和右下角坐标。(左上角坐标初始化为无限大,右 // 下角坐标初始化为无限小) for (int i = 0;i < devLabelM[blockIdx.x].regionCount;i++) { devLabelM[blockIdx.x].fr[i].regionX1 = CONNREGION_INI_IFI; devLabelM[blockIdx.x].fr[i].regionY1 = CONNREGION_INI_IFI; devLabelM[blockIdx.x].fr[i].regionX2 = CONNREGION_INI_SMA; devLabelM[blockIdx.x].fr[i].regionY2 = CONNREGION_INI_SMA; } // 遍历每一个标记值,找到其对应的连通区域,通过比较得到左上角坐标和右下角坐 // 标(因为已经将每一个连通区域的左上角和右下角初始化,在遍历的过程对,将每一 // 个标记值对应的在图像中的坐标找到,如果当前坐标值的横坐标小于 regionX1,则 // 更新为当前的坐标的横坐标,regionY1,regionX1,regionY2 的更新同理。) for (int i = 0; i < imgSize; i++) { currentLabel = devLabelM[blockIdx.x].gLabel[i]; if (devLabelM[blockIdx.x].gLabel[i] != 0 ) { // 得到当前位置对应的图像中的纵坐标值。 int y = i / inimg.imgMeta.width; // 得到当前位置对应的图像中的横坐标值。 int x = i % inimg.imgMeta.width; // 得到当前位置对应的标记值 。 currentLabel = devLabelM[blockIdx.x].gLabel[i]; for (int i = 0;i < devLabelM[blockIdx.x].regionCount;i++) { // 找到存储对应的连通区域信息的结构体。 if (currentLabel == devLabelM[blockIdx.x].fr[i].labelMapNum) { // 更新左上角的横坐标,取最小值。 if (x < devLabelM[blockIdx.x].fr[i].regionX1) devLabelM[blockIdx.x].fr[i].regionX1 = x; // 更新左上角的纵坐标,取最小值。 if (x > devLabelM[blockIdx.x].fr[i].regionX2) devLabelM[blockIdx.x].fr[i].regionX2 = x; // 更新右下角的横坐标,取最大值。 if (y <devLabelM[blockIdx.x].fr[i].regionY1) 
devLabelM[blockIdx.x].fr[i].regionY1 = y; // 更新右下角的纵坐标,取最大值。 if (y > devLabelM[blockIdx.x].fr[i].regionY2) devLabelM[blockIdx.x].fr[i].regionY2 = y; } } } } } // 宏:FAIL_CONNECT_REGION_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_CONNECT_REGION_NEW_FREE do { \ if (devtmplabel != NULL) \ cudaFree(devtmplabel); \ if (devLabel != NULL) \ cudaFree(devLabel); \ if (connector != NULL) \ cudaFree(connector); \ if (bState != NULL) \ cudaFree(bState); \ if (devFrsize != NULL) \ cudaFree(devFrsize); \ if (devIndGray != NULL) \ cudaFree(devIndGray); \ if (devLabelM != NULL) \ cudaFree(devLabelM); \ } while (0) // Host 成员方法:connectRegionNew(连通区域新方法) __host__ int ConnectRegionNew::connectRegionNew(Image *inimg, int * indGray, int indGrayNum, LabelMaps *labelM) { // 检查输入输出图像是否为 NULL,如果为 NULL 直接报错返回。 if (inimg == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 cudaError_t cudaerrcode; // 将输入图像拷贝到 Device 内存中。 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 计算各标记数组的存储数据大小 int data_size = insubimgCud.imgMeta.width * insubimgCud.imgMeta.height * sizeof (int); // 存储中间标记值的数组,其大小与输入图像大小一致 int *devtmplabel; // 存储最终标记值的数组,其大小与输入图像大小一致 int *devLabel; // 存储连通数组,其大小与输入图像大小 一致。 int *connector; // 存储线程块状态数组,其大小等于线程块数。 int *bState; // 存储连通区域个数的数组,其大小等于需要处理的灰度组数。 int *hstFrsize = new int[indGrayNum]; // 在设备端存储连通区域个数的数组。 int *devFrsize; // 需要处理的灰度范围数组。 int * devIndGray; // device 端区域集,用于处理信息。 LabelMaps *devLabelM; // 临时的区域集,用于在 host 端开辟空间。 LabelMaps *tmpMaps = new LabelMaps[indGrayNum]; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = 1; blocksize.y = 1; gridsize.x = indGrayNum; // 每个线程格中的总线程块数为把图像以 DEF_BLOCK_H 为一个线程块分割,并保证上 // 下两个线程处理的数据有一个重合的行。 gridsize.y = (insubimgCud.imgMeta.height + DEF_BLOCK_H - 3) / (DEF_BLOCK_H - 1); // 给设备端连通区域个数的数组分配空间。 cudaerrcode = cudaMalloc((void **)&devFrsize, indGrayNum * sizeof(int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 为标记数组分配大小。 cudaerrcode = cudaMalloc((void **)&devLabel, data_size); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 为临时标记数组分配大小。 cudaerrcode = cudaMalloc((void **)(&devtmplabel), indGrayNum * insubimgCud.imgMeta.width * (gridsize.y - 1) * sizeof(int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 为连通数组分配大小。 cudaerrcode = cudaMalloc((void **)(&connector), indGrayNum * insubimgCud.imgMeta.width * insubimgCud.imgMeta.height * sizeof(int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 为线程块状态数组分配大小。 cudaerrcode = cudaMalloc((void **)(&bState), gridsize.x * gridsize.y * sizeof(int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 将线程块状态数组中所有值初始化为 0。 cudaerrcode = cudaMemset(bState, 0, gridsize.x * gridsize.y * sizeof(int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 将连通数组初始化为 -1。 cudaerrcode = cudaMemset(connector, -1, indGrayNum * insubimgCud.imgMeta.width * insubimgCud.imgMeta.height * sizeof(int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 核函数中使用的共享内存的大小。 int bsize = sizeof (int) * DEF_BLOCK_H * 
insubimgCud.imgMeta.width; // 为要处理的灰度范围数组开辟空间,大小为 2 * indGrayNum。 cudaerrcode = cudaMalloc((void **)&devIndGray, 2 * indGrayNum * sizeof (int)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 为 device 端区域集开辟空间,大小为 indGryaNum 个区域集结构体。 cudaerrcode = cudaMalloc((void **)&devLabelM, indGrayNum * sizeof (LabelMaps)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } // 用于方向区域集的下标变量。 int lmsize; // 为每一个区域集中的标记值数组和面积数组开辟空间。大小和图像大小一致。 for (lmsize = 0; lmsize < indGrayNum; lmsize++) { cudaerrcode = cudaMalloc((void **)&(tmpMaps[lmsize].gLabel), data_size); cudaerrcode = cudaMalloc((void **)&(tmpMaps[lmsize].area), data_size); cudaerrcode = cudaMemset(tmpMaps[lmsize].area, 0, data_size); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } } // 把输入的灰度数组拷贝到设备端。 cudaMemcpy(devIndGray, indGray, 2 * indGrayNum * sizeof (int), cudaMemcpyHostToDevice); // 把输入区域集拷贝到设备端。 cudaMemcpy(devLabelM, tmpMaps, indGrayNum * sizeof (LabelMaps), cudaMemcpyHostToDevice); // 调用核函数,初始化每个线程块内标记值。并将结果计算出来,划分出连通区域。 _labelingConnectRegionKer<<<gridsize, blocksize, bsize>>>( insubimgCud, devIndGray, indGrayNum, devLabelM, devtmplabel, connector, bState); // 计算调用计算面积的 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blockforcalarea, gridforcalarea; int height = (insubimgCud.imgMeta.height + CONNREGION_PACK_MASK) / CONNREGION_PACK_NUM; blockforcalarea.x = DEF_BLOCK_X; blockforcalarea.y = DEF_BLOCK_Y; gridforcalarea.x = (insubimgCud.imgMeta.width + blockforcalarea.x - 1) / blockforcalarea.x; gridforcalarea.y = (height + blockforcalarea.y - 1) / blockforcalarea.y; gridforcalarea.z = indGrayNum; // 计算每一个区域的面积 _computeAreaKer<<<gridforcalarea, blockforcalarea>>>(insubimgCud, devLabelM); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间。 cout << "error" << endl; return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 mblocksize, mgridsize; mblocksize.x = 1; mblocksize.y = 1; mgridsize.x = indGrayNum; mgridsize.y = 1; // 根据各区域的大小筛选出符合面积要求的连通区域,并将区域个数存储在 devFrsize // 数组中,用于开辟新的空间存放连通区域的具体信息 _filterAreaKer<<<mgridsize,mblocksize>>>(insubimgCud,devLabelM, devIndGray, devFrsize, minArea, maxArea); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间。 cout << "error" << endl; return CUDA_ERROR; } // 将连通区域的数组个数数组数据从 device 端拷到 host 端。 cudaMemcpy(hstFrsize, devFrsize, indGrayNum * sizeof (int), cudaMemcpyDeviceToHost); // 将连通区域集的结果从设备端拷到主机端。 cudaMemcpy(tmpMaps, devLabelM, indGrayNum * sizeof (LabelMaps), cudaMemcpyDeviceToHost); // 为记录连通区域具体作息的结构体分配空间。 for (lmsize = 0; lmsize < indGrayNum; lmsize++) { cudaerrcode = cudaMalloc((void **)&(tmpMaps[lmsize].fr), hstFrsize[lmsize] * sizeof(FilteredRegions)); if (cudaerrcode != cudaSuccess) { // 释放内存空间。 FAIL_CONNECT_REGION_NEW_FREE; return cudaerrcode; } } // 把分配好空间的区域集从 host 端拷到 device端。 cudaMemcpy(devLabelM, tmpMaps, indGrayNum * sizeof (LabelMaps), cudaMemcpyHostToDevice); // 得到符合条件的连通区域的相关信息:左上角坐标值、右下角坐标值、标记值、对 // 应的 LABEL MEMORY 号(BLOCK列index) _getFilteredRegionKer<<<indGrayNum,1>>>(insubimgCud,devLabelM, devIndGray, devFrsize, minArea, maxArea, indGrayNum); if (cudaGetLastError() != cudaSuccess) { // 核函数出错,结束迭代函数,释放申请的变量空间。 cout << "error" << endl; return CUDA_ERROR; } // 进行最后拷贝,把区域集的完整信息从 device 端拷贝到 host 端 cudaMemcpy(hstFrsize, devFrsize, indGrayNum * sizeof (int), cudaMemcpyDeviceToHost); // 保存标记值数组的指针值。 int *devGlabel; // 保存区域结构体数组的指针值。 FilteredRegions *devFr; // 将区域集的指针从 device 端拷贝到 host 端。 cudaMemcpy(labelM, devLabelM, indGrayNum 
* sizeof (LabelMaps), cudaMemcpyDeviceToHost); // 通过拷贝得到的指针得到区域集的完整信息。 for (lmsize = 0; lmsize < indGrayNum; lmsize++) { devGlabel =labelM[lmsize].gLabel; devFr = labelM[lmsize].fr; labelM[lmsize].gLabel = new int[insubimgCud.imgMeta.width * insubimgCud.imgMeta.height]; labelM[lmsize].fr = new FilteredRegions[hstFrsize[lmsize]]; labelM[lmsize].regionCount = hstFrsize[lmsize]; cudaMemcpy(labelM[lmsize].gLabel, devGlabel, insubimgCud.imgMeta.width * insubimgCud.imgMeta.height * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(labelM[lmsize].fr, devFr, hstFrsize[lmsize] * sizeof(FilteredRegions), cudaMemcpyDeviceToHost); cudaFree(devFr); cudaFree(devGlabel); } // 以下程序段用于检测输出结果 for (lmsize = 0; lmsize < indGrayNum; lmsize++) { printf("labelM[%d].regionCount = %d\n", lmsize, labelM[lmsize].regionCount); for (int i = 0; i < insubimgCud.imgMeta.width * insubimgCud.imgMeta.height; i++) { if (i % insubimgCud.imgMeta.width == 0) printf("\n"); printf("%4d",labelM[lmsize].gLabel[i]); } printf("\n"); for (int i = 0; i < labelM[lmsize].regionCount; i++) { printf("labelM[%d}.fr[%d].index = %d\n",lmsize, i,labelM[lmsize].fr[i].index); printf("labelM[%d}.fr[%d].labelMapNum = %d\n", lmsize, i, labelM[lmsize].fr[i].labelMapNum); printf("labelM[%d}.fr[%d].X1 = %d\n",lmsize, i, labelM[lmsize].fr[i].regionX1); printf("labelM[%d}.fr[%d].Y1 = %d\n",lmsize, i, labelM[lmsize].fr[i].regionY1); printf("labelM[%d}.fr[%d].X2 = %d\n",lmsize, i, labelM[lmsize].fr[i].regionX2); printf("labelM[%d}.fr[%d].Y2 = %d\n",lmsize, i, labelM[lmsize].fr[i].regionY2); } } // 释放已分配的数组内存,避免内存泄露。 delete []tmpMaps; cudaFree(devFrsize); cudaFree(devIndGray); cudaFree(devLabelM); cudaFree(devtmplabel); cudaFree(devLabel); cudaFree(connector); cudaFree(bState); if (cudaGetLastError() != cudaSuccess) return CUDA_ERROR; // 处理完毕,退出。 return NO_ERROR; }
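// ---------------------------------------------------------------------------
// A small host-side sketch of the union-find convention used by the
// `connector` array above (function names are illustrative only):
// connector[x] < 0 marks x as a root (the array is memset to -1), otherwise
// connector[x] points to a smaller equivalent label. _findMinLabel walks the
// chain to the root, and merging links the larger of two roots to the smaller
// one via connector[max(a, b)] = min(a, b), exactly as the kernel does with
// connector[blockIdx.x * pitch + c] = a + b - c.
// ---------------------------------------------------------------------------
#include <vector>

static int find_root(const std::vector<int> &connector, int label) {
  int next;
  while ((next = connector[label]) >= 0)  // follow links until a root (-1) is reached
    label = next;
  return label;
}

static void merge_labels(std::vector<int> &connector, int a, int b) {
  a = find_root(connector, a);
  b = find_root(connector, b);
  if (a == b) return;                     // already in the same connected component
  const int hi = (a > b) ? a : b;
  connector[hi] = a + b - hi;             // larger root now points at the smaller label
}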
#include <stdio.h> #include <thrust/execution_policy.h> #include <thrust/gather.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <chrono> #include <thread> #include <vector> #include "common_cuda_helper.hpp" #include "nms_cuda_kernel.cuh" #include "trt_cuda_helper.cuh" #include "trt_plugin_helper.hpp" struct NMSBox { float box[4]; }; struct nms_centerwh2xyxy { __host__ __device__ NMSBox operator()(const NMSBox box) { NMSBox out; out.box[0] = box.box[0] - box.box[2] / 2.0f; out.box[1] = box.box[1] - box.box[3] / 2.0f; out.box[2] = box.box[0] + box.box[2] / 2.0f; out.box[3] = box.box[1] + box.box[3] / 2.0f; return out; } }; struct nms_sbox_idle { const float* idle_box_; __host__ __device__ nms_sbox_idle(const float* idle_box) { idle_box_ = idle_box; } __host__ __device__ NMSBox operator()(const NMSBox box) { return {idle_box_[0], idle_box_[1], idle_box_[2], idle_box_[3]}; } }; struct nms_score_threshold { float score_threshold_; __host__ __device__ nms_score_threshold(const float score_threshold) { score_threshold_ = score_threshold; } __host__ __device__ bool operator()(const float score) { return score < score_threshold_; } }; __global__ void nms_reindex_kernel(int n, int* output, int* index_cache) { CUDA_1D_KERNEL_LOOP(index, n) { const int old_index = output[index * 3 + 2]; output[index * 3 + 2] = index_cache[old_index]; } } __global__ void mask_to_output_kernel(const unsigned long long* dev_mask, const int* index, int* output, int* output_count, int batch_id, int cls_id, int spatial_dimension, int col_blocks, int max_output_boxes_per_class) { extern __shared__ unsigned long long remv[]; // fill remv with 0 CUDA_1D_KERNEL_LOOP(i, col_blocks) { remv[i] = 0; } __syncthreads(); int start = *output_count; int out_per_class_count = 0; for (int i = 0; i < spatial_dimension; i++) { const int nblock = i / threadsPerBlock; const int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { if (threadIdx.x == 0) { output[start * 3 + 0] = batch_id; output[start * 3 + 1] = cls_id; output[start * 3 + 2] = index[i]; start += 1; } out_per_class_count += 1; if (out_per_class_count >= max_output_boxes_per_class) { break; } __syncthreads(); // set every overlap box with bit 1 in remv const unsigned long long* p = dev_mask + i * col_blocks; CUDA_1D_KERNEL_LOOP(j, col_blocks) { if (j >= nblock) { remv[j] |= p[j]; } } // j __syncthreads(); } } // i if (threadIdx.x == 0) { *output_count = start; } } size_t get_onnxnms_workspace_size(size_t num_batches, size_t spatial_dimension, size_t num_classes, size_t boxes_word_size, int center_point_box, size_t output_length) { size_t boxes_xyxy_workspace = 0; if (center_point_box == 1) { boxes_xyxy_workspace = mmcv::getAlignedSize( num_batches * spatial_dimension * 4 * boxes_word_size); } size_t scores_workspace = mmcv::getAlignedSize(spatial_dimension * boxes_word_size); size_t boxes_workspace = mmcv::getAlignedSize(spatial_dimension * 4 * boxes_word_size); const int col_blocks = DIVUP(spatial_dimension, threadsPerBlock); size_t mask_workspace = mmcv::getAlignedSize(spatial_dimension * col_blocks * sizeof(unsigned long long)); size_t index_template_workspace = mmcv::getAlignedSize(spatial_dimension * sizeof(int)); size_t index_workspace = mmcv::getAlignedSize(spatial_dimension * sizeof(int)); size_t count_workspace = mmcv::getAlignedSize(sizeof(int)); return scores_workspace + boxes_xyxy_workspace + boxes_workspace + mask_workspace + index_template_workspace + index_workspace + count_workspace; } /** * Launch the NonMaxSuppression 
kernel * * The NMS will be performed on each batch/class, share the kernel implement * `nms_cuda`. For each batch/class, the `boxes_sorted` and `index_cache` will * be sorted by scores, boxes_sorted will be used in `nms_cuda` kernel. After * that, the output would be generated by `mask_to_output_kernel` with * `dev_mask` and `sorted_cache`. * * @param[in] bboxes with shape [num_batch, spatial_dimension, 4], input boxes * @param[in] scores with shape [num_batch, num_classes, spatial_dimension], * input scores * @param[in] max_output_boxes_per_class max output boxes per class * @param[in] iou_threshold threshold of iou * @param[in] score_threshold threshold of scores * @param[in] offset box offset, only 0 or 1 is valid * @param[out] output with shape [output_length, 3], each row contain index * (batch_id, class_id, boxes_id), filling -1 if result is not valid. * @param[in] center_point_box 0 if boxes is [left, top, right, bottom] 1 if * boxes is [center_x, center_y, width, height] * @param[in] num_batches batch size of boxes and scores * @param[in] spatial_dimension boxes numbers each batch * @param[in] num_classes class numbers * @param[in] output_length the max output rows * @param[in] workspace memory for all temporary variables. * @param[in] stream cuda stream */ void TRTNMSCUDAKernelLauncher_float(const float* boxes, const float* scores, const int max_output_boxes_per_class, const float iou_threshold, const float score_threshold, const int offset, int* output, int center_point_box, int num_batches, int spatial_dimension, int num_classes, size_t output_length, void* workspace, cudaStream_t stream) { const int col_blocks = DIVUP(spatial_dimension, threadsPerBlock); float* boxes_sorted = (float*)workspace; workspace = static_cast<char*>(workspace) + mmcv::getAlignedSize(spatial_dimension * 4 * sizeof(float)); float* boxes_xyxy = nullptr; if (center_point_box == 1) { boxes_xyxy = (float*)workspace; workspace = static_cast<char*>(workspace) + mmcv::getAlignedSize(num_batches * spatial_dimension * 4 * sizeof(float)); thrust::transform(thrust::cuda::par.on(stream), (NMSBox*)boxes, (NMSBox*)(boxes + num_batches * spatial_dimension * 4), (NMSBox*)boxes_xyxy, nms_centerwh2xyxy()); cudaCheckError(); } float* scores_sorted = (float*)workspace; workspace = static_cast<char*>(workspace) + mmcv::getAlignedSize(spatial_dimension * sizeof(float)); unsigned long long* dev_mask = (unsigned long long*)workspace; workspace = static_cast<char*>(workspace) + mmcv::getAlignedSize(spatial_dimension * col_blocks * sizeof(unsigned long long)); int* index_cache = (int*)workspace; workspace = static_cast<char*>(workspace) + mmcv::getAlignedSize(spatial_dimension * sizeof(int)); // generate sequence [0,1,2,3,4 ....] 
int* index_template = (int*)workspace; workspace = static_cast<char*>(workspace) + mmcv::getAlignedSize(spatial_dimension * sizeof(int)); thrust::sequence(thrust::cuda::par.on(stream), index_template, index_template + spatial_dimension, 0); int max_output_boxes_per_class_cpu = max_output_boxes_per_class; if (max_output_boxes_per_class_cpu <= 0) { max_output_boxes_per_class_cpu = spatial_dimension; } int* output_count = (int*)workspace; workspace = static_cast<char*>(workspace) + mmcv::getAlignedSize(sizeof(int)); cudaMemsetAsync(output_count, 0, sizeof(int), stream); // fill output with -1 thrust::fill(thrust::cuda::par.on(stream), output, output + output_length * 3, -1); cudaCheckError(); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); for (int batch_id = 0; batch_id < num_batches; ++batch_id) { for (int cls_id = 0; cls_id < num_classes; ++cls_id) { const int batch_cls_id = batch_id * num_classes + cls_id; // sort boxes by score cudaMemcpyAsync(scores_sorted, scores + batch_cls_id * spatial_dimension, spatial_dimension * sizeof(float), cudaMemcpyDeviceToDevice, stream); cudaCheckError(); cudaMemcpyAsync(index_cache, index_template, spatial_dimension * sizeof(int), cudaMemcpyDeviceToDevice, stream); cudaCheckError(); thrust::sort_by_key(thrust::cuda::par.on(stream), scores_sorted, scores_sorted + spatial_dimension, index_cache, thrust::greater<float>()); if (center_point_box == 1) { thrust::gather(thrust::cuda::par.on(stream), index_cache, index_cache + spatial_dimension, (NMSBox*)(boxes_xyxy + batch_id * spatial_dimension * 4), (NMSBox*)boxes_sorted); } else { thrust::gather(thrust::cuda::par.on(stream), index_cache, index_cache + spatial_dimension, (NMSBox*)(boxes + batch_id * spatial_dimension * 4), (NMSBox*)boxes_sorted); } cudaCheckError(); if (score_threshold > 0.0f) { thrust::transform_if( thrust::cuda::par.on(stream), (NMSBox*)boxes_sorted, (NMSBox*)(boxes_sorted + spatial_dimension * 4), scores_sorted, (NMSBox*)boxes_sorted, nms_sbox_idle(boxes_sorted), nms_score_threshold(score_threshold)); } nms_cuda<<<blocks, threads, 0, stream>>>(spatial_dimension, iou_threshold, offset, boxes_sorted, dev_mask); // will be performed when dev_mask is full. mask_to_output_kernel<<<1, threadsPerBlock, col_blocks * sizeof(unsigned long long), stream>>>( dev_mask, index_cache, output, output_count, batch_id, cls_id, spatial_dimension, col_blocks, max_output_boxes_per_class_cpu); } // cls_id } // batch_id }
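// ---------------------------------------------------------------------------
// A compact host-side sketch (illustrative buffer names, not part of the
// plugin) of the per-(batch, class) sort-then-gather step used above: the
// scores are sorted in descending order together with an index cache, and the
// permuted indices are then used to gather the boxes into score order before
// nms_cuda runs on them.
// ---------------------------------------------------------------------------
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/functional.h>
#include <cstdio>

struct Box4 { float x1, y1, x2, y2; };

int main() {
  thrust::host_vector<float> scores(4);
  scores[0] = 0.2f; scores[1] = 0.9f; scores[2] = 0.5f; scores[3] = 0.1f;

  thrust::host_vector<Box4> boxes(4);
  for (int i = 0; i < 4; ++i)
    boxes[i] = Box4{float(i), float(i), float(i) + 1.0f, float(i) + 1.0f};

  thrust::host_vector<int> index(4);
  thrust::sequence(index.begin(), index.end());  // index_template: 0,1,2,3

  // Sort the index cache by score, highest score first.
  thrust::sort_by_key(scores.begin(), scores.end(), index.begin(),
                      thrust::greater<float>());

  // Reorder the boxes with the permuted indices (boxes_sorted[i] = boxes[index[i]]).
  thrust::host_vector<Box4> boxes_sorted(4);
  thrust::gather(index.begin(), index.end(), boxes.begin(), boxes_sorted.begin());

  for (int i = 0; i < 4; ++i)
    printf("rank %d -> original box %d (score %.1f)\n", i, (int)index[i], (float)scores[i]);
  return 0;
}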
#include "kernel_common.h" #include "util/mirrored_memory.h" namespace dart { static const LossFunctionType lossFunction = HuberLoss; // -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=- template <bool dbgDA, bool dbgErr, bool dbgNorm> __global__ void gpu_errorAndDataAssociationObsToMod(const float4 * obsVertMap, const float4 * obsNormMap, const int width, const int height, const SE3 T_mc, const SE3 * T_fms, const int * sdfFrames, const Grid3D<float> * sdfs, const int nSdfs, const float distanceThreshold, const float normThreshold, const float planeOffset, const float3 planeNormal, int * lastElement, DataAssociatedPoint * pts, int * debugDataAssociation, float * debugError, float4 * debugNorm) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < width && y < height) { const int index = x + y*width; if (dbgDA) { debugDataAssociation[index] = -1; } if (dbgErr) { debugError[index] = NAN; } if (dbgNorm) { debugNorm[index] = make_float4(0); } const float4 & xObs_c = obsVertMap[index]; if (xObs_c.w > 0) { const float4 xObs_m = T_mc*xObs_c; if (dot(make_float3(xObs_m),planeNormal) >= planeOffset) { // calculate distance float sdfError = 1e20; int grid = -1; for (int g=0; g < nSdfs; ++g) { const int f = sdfFrames[g]; const float4 xObs_f = T_fms[f]*xObs_m; const Grid3D<float> & sdf = sdfs[g]; const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f)); if (!sdf.isInBoundsGradientInterp(xObs_g)) { continue; } const float d = (sdf.getValueInterpolated(xObs_g))*sdf.resolution; //if (fabs(d) < fabs(sdf_error)) { if (d < sdfError) { sdfError = d; grid = g; } } // skip unassociated points and points beyond the distance threshold if (sdfError*sdfError > distanceThreshold*distanceThreshold) { } else { const int f = sdfFrames[grid]; const float4 xObs_f = T_fms[f]*xObs_m; const Grid3D<float> & sdf = sdfs[grid]; const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f)); // TODO: figure out what's going on with the -1 const float4 nPred = -1*(SE3Invert( T_fms[f]*T_mc )*normalize(make_float4(sdf.getGradientInterpolated(xObs_g),0))); if (dbgNorm) { debugNorm[index] = nPred; } float4 v = obsNormMap[index]; float3 nObs = make_float3(0,0,0); if (v.w > 0.0) { v.w = 0; nObs = make_float3(v); if (dot(nPred,v) < normThreshold ) { return; } } if (dbgDA) { debugDataAssociation[index] = grid; } if (dbgErr) { debugError[index] = sdfError; } int myElement = atomicAdd(lastElement,1); DataAssociatedPoint dt; dt.index = index; dt.dataAssociation = grid; dt.error = sdfError; pts[myElement] = dt; } } } } } template <bool dbgDA, bool dbgErr, bool dbgNorm> __global__ void gpu_errorAndDataAssociationObsToModMultiModel(const float4 * obsVertMap, const float4 * obsNormMap, const int width, const int height, const int nModels, const SE3 * T_mcs, const SE3 * const * T_fms, const int * const * sdfFrames, const Grid3D<float> * const * sdfs, const int * nSdfs, const float * distanceThresholds, const float * normThresholds, const float * planeOffsets, const float3 * planeNormals, int * lastElement, DataAssociatedPoint * * pts, int * debugDataAssociation, float * debugError, float4 * debugNorm) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } const int index = x + y*width; if (dbgDA) { debugDataAssociation[index] = -1; } if (dbgErr) { debugError[index] = NAN; } if (dbgNorm) { debugNorm[index] = make_float4(0); } const float4 & xObs_c = obsVertMap[index]; if (xObs_c.w > 0) { float 
sdfError = 1e20; int associatedModel = -1; int associatedGrid = -1; for (int m=0; m<nModels; ++m) { const float4 xObs_m = T_mcs[m]*xObs_c; const float & planeOffset = planeOffsets[m]; const float3 & planeNormal = planeNormals[m]; if (dot(make_float3(xObs_m),planeNormal) >= planeOffset) { const int mNSdfs = nSdfs[m]; const int * mSdfFrames = sdfFrames[m]; const SE3 * mT_fms = T_fms[m]; const Grid3D<float> * mSdfs = sdfs[m]; for (int g=0; g<mNSdfs; ++g) { const int f = mSdfFrames[g]; const float4 xObs_f = mT_fms[f]*xObs_m; const Grid3D<float> & sdf = mSdfs[g]; //printf("model %d sdf %d is in frame %d\n",m,g,f); //printf("%f ",sdf.resolution); const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f)); if (!sdf.isInBoundsGradientInterp(xObs_g)) { continue; } const float d = (sdf.getValueInterpolated(xObs_g))*sdf.resolution; //printf("%f ",d); // if (fabs(d) < fabs(sdfError) { if (d < sdfError) { //printf("."); if (d*d < distanceThresholds[m]*distanceThresholds[m]) { //printf("*"); sdfError = d; associatedGrid = g; associatedModel = m; } } } } } if (associatedModel != -1) { // const int f = sdfFrames[associatedModel][associatedGrid]; // const float4 xObs_m = T_mcs[associatedModel]*xObs_c; // const float4 xObs_f = T_fms[associatedModel][f]*xObs_m; // const Grid3D<float> &sdf = sdfs[associatedModel][associatedGrid]; // const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f)); // const float4 nPred = 1*(SE3Invert( T_fms[associatedModel][f]*T_mcs[associatedModel] )*normalize(make_float4(sdf.getGradientInterpolated(xObs_g),0))); // float4 v = obsNormMap[index]; // float3 nObs = make_float3(0,0,0); // if (v.w > 0.0) { // v.w = 0; // nObs = make_float3(v); // if (dot(nPred,v) >= normThresholds[associatedModel]) { if (dbgDA) { debugDataAssociation[index] = ((associatedModel << 16) | associatedGrid); } if (dbgErr) { debugError[index] = sdfError; } if (dbgNorm) { debugNorm[index] = obsNormMap[index]; } int myElement = atomicAdd(&lastElement[associatedModel],1); DataAssociatedPoint * mPts = pts[associatedModel]; DataAssociatedPoint dt; dt.index = index; dt.dataAssociation = associatedGrid; dt.error = sdfError; mPts[myElement] = dt; // } // } } } } template <bool dbgJs> __global__ void gpu_normEqnsObsToMod(const int dims, const DataAssociatedPoint * pts, const float4 * obsVertMap, const int nPoints, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const float huberDelta, float * result, float4 * debugJs) { extern __shared__ float s[]; int index = blockIdx.x*blockDim.x + threadIdx.x; if (index >= nPoints) { return; } if (dbgJs) { debugJs[index] = make_float4(0); } const float4 xObs_m = T_mc*obsVertMap[pts[index].index]; // array declarations float * J = &s[threadIdx.x*dims]; int obsFrame = sdfFrames[pts[index].dataAssociation]; const float4 xObs_f = T_fms[obsFrame]*xObs_m; // compute SDF gradient const int g = pts[index].dataAssociation; const Grid3D<float> & sdf = sdfs[g]; const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f)); const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g); const float3 sdfGrad_m = SE3Rotate(T_mfs[obsFrame],sdfGrad_f); getErrorJacobianOfModelPoint(J,xObs_m,obsFrame,sdfGrad_m,dims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); if (dbgJs) { debugJs[index*dims + 0] = make_float4(1,0,0,1); debugJs[index*dims + 1] = make_float4(0,1,0,1); debugJs[index*dims + 2] = make_float4(0,0,1,1); debugJs[index*dims + 3] = make_float4( 
0,-xObs_m.z, xObs_m.y,1); debugJs[index*dims + 4] = make_float4( xObs_m.z, 0,-xObs_m.x,1); debugJs[index*dims + 5] = make_float4(-xObs_m.y, xObs_m.x, 0,1); } const float residual = pts[index].error; float * JTr = result; float * JTJ = &result[dims]; float * e = &result[dims + JTJSize(dims)]; switch(lossFunction) { case SquaredLoss: { computeSquaredLossResult(dims,-residual,J,e,JTr,JTJ); // TODO: why negative again? } break; case HuberLoss: { if (fabs(pts[index].error) < huberDelta ) { computeSquaredLossResult(dims,-residual,J,e,JTr,JTJ); // TODO: why negative again? } else { float v = huberDelta; if (pts[index].error < 0) { v = -v; } for (int i=0; i<dims; i++) { if( J[i]==0.0f) continue; atomicAdd(&JTr[i],v*-J[i]); for (int j=0; j<=i; j++) { float v2 = J[i]*J[j]; atomicAdd(&JTJ[((i*(i+1))>>1) + j],v2); } } atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta)); } } break; } } __global__ void gpu_normEqnsObsToModReduced(const int fullDims, const int redDims, const DataAssociatedPoint * pts, const float4 * obsVertMap, const int nPoints, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const float huberDelta, const float * dtheta_dalpha, float * result) { extern __shared__ float s[]; int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + blockDim.x*threadIdx.y; if (index >= nPoints) { return; } const float4 xObs_m = T_mc*obsVertMap[pts[index].index]; // array declarations float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims) + fullDims]; int obsFrame = sdfFrames[pts[index].dataAssociation]; const float4 xObs_f = T_fms[obsFrame]*xObs_m; // compute SDF gradient const int g = pts[index].dataAssociation; const Grid3D<float> & sdf = sdfs[g]; const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f)); const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g); const float3 sdfGrad_m = make_float3(SE3Rotate(T_mfs[obsFrame],make_float4(sdfGrad_f.x,sdfGrad_f.y,sdfGrad_f.z,0.0))); getErrorJacobianOfModelPoint(de_dtheta,xObs_m,obsFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); doPoseGradientReduction(J,de_dtheta,dtheta_dalpha,fullDims,redDims); const float residual = pts[index].error; float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; switch(lossFunction) { case SquaredLoss: { computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ); } break; case HuberLoss: { if (fabs(pts[index].error) < huberDelta ) { computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ); } else { float v = huberDelta; if (pts[index].error < 0) { v = -v; } for (int i=0; i<redDims; i++) { if( J[i]==0.0f) continue; atomicAdd(&JTr[i],v*-J[i]); } atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta)); } } break; } } __global__ void gpu_normEqnsObsToModParamMap(const int fullDims, const int redDims, const DataAssociatedPoint * pts, const float4 * obsVertMap, const int nPoints, const SE3 T_mc, const SE3 * T_fms, const SE3 * T_mfs, const int * sdfFrames, const Grid3D<float> * sdfs, const int * dependencies, const JointType * jointTypes, const float3 * jointAxes, const float huberDelta, const int * dMapping, float * result) { extern __shared__ float s[]; int index = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x + blockDim.x*threadIdx.y; if (index >= nPoints) { return; } const float4 xObs_m = T_mc*obsVertMap[pts[index].index]; 
// array declarations float * de_dtheta = &s[tid*(fullDims+redDims)]; float * J = &s[tid*(fullDims+redDims) + fullDims]; int obsFrame = sdfFrames[pts[index].dataAssociation]; const float4 xObs_f = T_fms[obsFrame]*xObs_m; // compute SDF gradient const int g = pts[index].dataAssociation; const Grid3D<float> & sdf = sdfs[g]; const float3 xObs_g = sdf.getGridCoords(make_float3(xObs_f)); const float3 sdfGrad_f = sdf.getGradientInterpolated(xObs_g); const float3 sdfGrad_m = make_float3(SE3Rotate(T_mfs[obsFrame],make_float4(sdfGrad_f.x,sdfGrad_f.y,sdfGrad_f.z,0.0))); getErrorJacobianOfModelPoint(de_dtheta,xObs_m,obsFrame,sdfGrad_m,fullDims,dependencies,jointTypes,jointAxes,T_fms,T_mfs); doParamMapping(J,de_dtheta,dMapping,fullDims,redDims); const float residual = pts[index].error; float * JTr = result; float * JTJ = &result[redDims]; float * e = &result[redDims + JTJSize(redDims)]; switch(lossFunction) { case SquaredLoss: { computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ); } break; case HuberLoss: { if (fabs(pts[index].error) < huberDelta ) { computeSquaredLossResult(redDims,-residual,J,e,JTr,JTJ); } else { float v = huberDelta; if (pts[index].error < 0) { v = -v; } for (int i=0; i<redDims; i++) { if( J[i]==0.0f) continue; atomicAdd(&JTr[i],v*-J[i]); } atomicAdd(e,huberDelta * (fabs(pts[index].error) - 0.5*huberDelta)); } } break; } } void errorAndDataAssociation(const float4 * dObsVertMap, const float4 * dObsNormMap, const int width, const int height, const MirroredModel & model, const OptimizationOptions & opts, DataAssociatedPoint * dPts, int * dLastElement, int * hLastElement, int * debugDataAssociation, float * debugError, float4 * debugNorm) { dim3 block(16,8); dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); cudaMemset(dLastElement,0,sizeof(int)); if (debugDataAssociation == 0) { if (debugError == 0) { if (debugNorm == 0) { gpu_errorAndDataAssociationObsToMod<false,false,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm); } else { gpu_errorAndDataAssociationObsToMod<false,false,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm); } } else { if (debugNorm == 0) { gpu_errorAndDataAssociationObsToMod<false,true,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm); } else { gpu_errorAndDataAssociationObsToMod<false,true,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, 
debugError, debugNorm); } } } else { if (debugError == 0) { if (debugNorm == 0) { gpu_errorAndDataAssociationObsToMod<true,false,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm); } else { gpu_errorAndDataAssociationObsToMod<true,false,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm); } } else { if (debugNorm == 0) { gpu_errorAndDataAssociationObsToMod<true,true,false><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm); } else { gpu_errorAndDataAssociationObsToMod<true,true,true><<<grid,block>>>(dObsVertMap, dObsNormMap, width, height, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getNumSdfs(), opts.distThreshold[0], opts.normThreshold, opts.planeOffset[0], opts.planeNormal[0], dLastElement, dPts, debugDataAssociation, debugError, debugNorm); } } } cudaMemcpy(hLastElement,dLastElement,sizeof(int),cudaMemcpyDeviceToHost); } void errorAndDataAssociationMultiModel(const float4 * dObsVertMap, const float4 * dObsNormMap, const int width, const int height, const int nModels, const SE3 * T_mcs, const SE3 * const * T_fms, const int * const * sdfFrames, const Grid3D<float> * const * sdfs, const int * nSdfs, const float * distanceThresholds, const float * normalThresholds, const float * planeOffsets, const float3 * planeNormals, int * lastElements, DataAssociatedPoint * * pts, int * dDebugDataAssociation, float * dDebugError, float4 * dDebugNorm, cudaStream_t stream) { cudaMemset(lastElements,0,nModels*sizeof(int)); dim3 block; if (height == 1) { block.x = 128; block.y = block.z = 1; } else { block.x = 16; block.y = 8; block.z = 1; } dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); if (dDebugDataAssociation == 0) { if (dDebugError == 0) { if (dDebugNorm == 0) { gpu_errorAndDataAssociationObsToModMultiModel<false,false,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } else { gpu_errorAndDataAssociationObsToModMultiModel<false,false,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } } else { if (dDebugNorm == 0) { gpu_errorAndDataAssociationObsToModMultiModel<false,true,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, 
nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } else { gpu_errorAndDataAssociationObsToModMultiModel<false,true,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } } } else { if (dDebugError == 0) { if (dDebugNorm == 0) { gpu_errorAndDataAssociationObsToModMultiModel<true,false,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } else { gpu_errorAndDataAssociationObsToModMultiModel<true,false,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } } else { if (dDebugNorm == 0) { gpu_errorAndDataAssociationObsToModMultiModel<true,true,false><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } else { gpu_errorAndDataAssociationObsToModMultiModel<true,true,true><<<grid,block,0,stream>>>(dObsVertMap, dObsNormMap, width, height, nModels, T_mcs, T_fms, sdfFrames, sdfs, nSdfs,distanceThresholds, normalThresholds, planeOffsets, planeNormals, lastElements, pts, dDebugDataAssociation, dDebugError, dDebugNorm); } } } } void normEqnsObsToMod(const int dims, const float4 * dObsVertMap, const int width, const int height, const MirroredModel & model, const OptimizationOptions & opts, DataAssociatedPoint * dPts, int nElements, float * dResult, float4 * debugJs) { std::cout << nElements << " points associated to model " << model.getModelID() << std::endl; dim3 block; if (height == 1) { block.x = 128; block.y = block.z = 1; } else { block.x = 16; block.y = 8; block.z = 1; } dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); // initilize system to zero cudaMemset(dResult,0,(dims + JTJSize(dims) + 1)*sizeof(float)); if (nElements > 10) { block = dim3(64,1,1); grid = dim3(ceil((double)nElements/64),1,1); { if (debugJs == 0) { gpu_normEqnsObsToMod<false><<<grid,block,64*dims*sizeof(float)>>>(dims, dPts, dObsVertMap, nElements, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), opts.huberDelta, dResult, debugJs); } else { gpu_normEqnsObsToMod<true><<<grid,block,64*dims*sizeof(float)>>>(dims, dPts, dObsVertMap, nElements, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), opts.huberDelta, dResult, debugJs); } #ifdef CUDA_ERR_CHECK cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu_normEqnsObsToMod error: %s\n",cudaGetErrorString(err)); } #endif } } } void normEqnsObsToModReduced(const int 
dims, const int reductionDims, const float * d_dtheta_dalpha, const float4 * dObsVertMap, const int width, const int height, const MirroredModel & model, const OptimizationOptions & opts, DataAssociatedPoint * dPts, int nElements, float * dResult) { dim3 block; if (height == 1) { block.x = 128; block.y = block.z = 1; } else { block.x = 16; block.y = 8; block.z = 1; } dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); // initilize system to zero cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float)); if (nElements > 10) { block = dim3(64,1,1); grid = dim3(ceil((double)nElements/64),1,1); { gpu_normEqnsObsToModReduced<<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims, reductionDims, dPts, dObsVertMap, nElements, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), opts.huberDelta, d_dtheta_dalpha, dResult); #ifdef CUDA_ERR_CHECK cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu_normEqnsObsToModReduced error: %s\n",cudaGetErrorString(err)); } #endif } } } void normEqnsObsToModParamMap(const int dims, const int reductionDims, const int * dMapping, const float4 * dObsVertMap, const int width, const int height, const MirroredModel & model, const OptimizationOptions & opts, DataAssociatedPoint * dPts, int nElements, float * dResult) { dim3 block; if (height == 1) { block.x = 128; block.y = block.z = 1; } else { block.x = 16; block.y = 8; block.z = 1; } dim3 grid( ceil( width / (float)block.x), ceil(height / (float)block.y )); // initilize system to zero cudaMemset(dResult,0,(reductionDims + JTJSize(reductionDims) + 1)*sizeof(float)); if (nElements > 10) { block = dim3(64,1,1); grid = dim3(ceil((double)nElements/64),1,1); { gpu_normEqnsObsToModParamMap<<<grid,block,64*(dims+reductionDims)*sizeof(float)>>>(dims, reductionDims, dPts, dObsVertMap, nElements, model.getTransformCameraToModel(), model.getDeviceTransformsModelToFrame(), model.getDeviceTransformsFrameToModel(), model.getDeviceSdfFrames(), model.getDeviceSdfs(), model.getDeviceDependencies(), model.getDeviceJointTypes(), model.getDeviceJointAxes(), opts.huberDelta, dMapping, dResult); #ifdef CUDA_ERR_CHECK cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("gpu_normEqnsObsToModReduced error: %s\n",cudaGetErrorString(err)); } #endif } } } }
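// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the DART sources above): the Huber-weighted
// accumulation of the Gauss-Newton normal equations performed per associated
// point by gpu_normEqnsObsToMod. Names below are hypothetical; the packed
// lower-triangular JTJ layout and the [JTr | JTJ | e] result layout are
// assumed to match the kernels above, and the inlier branch assumes
// computeSquaredLossResult accumulates J^T r, J^T J and the scalar error in
// the usual way.
#include <cuda_runtime.h>

__host__ __device__ inline int packedTriSize(int dims) { return dims*(dims+1)/2; }

// result layout: [ JTr (dims) | JTJ (packed lower triangle) | e (1) ]
__device__ void accumulateHuberPoint(const int dims,
                                     const float residual,
                                     const float * J,
                                     const float huberDelta,
                                     float * result) {
    float * JTr = result;
    float * JTJ = result + dims;
    float * e   = result + dims + packedTriSize(dims);
    if (fabsf(residual) < huberDelta) {
        // quadratic (inlier) region: plain squared-loss contribution
        for (int i = 0; i < dims; ++i) {
            if (J[i] == 0.0f) continue;
            atomicAdd(&JTr[i], -residual*J[i]);
            for (int j = 0; j <= i; ++j) atomicAdd(&JTJ[(i*(i+1))/2 + j], J[i]*J[j]);
        }
        atomicAdd(e, 0.5f*residual*residual);
    } else {
        // linear (outlier) region: the gradient magnitude is clamped to
        // huberDelta, as in the HuberLoss branch of the kernels above
        const float v = residual < 0.0f ? -huberDelta : huberDelta;
        for (int i = 0; i < dims; ++i) {
            if (J[i] == 0.0f) continue;
            atomicAdd(&JTr[i], -v*J[i]);
            for (int j = 0; j <= i; ++j) atomicAdd(&JTJ[(i*(i+1))/2 + j], J[i]*J[j]);
        }
        atomicAdd(e, huberDelta*(fabsf(residual) - 0.5f*huberDelta));
    }
}
// A kernel would call accumulateHuberPoint once per data-associated point,
// exactly where the switch(lossFunction) blocks appear above; the atomicAdds
// keep the accumulation safe when many threads write into one result buffer.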
#include "utils.h" #include "kernel_prng.h" #include "kernel_metropolis.h" #include "kernel_reduction.h" int main(int argc, char **argv){ int L = 32; int R = 1; int atrials = 1; int ains = 1; int apts = 1; int ams = 1; uint64_t seed = 2; float TR = 0.1f; float dT = 0.1f; float h = 0.1f; for (int i=0; i<argc; i++) { /* lattice size and number of replicas */ if(strcmp(argv[i],"-l") == 0){ L = atoi(argv[i+1]); if ( (L % 32) != 0 ) { fprintf(stderr, "lattice dimensional size must be multiples of 32"); exit(1); } R = atoi(argv[i+2]); } /* get TR and dT */ else if(strcmp(argv[i],"-t") == 0){ TR = atof(argv[i+1]); dT = atof(argv[i+2]); } /* the magnetic field constant */ else if(strcmp(argv[i],"-h") == 0){ h = atof(argv[i+1]); } /* adaptative dt parameters (number of trials, insertions, tempering, simulation */ else if(strcmp(argv[i], "-a") == 0){ atrials = atoi(argv[i+1]); ains = atoi(argv[i+2]); apts = atoi(argv[i+3]); ams = atoi(argv[i+4]); } /* seed for random number generation */ else if(strcmp(argv[i],"-z") == 0){ seed = atol(argv[i+1]); } } /* total number of spins per replica */ int N = (L)*(L)*(L); /* compute Ra to be the final size Ra = R + TL */ int Ra = R + (atrials * ains); /* active replicas per gpu */ int ar = R; /* replica pool per gpu */ int rpool = Ra; /* parameter seed */ uint64_t hpcgs, hpcgi; gpu_pcg32_srandom_r(&hpcgs, &hpcgi, seed, 1); seed = gpu_pcg32_random_r(&hpcgs, &hpcgi); /* build the space of computation for the lattices */ dim3 mcblock (BX, BY / 2, BZ); dim3 mcgrid ((L + BX - 1) / BX, (L + BY - 1) / (2 * BY), (L + BZ - 1) / BZ); dim3 lblock (BLOCKSIZE1D, 1, 1); dim3 lgrid ((N + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1); /* build the space of computation for random numbers and lattice simulation */ dim3 prng_block (BLOCKSIZE1D, 1, 1); dim3 prng_grid (((N / 4) + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1); /* T is a sorted temp array */ float* T = (float*)malloc(sizeof(float) * Ra); #ifdef DEBUG // memory for H array int* hH = (int*)malloc(sizeof(int) * N); int* hr = (int*)malloc(sizeof(int) * N); uint64_t* pcga = (uint64_t*)malloc(sizeof(uint64_t) * N/4); uint64_t* pcgb = (uint64_t*)malloc(sizeof(uint64_t) * N/4); #endif /* allocate the replica pool */ int** mdlat = (int**) malloc(sizeof(int *) * rpool); /* per temperature counter array */ float* aex = (float*) malloc(sizeof(float) * rpool); /* per temperature counter array */ float* aavex = (float*)malloc(sizeof(float) * rpool); /* exchange energies */ float* aexE = (float*)malloc(sizeof(float) * rpool); /* PRNG states volume, one state per thread */ uint64_t** apcga = (uint64_t**)malloc(sizeof(uint64_t*) * rpool); uint64_t** apcgb = (uint64_t**)malloc(sizeof(uint64_t*) * rpool); /* fragmented indices for replicas temperature sorted */ findex_t* arts = (findex_t*)malloc(sizeof(findex_t) * rpool); /* fragmented indices for temperatures replica sorted */ findex_t* atrs = (findex_t*)malloc(sizeof(findex_t) * rpool); /* fragmented temperatures sorted */ float* aT = (float*)malloc(sizeof(float) * rpool); /* malloc device magnetic field */ int* dH; cudaMalloc((void**)&dH, sizeof(int)*N); /* malloc device energy reductions */ float* dE; cudaMalloc((void**)&dE, sizeof(float)*rpool); /* malloc the data for 'r' replicas on each GPU */ for (int k = 0; k < rpool; ++k) { cudaMalloc(&mdlat[k], sizeof(int) * N); cudaMalloc(&apcga[k], (N/4) * sizeof(uint64_t)); cudaMalloc(&apcgb[k], (N/4) * sizeof(uint64_t)); // offset and sequence approach kernel_gpupcg_setup<<<prng_grid, prng_block>>>(apcga[k], apcgb[k], N/4, seed + N/4 * k, k); #ifdef 
DEBUG printf("tid=%i N=%i N/4 = %i R = %i seed = %lu k = %d \n", 0, N, N/4, R, seed + (N/4 * k), k); #endif } /* host memory setup for each replica */ for(int i = 0; i < R; i++){ /* array of temperatures increasing order */ T[i] = TR - (R-1 - i)*dT; } int count = 0; for(int j = 0; j < ar; ++j){ arts[j] = atrs[j] = (findex_t){0, j}; aT[j] = TR - (float)(R - 1 - count) * dT; aex[j] = 0; ++count; } /* print parameters */ printf("\tparameters:{\n"); printf("\t\tL: %i\n", L); printf("\t\tvolume: %i\n", N); printf("\t\t[TR,dT]: [%f, %f]\n", TR, dT); printf("\t\t[atrials, ains, apts, ams]: [%i, %i, %i, %i]\n", atrials, ains, apts, ams); printf("\t\tmag_field h: %f\n", h); printf("\t\treplicas: %i\n", R); printf("\t\tseed: %lu\n", seed); /* find good temperature distribution */ FILE *fw = fopen("trials.dat", "w"); fprintf(fw, "trial av min max\n"); #ifdef DEBUG /* print the beginning temp */ printarrayfrag(aT, ar, "Initial temp set:\naT"); printf("\n\n"); #endif double start = rtclock(); /* each adaptation iteration improves the temperature distribution */ for (int trial = 0; trial < atrials; ++trial) { /* progress printing */ printf("[trial %i of %i]\n", trial+1, atrials); fflush(stdout); /* distribution for H */ kernel_reset_random_gpupcg<<<lgrid, lblock>>>(dH, N, apcga[0], apcgb[0]); #ifdef DEBUG cudaMemcpy(hH, dH, N*sizeof(int), cudaMemcpyDeviceToHost); for (int n = 0; n < N; n++) printf("dH %d %d\n", n, hH[n]); #endif /* reset ex counters */ reset_array(aex, rpool, 0.0f); /* reset average ex counters */ reset_array(aavex, rpool, 0.0f); /* reset gpu data with a new seed from the sequential PRNG */ seed = gpu_pcg32_random_r(&hpcgs, &hpcgi); #ifdef DEBUG printf("new seed [%lu]\n", seed); #endif for (int k = 0; k < ar; ++k) { kernel_reset<int><<< lgrid, lblock >>>(mdlat[k], N, 1); cudaCheckErrors("kernel: reset spins up"); kernel_gpupcg_setup<<<prng_grid, prng_block>>>(apcga[k], apcgb[k], N/4, seed + (uint64_t)(N/4 * k), k); cudaCheckErrors("kernel: prng reset"); #ifdef DEBUG cudaMemcpy(pcga, apcga[k], sizeof(uint64_t)*N/4, cudaMemcpyDeviceToHost); for (int i = 0; i < N/4; i++) printf("pcga: %d %d %lu\n", k, i, pcga[i]); cudaMemcpy(pcgb, apcgb[k], sizeof(uint64_t)*N/4, cudaMemcpyDeviceToHost); for (int i = 0; i < N/4; i++) printf("pcgb: %d %d %lu\n", k, i, pcgb[i]); #endif } /* parallel tempering */ for(int p = 0; p < apts; ++p) { /* metropolis simulation */ for(int i = 0; i < ams; ++i) { for(int k = 0; k < ar; ++k) { kernel_metropolis<<< mcgrid, mcblock >>>(N, L, mdlat[k], dH, h, -2.0f/aT[atrs[k].i], apcga[k], apcgb[k], 0); #ifdef DEBUG cudaMemcpy(pcga, apcga[k], sizeof(uint64_t)*N/4, cudaMemcpyDeviceToHost); cudaMemcpy(pcgb, apcgb[k], sizeof(uint64_t)*N/4, cudaMemcpyDeviceToHost); cudaMemcpy(hr, mdlat[k], N*sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < N/4; i++) printf("black pcga & pcgb: %d %d %lu %lu\n", k, i, pcga[i], pcgb[i]); for (int i = 0; i < N; i++) printf("black replica: %d %d %d\n", k, i, hr[i]); #endif } cudaDeviceSynchronize(); cudaCheckErrors("mcmc: kernel metropolis white launch"); for(int k = 0; k < ar; ++k) { kernel_metropolis<<< mcgrid, mcblock >>>(N, L, mdlat[k], dH, h, -2.0f/aT[atrs[k].i], apcga[k], apcgb[k], 1); #ifdef DEBUG cudaMemcpy(pcga, apcga[k], sizeof(uint64_t)*N/4, cudaMemcpyDeviceToHost); cudaMemcpy(pcgb, apcgb[k], sizeof(uint64_t)*N/4, cudaMemcpyDeviceToHost); cudaMemcpy(hr, mdlat[k], N*sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < N/4; i++) printf("white pcga & pcgb: %d %d %lu %lu\n", k, i, pcga[i], pcgb[i]); for (int i = 0; i < N; i++) 
printf("white replica: %d %d %d\n", k, i, hr[i]); #endif } cudaDeviceSynchronize(); cudaCheckErrors("mcmc: kernel metropolis black launch"); } #ifdef DEBUG for(int k = 0; k < ar; ++k) { } #endif /* compute energies for exchange */ // adapt_ptenergies(s, tid); kernel_reset<float><<< (ar + BLOCKSIZE1D - 1)/BLOCKSIZE1D, BLOCKSIZE1D >>> (dE, ar, 0.0f); cudaDeviceSynchronize(); /* compute one energy reduction for each replica */ dim3 block(BX, BY, BZ); dim3 grid((L + BX - 1)/BX, (L + BY - 1)/BY, (L + BZ - 1)/BZ); for(int k = 0; k < ar; ++k){ /* launch reduction kernel for k-th replica */ kernel_redenergy<float><<<grid, block>>>(mdlat[k], L, dE + k, dH, h); cudaDeviceSynchronize(); cudaCheckErrors("kernel_redenergy"); } cudaMemcpy(aexE, dE, ar*sizeof(float), cudaMemcpyDeviceToHost); /* exchange phase */ double delta = 0.0; findex_t fnow, fleft; fnow.f = 0; // the f field is always 0 for a single GPU fnow.i = ar-1; /* traverse in reverse temperature order */ for (int k = R-1; k > 0; --k) { if((k % 2) == (p % 2)){ fgoleft(&fnow, ar); continue; } fleft = fgetleft(fnow, ar); delta = (1.0f/aT[fnow.i] - 1.0f/aT[fleft.i]) * (aexE[arts[fleft.i].i] - aexE[arts[fnow.i].i]); double randme = gpu_rand01(&hpcgs, &hpcgi); #ifdef DEBUG printf("delta=%f exp(-delta) = %f rand = %f\n", delta, exp(-delta), randme); #endif if( delta < 0.0 || randme < exp(-delta) ){ //adapt_swap(s, fnow, fleft); findex_t t1 = arts[fnow.i]; findex_t t2 = arts[fleft.i]; findex_t taux = atrs[t1.i]; findex_t raux = arts[fnow.i]; /* swap rts */ arts[fnow.i] = arts[fleft.i]; arts[fleft.i] = raux; /* swap trs */ atrs[t1.i] = atrs[t2.i]; atrs[t2.i] = taux; /* this array is temp sorted */ aex[fnow.i] += 1.0f; } fgoleft(&fnow, ar); } printf("\rpt........%i%%", 100 * (p + 1)/apts); fflush(stdout); } double avex = 0; for(int k = 1; k < ar; ++k){ avex += aavex[k] = 2.0 * aex[k] / (double)apts; } avex /= (double)(R-1); double minex = 1; for(int k = 1; k < ar; ++k){ if (aavex[k] < minex) minex = aavex[k]; } double maxex = 0; for(int k = 1; k < ar; ++k){ if (aavex[k] > maxex) maxex = aavex[k]; } fprintf(fw, "%d %f %f %f\n", trial, avex, minex, maxex); fflush(fw); printf(" [<avg> = %.3f <min> = %.3f <max> = %.3f]\n\n", avex, minex, maxex); printarrayfrag(aex, ar, "aex"); printarrayfrag(aavex, ar, "aavex"); printindexarrayfrag(aexE, arts, ar, "aexE"); // update aT, R, ar after insertion insert_temps(aavex, aT, &R, &ar, ains); // update aT rebuild_temps(aT, R, ar); // update arts and atrs rebuild_indices(arts, atrs, ar); } // atrials double end = rtclock(); printf("Total trial time %.2f secs\n", end-start); fclose(fw); for(int i = 0; i < rpool; ++i) { cudaFree(mdlat[i]); cudaFree(apcga[i]); cudaFree(apcgb[i]); } cudaFree(dH); cudaFree(dE); free(T); #ifdef DEBUG free(hH); free(hr); free(pcga); free(pcgb); #endif free(aex); free(aavex); free(aexE); free(mdlat); free(apcga); free(apcgb); free(arts); free(atrs); free(aT); return 0; }
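// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the simulation above): the replica-exchange
// acceptance test used in the parallel-tempering loop, isolated from the
// fragmented-index bookkeeping (arts/atrs). Names are hypothetical.
#include <cmath>

// Replicas at temperatures Tnow and Tleft with energies Enow and Eleft swap
// with probability min(1, exp(-delta)), where
//   delta = (1/Tnow - 1/Tleft) * (Eleft - Enow),
// matching the delta computed in the exchange phase above. u is a uniform
// random number in [0,1).
inline bool acceptSwap(double Tnow, double Tleft,
                       double Enow, double Eleft, double u) {
    const double delta = (1.0/Tnow - 1.0/Tleft) * (Eleft - Enow);
    return delta < 0.0 || u < std::exp(-delta);
}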
#pragma once #include <gunrock/util/basic_utils.h> #include <gunrock/util/error_utils.cuh> #include <gunrock/util/multithread_utils.cuh> #include <gunrock/util/multithreading.cuh> #include <vector> namespace gunrock { namespace app { /** * @brief Base partitioner structure. * * @tparam _VertexId * @tparam _SizeT * @tparam _Value * @tparam ENABLE_BACKWARD * @tparam KEEP_ORDER * @tparam KEEP_NODE_NUM * */ template <typename _VertexId, typename _SizeT, typename _Value> // bool ENABLE_BACKWARD = false, // bool KEEP_ORDER = false, // bool KEEP_NODE_NUM = false> struct PartitionerBase { typedef _VertexId VertexId; typedef _SizeT SizeT; typedef _Value Value; typedef Csr<VertexId, SizeT, Value> GraphT; // Members public: // Number of GPUs to be partitioned int num_gpus; int Status; float factor; int seed; // Original graph const GraphT* graph; // Partitioned graphs GraphT* sub_graphs; int** partition_tables; VertexId** convertion_tables; VertexId** original_vertexes; int** backward_partitions; VertexId** backward_convertions; SizeT** backward_offsets; SizeT** in_counter; SizeT** out_offsets; SizeT** out_counter; bool enable_backward; bool keep_order; bool keep_node_num; // Methods /* * @brief ThreadSlice data structure. * * @tparam VertexId * @tparam SizeT * @tparam Value */ template <typename VertexId, typename SizeT, typename Value> struct ThreadSlice { public: const GraphT* graph; GraphT* sub_graph; GraphT* sub_graphs; int thread_num, num_gpus; util::cpu_mt::CPUBarrier* cpu_barrier; CUTThread thread_Id; int* partition_table0; int** partition_table1; int** partition_tables; VertexId* convertion_table0; VertexId** convertion_table1; VertexId** convertion_tables; int** backward_partitions; VertexId** backward_convertions; SizeT** backward_offsets; VertexId** original_vertexes; SizeT** in_counter; SizeT** out_offsets; SizeT** out_counter; bool enable_backward; bool keep_order; bool keep_node_num; }; /** * @brief PartitionerBase default constructor. */ PartitionerBase(bool _enable_backward = false, bool _keep_order = false, bool _keep_node_num = false) : enable_backward(_enable_backward), keep_order(_keep_order), keep_node_num(_keep_node_num), Status(0), num_gpus(0), graph(NULL), sub_graphs(NULL), partition_tables(NULL), convertion_tables(NULL), original_vertexes(NULL), in_counter(NULL), out_offsets(NULL), out_counter(NULL), backward_partitions(NULL), backward_convertions(NULL), backward_offsets(NULL) {} /* * @brief PartitionerBase default destructor. */ virtual ~PartitionerBase() { Release(); } /* * @brief Initialization function. 
* * @param[in] graph * @param[in] num_gpus */ cudaError_t Init(const GraphT& graph, int num_gpus) { cudaError_t retval = cudaSuccess; this->num_gpus = num_gpus; this->graph = &graph; Release(); sub_graphs = new GraphT[num_gpus]; partition_tables = new int*[num_gpus + 1]; convertion_tables = new VertexId*[num_gpus + 1]; original_vertexes = new VertexId*[num_gpus]; in_counter = new SizeT*[num_gpus]; out_offsets = new SizeT*[num_gpus]; out_counter = new SizeT*[num_gpus]; if (enable_backward) { backward_partitions = new int*[num_gpus]; backward_convertions = new VertexId*[num_gpus]; backward_offsets = new SizeT*[num_gpus]; } for (int i = 0; i < num_gpus + 1; i++) { partition_tables[i] = NULL; convertion_tables[i] = NULL; if (i != num_gpus) { original_vertexes[i] = NULL; if (enable_backward) { backward_partitions[i] = NULL; backward_convertions[i] = NULL; backward_offsets[i] = NULL; } } } partition_tables[0] = (int*)malloc(sizeof(int) * graph.nodes); convertion_tables[0] = (VertexId*)malloc(sizeof(VertexId) * graph.nodes); memset(partition_tables[0], 0, sizeof(int) * graph.nodes); memset(convertion_tables[0], 0, sizeof(VertexId) * graph.nodes); for (int i = 0; i < num_gpus; i++) { in_counter[i] = new SizeT[num_gpus + 1]; out_offsets[i] = new SizeT[num_gpus + 1]; out_counter[i] = new SizeT[num_gpus + 1]; memset(in_counter[i], 0, sizeof(SizeT) * (num_gpus + 1)); memset(out_offsets[i], 0, sizeof(SizeT) * (num_gpus + 1)); memset(out_counter[i], 0, sizeof(SizeT) * (num_gpus + 1)); } Status = 1; return retval; } /* * @breif Release function. */ cudaError_t Release() { cudaError_t retval = cudaSuccess; if (Status == 0) return retval; for (int i = 0; i < num_gpus + 1; i++) { free(convertion_tables[i]); convertion_tables[i] = NULL; free(partition_tables[i]); partition_tables[i] = NULL; if (i == num_gpus) continue; free(original_vertexes[i]); original_vertexes[i] = NULL; delete[] in_counter[i]; in_counter[i] = NULL; delete[] out_offsets[i]; out_offsets[i] = NULL; delete[] out_counter[i]; out_counter[i] = NULL; if (enable_backward) { free(backward_partitions[i]); backward_partitions[i] = NULL; free(backward_convertions[i]); backward_convertions[i] = NULL; free(backward_offsets[i]); backward_offsets[i] = NULL; } } delete[] convertion_tables; convertion_tables = NULL; delete[] partition_tables; partition_tables = NULL; delete[] original_vertexes; original_vertexes = NULL; if (num_gpus > 1) delete[] sub_graphs; sub_graphs = NULL; delete[] in_counter; in_counter = NULL; delete[] out_offsets; out_offsets = NULL; delete[] out_counter; out_counter = NULL; if (enable_backward) { delete[] backward_convertions; backward_convertions = NULL; delete[] backward_partitions; backward_partitions = NULL; delete[] backward_offsets; backward_offsets = NULL; } Status = 0; return retval; } /** * @brief MakeSubGraph_Thread function. 
* * @param[in] thread_data_ * * \return CUT_THREADPROC */ static CUT_THREADPROC MakeSubGraph_Thread(void* thread_data_) { ThreadSlice<VertexId, SizeT, Value>* thread_data = (ThreadSlice<VertexId, SizeT, Value>*)thread_data_; const GraphT* graph = thread_data->graph; GraphT* sub_graph = thread_data->sub_graph; GraphT* sub_graphs = thread_data->sub_graphs; int gpu = thread_data->thread_num; util::cpu_mt::CPUBarrier* cpu_barrier = thread_data->cpu_barrier; int num_gpus = thread_data->num_gpus; int* partition_table0 = thread_data->partition_table0; VertexId* convertion_table0 = thread_data->convertion_table0; int** partition_table1 = thread_data->partition_table1; VertexId** convertion_tables = thread_data->convertion_tables; int** partition_tables = thread_data->partition_tables; VertexId** convertion_table1 = thread_data->convertion_table1; VertexId** original_vertexes = thread_data->original_vertexes; int** backward_partitions = thread_data->backward_partitions; VertexId** backward_convertions = thread_data->backward_convertions; SizeT** backward_offsets = thread_data->backward_offsets; SizeT** out_offsets = thread_data->out_offsets; SizeT* in_counter = thread_data->in_counter[gpu]; SizeT* out_counter = thread_data->out_counter[gpu]; bool enable_backward = thread_data->enable_backward; bool keep_node_num = thread_data->keep_node_num; // bool keep_order = thread_data->keep_order; SizeT num_nodes = 0, node_counter; SizeT num_edges = 0, edge_counter; VertexId* marker = new VertexId[graph->nodes]; VertexId* tconvertion_table = new VertexId[graph->nodes]; SizeT in_counter_ = 0; memset(marker, 0, sizeof(int) * graph->nodes); memset(out_counter, 0, sizeof(SizeT) * (num_gpus + 1)); for (SizeT node = 0; node < graph->nodes; node++) if (partition_table0[node] == gpu) { convertion_table0[node] = keep_node_num ? node : out_counter[gpu]; tconvertion_table[node] = keep_node_num ? node : out_counter[gpu]; marker[node] = 1; for (SizeT edge = graph->row_offsets[node]; edge < graph->row_offsets[node + 1]; edge++) { SizeT neighbour = graph->column_indices[edge]; int peer = partition_table0[neighbour]; if ((peer != gpu) && (marker[neighbour] == 0)) { tconvertion_table[neighbour] = keep_node_num ? neighbour : out_counter[peer]; out_counter[peer]++; marker[neighbour] = 1; num_nodes++; } } out_counter[gpu]++; num_nodes++; num_edges += graph->row_offsets[node + 1] - graph->row_offsets[node]; } delete[] marker; marker = NULL; out_offsets[gpu][0] = 0; node_counter = out_counter[gpu]; for (int peer = 0; peer < num_gpus; peer++) { if (peer == gpu) continue; int peer_ = peer < gpu ? peer + 1 : peer; out_offsets[gpu][peer_] = node_counter; node_counter += out_counter[peer]; } out_offsets[gpu][num_gpus] = node_counter; // util::cpu_mt::PrintCPUArray<SizeT, // SizeT>("out_offsets",out_offsets[gpu],num_gpus+1,gpu); util::cpu_mt::IncrementnWaitBarrier(cpu_barrier, gpu); node_counter = 0; for (int peer = 0; peer < num_gpus; peer++) { if (peer == gpu) continue; int peer_ = peer < gpu ? peer + 1 : peer; int gpu_ = gpu < peer ? 
gpu + 1 : gpu; in_counter[peer_] = out_offsets[peer][gpu_ + 1] - out_offsets[peer][gpu_]; node_counter += in_counter[peer_]; } in_counter[num_gpus] = node_counter; if (keep_node_num) num_nodes = graph->nodes; if (graph->node_values == NULL && graph->edge_values == NULL) sub_graph->template FromScratch<false, false>(num_nodes, num_edges); else if (graph->node_values != NULL && graph->edge_values == NULL) sub_graph->template FromScratch<false, true>(num_nodes, num_edges); else if (graph->node_values == NULL && graph->edge_values != NULL) sub_graph->template FromScratch<true, false>(num_nodes, num_edges); else sub_graph->template FromScratch<true, true>(num_nodes, num_edges); if (convertion_table1[0] != NULL) free(convertion_table1[0]); if (partition_table1[0] != NULL) free(partition_table1[0]); if (original_vertexes[0] != NULL) free(original_vertexes[0]); convertion_table1[0] = (VertexId*)malloc(sizeof(VertexId) * num_nodes); partition_table1[0] = (int*)malloc(sizeof(int) * num_nodes); original_vertexes[0] = (VertexId*)malloc(sizeof(VertexId) * num_nodes); if (enable_backward) { if (backward_partitions[gpu] != NULL) free(backward_partitions[gpu]); if (backward_convertions[gpu] != NULL) free(backward_convertions[gpu]); if (backward_offsets[gpu] != NULL) free(backward_offsets[gpu]); backward_offsets[gpu] = (SizeT*)malloc(sizeof(SizeT) * (num_nodes + 1)); backward_convertions[gpu] = (VertexId*)malloc(sizeof(VertexId) * in_counter[num_gpus]); backward_partitions[gpu] = (int*)malloc(sizeof(int) * in_counter[num_gpus]); if (keep_node_num) { marker = new VertexId[num_gpus * graph->nodes]; memset(marker, 0, sizeof(VertexId) * num_gpus * graph->nodes); } else { marker = new VertexId[num_gpus * out_counter[gpu]]; memset(marker, 0, sizeof(VertexId) * num_gpus * out_counter[gpu]); } for (SizeT neighbour = 0; neighbour < graph->nodes; neighbour++) if (partition_table0[neighbour] != gpu) { for (SizeT edge = graph->row_offsets[neighbour]; edge < graph->row_offsets[neighbour + 1]; edge++) { VertexId node = graph->column_indices[edge]; if (partition_table0[node] != gpu) continue; marker[convertion_table0[node] * num_gpus + partition_table0[neighbour]] = 1 + neighbour; } } } edge_counter = 0; for (SizeT node = 0; node < graph->nodes; node++) if (partition_table0[node] == gpu) { VertexId node_ = tconvertion_table[node]; sub_graph->row_offsets[node_] = edge_counter; if (graph->node_values != NULL) sub_graph->node_values[node_] = graph->node_values[node]; partition_table1[0][node_] = 0; convertion_table1[0][node_] = node_; original_vertexes[0][node_] = node; for (SizeT edge = graph->row_offsets[node]; edge < graph->row_offsets[node + 1]; edge++) { SizeT neighbour = graph->column_indices[edge]; int peer = partition_table0[neighbour]; int peer_ = peer < gpu ? peer + 1 : peer; if (peer == gpu) peer_ = 0; VertexId neighbour_ = keep_node_num ? neighbour : tconvertion_table[neighbour] + out_offsets[gpu][peer_]; sub_graph->column_indices[edge_counter] = neighbour_; if (graph->edge_values != NULL) sub_graph->edge_values[edge_counter] = graph->edge_values[edge]; if (peer != gpu && !keep_node_num) { sub_graph->row_offsets[neighbour_] = num_edges; partition_table1[0][neighbour_] = peer_; convertion_table1[0][neighbour_] = convertion_table0[neighbour]; original_vertexes[0][neighbour_] = neighbour; } edge_counter++; } } else if (keep_node_num) { sub_graph->row_offsets[node] = edge_counter; partition_table1[0][node] = partition_table0[node] < gpu ? 
partition_table0[node] + 1 : partition_table0[node]; convertion_table1[0][node] = convertion_table0[node]; original_vertexes[0][node] = node; } sub_graph->row_offsets[num_nodes] = num_edges; if (enable_backward) { in_counter_ = 0; util::cpu_mt::IncrementnWaitBarrier(cpu_barrier, gpu); if (!keep_node_num) { for (VertexId node_ = 0; node_ < num_nodes; node_++) { backward_offsets[gpu][node_] = in_counter_; if (partition_table1[0][node_] != 0) { continue; } for (int peer = 0; peer < num_gpus; peer++) { if (marker[node_ * num_gpus + peer] == 0) continue; int peer_ = peer < gpu ? peer + 1 : peer; int gpu_ = gpu < peer ? gpu + 1 : gpu; VertexId neighbour = marker[node_ * num_gpus + peer] - 1; VertexId neighbour_ = convertion_table0[neighbour]; for (SizeT edge = sub_graphs[peer].row_offsets[neighbour_]; edge < sub_graphs[peer].row_offsets[neighbour_ + 1]; edge++) { VertexId _node = sub_graphs[peer].column_indices[edge]; if (convertion_tables[peer + 1][_node] == node_ && partition_tables[peer + 1][_node] == gpu_) { backward_convertions[gpu][in_counter_] = _node; break; } } backward_partitions[gpu][in_counter_] = peer_; in_counter_++; } } backward_offsets[gpu][num_nodes] = in_counter_; } else { delete[] backward_partitions[gpu]; backward_partitions[gpu] = new int[num_nodes * (num_gpus - 1)]; delete[] backward_convertions[gpu]; backward_convertions[gpu] = new VertexId[num_nodes * (num_gpus - 1)]; for (VertexId node = 0; node < num_nodes; node++) { backward_offsets[gpu][node] = node * (num_gpus - 1); for (int peer = 1; peer < num_gpus; peer++) { backward_convertions[gpu][node * (num_gpus - 1) + peer - 1] = node; backward_partitions[gpu][node * (num_gpus - 1) + peer - 1] = peer; } } backward_offsets[gpu][num_nodes] = num_nodes * (num_gpus - 1); } delete[] marker; marker = NULL; } out_counter[num_gpus] = 0; in_counter[num_gpus] = 0; for (int peer = 0; peer < num_gpus; peer++) { int peer_ = peer < gpu ? peer + 1 : peer; int gpu_ = peer < gpu ? gpu : gpu + 1; if (peer == gpu) { peer_ = 0; gpu_ = 0; } out_counter[peer_] = out_offsets[gpu][peer_ + 1] - out_offsets[gpu][peer_]; out_counter[num_gpus] += out_counter[peer_]; in_counter[peer_] = out_offsets[peer][gpu_ + 1] - out_offsets[peer][gpu_]; in_counter[num_gpus] += in_counter[peer_]; } // util::cpu_mt::PrintCPUArray<SizeT, // SizeT>("out_counter",out_counter,num_gpus+1,gpu); // util::cpu_mt::PrintCPUArray<SizeT, SizeT>("in_counter ", // in_counter,num_gpus+1,gpu); delete[] tconvertion_table; tconvertion_table = NULL; CUT_THREADEND; } /** * @brief Make subgraph function. * * \return cudaError_t object indicates the success of all CUDA calls. 
*/ cudaError_t MakeSubGraph() { cudaError_t retval = cudaSuccess; ThreadSlice<VertexId, SizeT, Value>* thread_data = new ThreadSlice<VertexId, SizeT, Value>[num_gpus]; CUTThread* thread_Ids = new CUTThread[num_gpus]; util::cpu_mt::CPUBarrier cpu_barrier; cpu_barrier = util::cpu_mt::CreateBarrier(num_gpus); for (int gpu = 0; gpu < num_gpus; gpu++) { thread_data[gpu].graph = graph; thread_data[gpu].sub_graph = &(sub_graphs[gpu]); thread_data[gpu].sub_graphs = sub_graphs; thread_data[gpu].thread_num = gpu; thread_data[gpu].cpu_barrier = &cpu_barrier; thread_data[gpu].num_gpus = num_gpus; thread_data[gpu].partition_table0 = partition_tables[0]; thread_data[gpu].convertion_table0 = convertion_tables[0]; thread_data[gpu].partition_tables = partition_tables; thread_data[gpu].partition_table1 = &(partition_tables[gpu + 1]); thread_data[gpu].convertion_table1 = &(convertion_tables[gpu + 1]); thread_data[gpu].original_vertexes = &(original_vertexes[gpu]); thread_data[gpu].convertion_tables = convertion_tables; thread_data[gpu].enable_backward = enable_backward; thread_data[gpu].keep_node_num = keep_node_num; thread_data[gpu].keep_order = keep_order; if (enable_backward) { thread_data[gpu].backward_partitions = backward_partitions; thread_data[gpu].backward_convertions = backward_convertions; thread_data[gpu].backward_offsets = backward_offsets; } thread_data[gpu].in_counter = in_counter; thread_data[gpu].out_offsets = out_offsets; thread_data[gpu].out_counter = out_counter; thread_data[gpu].thread_Id = cutStartThread((CUT_THREADROUTINE) & (MakeSubGraph_Thread), (void*)(&(thread_data[gpu]))); thread_Ids[gpu] = thread_data[gpu].thread_Id; } cutWaitForThreads(thread_Ids, num_gpus); util::cpu_mt::DestoryBarrier(&cpu_barrier); delete[] thread_Ids; thread_Ids = NULL; delete[] thread_data; thread_data = NULL; Status = 2; return retval; } /** * @brief Partition function. * * @param[in] sub_graphs * @param[in] partition_tables * @param[in] convertion_tables * @param[in] original_vertexes * @param[in] out_offsets * @param[in] cross_counter * @param[in] factor * @param[in] seed * * \return cudaError_t object indicates the success of all CUDA calls. */ cudaError_t Partition(GraphT*& sub_graphs, int**& partition_tables, VertexId**& convertion_tables, VertexId**& original_vertexes, SizeT**& out_offsets, SizeT**& cross_counter, float factor = -1, int seed = -1) { SizeT** backward_offsets = NULL; int** backward_partitions = NULL; VertexId** backward_convertions = NULL; return Partition(sub_graphs, partition_tables, convertion_tables, original_vertexes, in_counter, out_offsets, out_counter, backward_offsets, backward_partitions, backward_convertions, factor, seed); } /** * @brief Partition function. * * @param[in] sub_graphs * @param[in] partition_tables * @param[in] convertion_tables * @param[in] original_vertexes * @param[in] in_counter * @param[in] out_offsets * @param[in] out_counter * @param[in] backward_offsets * @param[in] backward_partitions * @param[in] backward_convertions * @param[in] factor * @param[in] seed * * \return cudaError_t object indicates the success of all CUDA calls. 
*/ virtual cudaError_t Partition(GraphT*& sub_graphs, int**& partition_tables, VertexId**& convertion_tables, VertexId**& original_vertexes, SizeT**& in_counter, SizeT**& out_offsets, SizeT**& out_counter, SizeT**& backward_offsets, int**& backward_partitions, VertexId**& backward_convertions, float factor = -1, int seed = -1) { return util::GRError("PartitionBase::Partition is undefined", __FILE__, __LINE__); } }; } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
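// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the Gunrock sources above): the first
// counting pass of MakeSubGraph_Thread, reduced to its essence. For one GPU it
// counts the vertices it owns plus the "ghost" vertices reached through cut
// edges, and the edges of the owned vertices, over a CSR graph. The conversion
// tables, out_offsets and backward structures of the real partitioner are
// omitted; all names below are hypothetical.
#include <utility>
#include <vector>

struct CsrGraph {
  int nodes;
  std::vector<int> row_offsets;     // size nodes + 1
  std::vector<int> column_indices;  // size edges
};

// partition[v] is the GPU that owns vertex v.
// Returns {owned vertices + ghosts, edges of owned vertices} for `gpu`.
std::pair<int, long long> CountSubgraph(const CsrGraph& g,
                                        const std::vector<int>& partition,
                                        int gpu) {
  std::vector<char> marked(g.nodes, 0);
  int num_nodes = 0;
  long long num_edges = 0;
  for (int v = 0; v < g.nodes; ++v) {
    if (partition[v] != gpu) continue;
    marked[v] = 1;
    ++num_nodes;
    for (int e = g.row_offsets[v]; e < g.row_offsets[v + 1]; ++e) {
      const int nbr = g.column_indices[e];
      if (partition[nbr] != gpu && !marked[nbr]) {
        marked[nbr] = 1;  // boundary vertex owned by a peer GPU
        ++num_nodes;
      }
    }
    num_edges += g.row_offsets[v + 1] - g.row_offsets[v];
  }
  return {num_nodes, num_edges};
}
// The real thread function additionally records, per peer GPU, how many ghosts
// were found (out_counter) so that out_offsets can place them after the
// locally owned vertices in the subgraph's vertex numbering.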
#include "Matrix.h" #include <iostream> #include <fstream> using namespace std; #include "ErrorCode.h" // Host 静态方法:newMatrix(创建矩阵) __host__ int MatrixBasicOp::newMatrix(Matrix **outmat) { MatrixCuda *resmatCud; // 对应于返回的 outmat 的 MatrixCuda 型数据。 // 检查装载输出矩阵的指针是否为 NULL。 if (outmat == NULL) return NULL_POINTER; // 申请矩阵元数据的空间。 resmatCud = new MatrixCuda; // 初始化矩阵上的数据为空矩阵。 resmatCud->matMeta.width = 0; resmatCud->matMeta.height = 0; resmatCud->matMeta.roiX1 = 0; resmatCud->matMeta.roiY1 = 0; resmatCud->matMeta.roiX2 = 0; resmatCud->matMeta.roiY2 = 0; resmatCud->matMeta.matData = NULL; resmatCud->deviceId = -1; resmatCud->pitchWords = 0; // 将 Matrix 赋值给输出参数。 *outmat = &(resmatCud->matMeta); // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:deleteMatrix(销毁矩阵) __host__ int MatrixBasicOp::deleteMatrix(Matrix *inmat) { // 检查矩阵的指针是否为 NULL。 if (inmat == NULL) return NULL_POINTER; // 根据输入参数的 Matrix 型指针,得到对应的 MatrixCuda 型数据。 MatrixCuda *inmatCud = MATRIX_CUDA(inmat); // 检查矩阵所在的地址空间是否合法,如果矩阵所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (inmatCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 释放矩阵数据,即像素数据。 if (inmat->matData == NULL || inmat->width == 0 || inmat->height == 0 || inmatCud->pitchWords == 0) { // 如果输入矩阵是空的,则不进行矩阵数据释放操作(因为本来也没有数据可被 // 释放)。 // Do Nothing; } if (inmatCud->deviceId < 0) { // 对于数据存储于 Host 内存,直接利用 delete 关键字释放矩阵数据。 delete[] inmat->matData; } else if (inmatCud->deviceId == curdevid) { // 对于数据存储于当前 Device 内存中,则直接利用 cudaFree 接口释放该矩阵 // 数据。 cudaFree(inmat->matData); } else { // 对于数据存储于非当前 Device 内存中,则需要首先切换设备,将该设备作为 // 当前 Device,然后释放之,最后还需要将设备切换回来以保证后续处理的正 // 确性。 cudaSetDevice(inmatCud->deviceId); cudaFree(inmat->matData); cudaSetDevice(curdevid); } // 释放矩阵的元数据。 delete inmatCud; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:makeAtCurrentDevice(在当前 Device 内存中构建数据) __host__ int MatrixBasicOp::makeAtCurrentDevice(Matrix *mat, size_t width, size_t height) { // 检查输入矩阵是否为 NULL if (mat == NULL) return NULL_POINTER; // 检查给定的矩阵的长宽是否合法 if (width < 1 || height < 1) return INVALID_DATA; // 检查矩阵是否为空矩阵 if (mat->matData != NULL) return UNMATCH_IMG; // 获取 mat 对应的 MatrixCuda 型数据。 MatrixCuda *matCud = MATRIX_CUDA(mat); // 在当前的 Device 上申请存储指定尺寸图片所需要的内存空间。 cudaError_t cuerrcode; float *newspace; size_t pitchbytes; cuerrcode = cudaMallocPitch((void **)(&newspace), &pitchbytes, width * sizeof (float), height); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 修改矩阵的元数据。其中 ROI 被设为整幅图片。 mat->width = width; mat->height = height; mat->roiX1 = 0; mat->roiY1 = 0; mat->roiX2 = width; mat->roiY2 = height; mat->matData = newspace; matCud->deviceId = curdevid; matCud->pitchWords = pitchbytes / sizeof (float); // 这里我们坚信由 cudaMallocPitch 得到的 pitch 是可以被 sizeof (float) 整除 // 的。如果真的不能整除,上面的那行除法会带来错误(当然,这在实际 CUDA 中是 // 不会出现的)。 // 处理完毕,退出。 return NO_ERROR; } // Host 静态方法:makeAtHost(在 Host 内存中构建数据) __host__ int MatrixBasicOp::makeAtHost(Matrix *mat, size_t width, size_t height) { // 检查输入矩阵是否为 NULL if (mat == NULL) return NULL_POINTER; // 检查给定的矩阵的长宽是否合法 if (width < 1 || height < 1) return INVALID_DATA; // 检查矩阵是否为空矩阵 if (mat->matData != NULL) return UNMATCH_IMG; // 获取 mat 对应的 MatrixCuda 型数据。 MatrixCuda *matCud = MATRIX_CUDA(mat); // 为矩阵数据在 Host 内存中申请空间 mat->matData = new float[width * height]; if (mat->matData == NULL) return OUT_OF_MEM; // 设置矩阵中的元数据 mat->width = width; mat->height = height; mat->roiX1 = 0; mat->roiY1 = 0; mat->roiX2 = width; mat->roiY2 = height; 
matCud->deviceId = -1; matCud->pitchWords = width; // 处理完毕,退出 return NO_ERROR; } // Host 静态方法:readFromFile(从文件读取矩阵) __host__ int MatrixBasicOp::readFromFile(const char *filepath, Matrix *outmat) { // 检查文件路径和矩阵是否为 NULL。 if (filepath == NULL || outmat == NULL) return NULL_POINTER; // 根据输入参数的 Matrix 型指针,得到对应的 MatrixCuda 型数据。 MatrixCuda *outmatCud = MATRIX_CUDA(outmat); // 检查矩阵所在的地址空间是否合法,如果矩阵所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetErrorString(cudaGetDeviceCount(&devcnt)); if (outmatCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 打开矩阵文件。 ifstream matfile(filepath, ios::in | ios::binary); if (!matfile) return NO_FILE; // 读取文件头部的文件类型信息,如果文件的头两个字节不是 BM,则说明该文件不 // 是 BMP 文件,则报错。 char headstr[2] = { '\0' }; matfile.seekg(0x0000, ios::beg); matfile.read(headstr, 2); if (headstr[0] != 'B' || headstr[1] != 'M') return WRONG_FILE; // 读取文件中的 BPP 字段(每个像素占用的比特数量),如果 BPP 的值不为 8,说 // 明该文件不是一个灰度 BMP 矩阵,则报错。 unsigned short bpp = 0; matfile.seekg(0x001C, ios::beg); matfile.read(reinterpret_cast<char *>(&bpp), 2); if (bpp != 8) return WRONG_FILE; // 从文件中读取矩阵宽度和高度信息。 unsigned int width = 0, height = 0; matfile.seekg(0x0012, ios::beg); matfile.read(reinterpret_cast<char *>(&width), 4); matfile.read(reinterpret_cast<char *>(&height), 4); // 如果矩阵的尺寸不合法,则报错退出。 if (width < 1 || height < 1) return WRONG_FILE; // 从文件中读取像素数据所在的文件中的偏移位置。 unsigned int dataoff = 0; matfile.seekg(0x000A, ios::beg); matfile.read(reinterpret_cast<char *>(&dataoff), 4); // 获取存放矩阵像素数据的 Host 内存空间。本着尽量重用的思想,如果原来的矩阵 // 内存数据是存储于 Host 内存,且尺寸和新的矩阵尺寸一致时,则不重新申请 // Host 内存空间,直接利用原来的空间存放新的矩阵数据。 float *matdata = outmat->matData; bool reusedata = true; if (outmat->matData == NULL || outmatCud->deviceId >= 0 || outmat->width != width || outmat->height != height) { matdata = new float[width * height]; // 如果没有申请到新的数据,则报错。 if (matdata == NULL) return OUT_OF_MEM; reusedata = false; } // 计算 BMP 文件中每行的 Padding 尺寸。在 BMP 文件中,每行的数据都需要保证 // 4 字节对齐。如果某行的宽度不是 4 的整数倍(注意,灰度图中每行的像素个数 // 同每行实际数据占用的字节数是相等的),则需要补充一些字节,使其达到 4 的 // 整数倍。 unsigned int dummybytes = (4 - (width & 3)) & 3; // 将文件指针移动到数据存储的开始位置 matfile.seekg(dataoff, ios::beg); // 文件读取的数据缓冲区,每次读取一行的数据。 unsigned char *pbufdata = new unsigned char[width + dummybytes]; // 读取矩阵中的各行的矩阵数据。由于 BMP 采用了右手坐标,即矩阵的左下角点为 // 原点,整个矩阵位于第一象限,而我们系统内部使用的是左手坐标,即矩阵的左上 // 角点为原点,整个矩阵亦位于第一象限。这样,BMP 文件中的第一行矩阵数据,其 // 时是最后一行数据,因此,这个关于 r 的 for 循环是从大到小的循环。 for (int r = height - 1; r >= 0; r--) { // 读取矩阵数据(每次读取一行的数据) matfile.read(reinterpret_cast<char *>(pbufdata), width + dummybytes); // 通过这个 for 循环将读取到的一行数据逐一的转换成浮点型归一化的数据, // 即从 0.0 到 1.0 范围内测数据。 for (int c = 0; c < width; c++) { // 将对应列的数据归一化到 [0.0, 1.0],存放到矩阵的浮点型数组中。 outmat->matData[r * outmatCud->pitchWords + c] = ((float)pbufdata[c]) / 255.0f; } } // 到此为止,矩阵数据读取完毕,这是可以安全的释放掉矩阵原来的数据。一直拖到 // 最后才释放原来的数据,正是为了防止一旦矩阵读取失败,不至于让系统进入一个 // 混乱的状态,因为原来的数据还是处于一个可用的状态。 if (reusedata == false || outmat->matData != NULL) { if (outmatCud->deviceId < 0) { // 如果原来的数据存放于 Host 内存中,则使用 delete 关键字释放。 delete[] outmat->matData; } else { // 如果原来的数据存放于 Device 内存中,则首先调到对应的 Device,然 // 后使用 cudaFree 释放掉内存。 cudaSetDevice(outmatCud->deviceId); cudaFree(outmat->matData); cudaSetDevice(curdevid); } } // 为矩阵赋值新的元数据。这里 ROI 被重置为整幅矩阵。 outmat->width = width; outmat->height = height; outmat->roiX1 = 0; outmat->roiY1 = 0; outmat->roiX2 = width; outmat->roiY2 = height; outmat->matData = matdata; outmatCud->deviceId = -1; outmatCud->pitchWords = width; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:writeToFile(将矩阵写入文件) __host__ int 
MatrixBasicOp::writeToFile(const char *filepath, Matrix *inmat) { // 检查文件路径和矩阵是否为 NULL。 if (filepath == NULL || inmat == NULL) return NULL_POINTER; // 打开需要写入的文件。 ofstream matfile(filepath, ios::out | ios::binary); if (!matfile) return NO_FILE; // 根据输入参数的 Matrix 型指针,得到对应的 MatrixCuda 型数据。 MatrixCuda *inmatCud = MATRIX_CUDA(inmat); // 将图片的数据拷贝回 Host 内存中,这样图片就可以被下面的代码所读取,然后将 // 矩阵的数据写入到磁盘中。这里需要注意的是,安排图片的拷贝过程在文件打开之 // 后是因为,如果一旦文件打开失败,则不会改变矩阵在内存中的存储状态,这可能 // 会对后续处理更加有利。 int errcode; errcode = MatrixBasicOp::copyToHost(inmat); if (errcode < 0) return errcode; // 计算一些和 BMP 矩阵相关的参数: // 计算 BMP 文件中每行的 Padding 尺寸。在 BMP 文件中,每行的数据都需要保证 // 4 字节对齐。如果某行的宽度不是 4 的整数倍(注意,灰度图中每行的像素个数 // 同每行实际数据占用的字节数是相等的),则需要补充一些字节,使其达到 4 的 // 整数倍。 unsigned int dummybytes = (4 - (inmat->width & 3)) & 3; // 计算在磁盘上存储图片总共需要的字节数量,这个数量包括了上面提到的 Padding // 的尺寸。 unsigned int datalen = inmat->height * (inmat->width + dummybytes); // 在存储到磁盘中后,像素数据实际的起始位置。因为 BMP 文件存在信息头,实际 // 的像素数据是在这些信息头的后面的。对于系统中使用到的灰度矩阵来说,信息头 // 包含了两个部分,最前面的是矩阵的元数据(如矩阵的宽度、高度;数据的尺寸等 // 信息),紧随其后的是颜色表,颜色表共有 256 个条目,对应了 256 级灰度,每 // 个条目包含了 4 个字节,这四个字节分别为 RGBA 四个通道的亮度值。 unsigned int dataoff = 4 * 256 + 54; // 向文件中写入 BMP 头信息 unsigned short ustemp; // 这三个变量用来保存头信息中的临时域的值,三个变量 unsigned int uitemp; // 用来处理不同的数据类型。 int sitemp; // 文件类型头 ustemp = 0x4D42; matfile.write(reinterpret_cast<char *>(&ustemp), 2); // 文件长度 uitemp = datalen + dataoff; matfile.write(reinterpret_cast<char *>(&uitemp), 4); // 保留区段甲 ustemp = 0; matfile.write(reinterpret_cast<char *>(&ustemp), 2); // 保留区段乙 ustemp = 0; matfile.write(reinterpret_cast<char *>(&ustemp), 2); // 像素数据在文件中开始的位置 uitemp = dataoff; matfile.write(reinterpret_cast<char *>(&uitemp), 4); // 矩阵信息头尺寸 uitemp = 40; matfile.write(reinterpret_cast<char *>(&uitemp), 4); // 矩阵宽度 sitemp = inmat->width; matfile.write(reinterpret_cast<char *>(&sitemp), 4); // 矩阵高度 sitemp = inmat->height; matfile.write(reinterpret_cast<char *>(&sitemp), 4); // 矩阵层次数量 ustemp = 1; matfile.write(reinterpret_cast<char *>(&ustemp), 2); // BPP(每像素的比特数量) ustemp = 8; matfile.write(reinterpret_cast<char *>(&ustemp), 2); // 压缩算法 uitemp = 0; matfile.write(reinterpret_cast<char *>(&uitemp), 4); // 矩阵尺寸 uitemp = datalen; matfile.write(reinterpret_cast<char *>(&uitemp), 4); // 每公尺的像素数量(X-方向) sitemp = 0; matfile.write(reinterpret_cast<char *>(&sitemp), 4); // 每公尺的像素数量(Y-方向) sitemp = 0; matfile.write(reinterpret_cast<char *>(&sitemp), 4); // ClrUsed uitemp = 256; matfile.write(reinterpret_cast<char *>(&uitemp), 4); // ClrImportant uitemp = 0; matfile.write(reinterpret_cast<char *>(&uitemp), 4); // 写入颜色表信息 // 颜色信息共有 256 个条目,对应了 256 个灰度级;每个条目包含了 4 个颜色通 // 道的数据。由于矩阵是灰度矩阵,因此对于灰度为 i 的对应的颜色值为 < i, i, // i, FF >。 unsigned char coloritem[4] = { 0x00, 0x00, 0x00, 0xFF }; for (int i = 0; i < 256; i++) { coloritem[0] = coloritem[1] = coloritem[2] = i; matfile.write(reinterpret_cast<char *>(coloritem), 4); } // 保存一行图像像素数据的缓冲空间。 unsigned char *pbufdata = new unsigned char[inmat->width + dummybytes]; // 为了防止引起不必要的麻烦与错误,这里将补白区间内的数据手动赋值为 0。 for (int i = inmat->width; i < inmat->width + dummybytes; i++) pbufdata[i] = '\0'; // 逐行写入矩阵的像素数据。由于 BMP 采用了右手坐标,即矩阵的左下角点为原 // 点,整个矩阵位于第一象限,而我们系统内部使用的是左手坐标,即矩阵的左上角 // 点为原点,整个矩阵亦位于第一象限。这样,BMP 文件中的第一行矩阵数据,其时 // 是最后一行数据,因此该循环为从大到小的循环。 for (int r = inmat->height - 1; r >= 0; r--) { // 逐一将当前行的各列数据转换成对应的 [0, 255] 灰度值。 for (int c = 0; c < inmat->width; c++) { pbufdata[c] = (unsigned char)( inmat->matData[r * inmatCud->pitchWords + c] * 255.0f); } // 写入当前行的像素数据。 matfile.write(reinterpret_cast<char *>(pbufdata), inmat->width + dummybytes); } // 处理完毕,返回。 return NO_ERROR; } // 
Host 静态方法:copyToCurrentDevice(将矩阵拷贝到当前 Device 内存上) __host__ int MatrixBasicOp::copyToCurrentDevice(Matrix *mat) { // 检查矩阵是否为 NULL。 if (mat == NULL) return NULL_POINTER; // 根据输入参数的 Matrix 型指针,得到对应的 MatrixCuda 型数据。 MatrixCuda *matCud = MATRIX_CUDA(mat); // 检查矩阵所在的地址空间是否合法,如果矩阵所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (matCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果矩阵是一个不包含数据的空矩阵,则报错。 if (mat->matData == NULL || mat->width == 0 || mat->height == 0 || matCud->pitchWords == 0) return UNMATCH_IMG; // 对于不同的情况,将矩阵数据拷贝到当前设备上。 if (matCud->deviceId < 0) { // 如果矩阵的数据位于 Host 内存上,则需要在当前 Device 的内存空间上申请 // 空间,然后将 Host 内存上的数据进行 Padding 后拷贝到当前 Device 上。 float *devptr; // 新的数据空间,在当前 Device 上。 size_t pitch; // Padding 后的每行尺寸 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在当前设备上申请空间,使用 Pitch 版本的申请函数,用来进行 Padding。 cuerrcode = cudaMallocPitch((void **)(&devptr), &pitch, mat->width * sizeof (float), mat->height); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 进行 Padding 并拷贝数据到当前 Device 上。注意,这里 mat->pitchWords // == mat->width。 cuerrcode = cudaMemcpy2D(devptr, pitch, mat->matData, matCud->pitchWords, mat->width * sizeof (float), mat->height, cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 释放掉原来存储于 Host 内存上的矩阵数据。 delete[] mat->matData; // 更新矩阵数据,把新的在当前 Device 上申请的数据和相关数据写入矩阵元数 // 据中。 mat->matData = devptr; matCud->deviceId = curdevid; matCud->pitchWords = pitch / sizeof (float); // 操作完毕,返回。 return NO_ERROR; } else if (matCud->deviceId != curdevid) { // 对于数据存在其他 Device 的情况,仍旧要在当前 Device 上申请数据空间, // 并从另一个 Device 上拷贝数据到新申请的当前 Device 的数据空间中。 float *devptr; // 新申请的当前 Device 上的数据。 size_t datasize = matCud->pitchWords * mat->height * // 数据尺寸。 sizeof (float); cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在当前 Device 上申请空间。 cuerrcode = cudaMalloc((void **)(&devptr), datasize); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 将数据从矩阵原来的存储位置拷贝到当前的 Device 上。 cuerrcode = cudaMemcpyPeer(devptr, curdevid, mat->matData, matCud->deviceId, datasize); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 释放掉矩阵在原来的 Device 上的数据。 cudaFree(mat->matData); // 将新的矩阵数据信息写入到矩阵元数据中。 mat->matData = devptr; matCud->deviceId = curdevid; // 操作完成,返回。 return NO_ERROR; } // 对于其他情况,即矩阵数据本来就在当前 Device 上,则直接返回,不进行任何的 // 操作。 return NO_ERROR; } // Host 静态方法:copyToCurrentDevice(将矩阵拷贝到当前 Device 内存上) __host__ int MatrixBasicOp::copyToCurrentDevice(Matrix *srcmat, Matrix *dstmat) { // 检查输入矩阵是否为 NULL。 if (srcmat == NULL) return NULL_POINTER; // 如果输出矩阵为 NULL,或者输出矩阵和输入矩阵为同一各矩阵,则调用 In-place // 版本的函数。 if (dstmat == NULL || dstmat == srcmat) return copyToCurrentDevice(srcmat); // 获取 srcmat 和 dstmat 对应的 MatrixCuda 型指针。 MatrixCuda *srcmatCud = MATRIX_CUDA(srcmat); MatrixCuda *dstmatCud = MATRIX_CUDA(dstmat); // 用来存放旧的 dstmat 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 MatrixCuda olddstmatCud = *dstmatCud; // 旧的 dstmat 数据 bool reusedata = true; // 记录是否重用了原来的矩阵数据空间。 // 该值为 ture,则原来的数据空间被重 // 用,不需要在之后释放数据,否则需要 // 在最后释放旧的空间。 // 如果源矩阵是一个空矩阵,则不进行任何操作,直接报错。 if (srcmat->matData == NULL || srcmat->width == 0 || srcmat->height == 0 || srcmatCud->pitchWords == 0) return INVALID_DATA; // 检查矩阵所在的地址空间是否合法,如果矩阵所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srcmatCud->deviceId >= devcnt || dstmatCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标矩阵中存在有数据,则需要根据情况,若原来的数据不存储在当前的 // 
Device 上,或者即使存储在当前的 Device 上,但数据尺寸不匹配,则需要释放 // 掉原来申请的空间,以便重新申请合适的内存空间。此处不进行真正的释放操作, // 其目的在于当后续操作出现错误时,可以很快的恢复 dstmat 中原来的信息,使得 // 整个系统不会处于一个混乱的状态,本函数会在最后,确定 dstmat 被成功的更换 // 为了新的数据以后,才会真正的将原来的矩阵数据释放掉。 if (dstmatCud->deviceId != curdevid) { // 对于数据存在 Host 或其他的 Device 上,则直接释放掉原来的数据空间。 reusedata = 0; dstmat->matData = NULL; } else if (!(((srcmatCud->deviceId < 0 && srcmat->width == dstmat->width) || dstmatCud->pitchWords == srcmatCud->pitchWords) && srcmat->height == dstmat->height)) { // 对于数据存在于当前 Device 上,则需要检查数据的尺寸是否和源矩阵相匹 // 配。检查的标准包括:要求源矩阵的 Padding 后的行宽度和目标矩阵的相 // 同,源矩阵和目标矩阵的高度相同;如果源矩阵是存储在 Host 内存中的,则 // 仅要求源矩阵和目标矩阵的宽度相同即可。如果目标矩阵和源矩阵的尺寸不匹 // 配则仍旧需要释放目标矩阵原来的数据空间。 reusedata = 0; dstmat->matData = NULL; } // 将目标矩阵的尺寸更改为源矩阵的尺寸。 dstmat->width = srcmat->width; dstmat->height = srcmat->height; // 将目标矩阵的 ROI 更改为源矩阵的 ROI。 dstmat->roiX1 = srcmat->roiX1; dstmat->roiY1 = srcmat->roiY1; dstmat->roiX2 = srcmat->roiX2; dstmat->roiY2 = srcmat->roiY2; // 更改目标矩阵的数据存储位置为当前 Device。 dstmatCud->deviceId = curdevid; // 将矩阵数据从源矩阵中拷贝到目标矩阵中。 if (srcmatCud->deviceId < 0) { // 如果源矩阵数据存储于 Host 内存,则使用 cudaMemcpy2D 进行 Padding 形 // 式的拷贝。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 如果目标矩阵的 matData == NULL,说明目标矩阵原本要么是一个空矩阵,要 // 么目标矩阵原本的数据空间不合适,需要重新申请。这时,需要为目标矩阵重 // 新在当前 Device 上申请一个合适的数据空间。 if (dstmat->matData == NULL) { cuerrcode = cudaMallocPitch((void **)(&dstmat->matData), &dstmatCud->pitchWords, dstmat->width * sizeof (float), dstmat->height); if (cuerrcode != cudaSuccess) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标矩阵数据 // 恢复到目标矩阵中,以保证系统接下的操作不至于混乱。 *dstmatCud = olddstmatCud; return CUDA_ERROR; } // 将通过 cudaMallocPitch 得到的以字节为单位的 pitch 值转换为以字为 // 单位的值。 dstmatCud->pitchWords /= sizeof (float); } // 使用 cudaMemcpy2D 进行 Padding 形式的拷贝。 cuerrcode = cudaMemcpy2D(dstmat->matData, dstmatCud->pitchWords * sizeof (float), srcmat->matData, srcmatCud->pitchWords * sizeof (float), srcmat->width * sizeof (float), srcmat->height, cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标矩阵数据恢复到目 // 标矩阵中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。 if (!reusedata) cudaFree(dstmat->matData); *dstmatCud = olddstmatCud; return CUDA_ERROR; } } else { // 如果源矩阵数据存储于 Device 内存(无论是当前 Device 还是其他的 // Device),都是用端到端的拷贝。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 size_t datasize = srcmatCud->pitchWords * srcmat->height * sizeof (float); // 数据尺寸,单位:字节。 // 如果目标矩阵需要申请数据空间,则进行申请。 if (dstmat->matData == NULL) { cuerrcode = cudaMalloc((void **)(&dstmat->matData), datasize); if (cuerrcode != cudaSuccess) { // 如果发生错误,则需要首先恢复旧的矩阵数据,之后报错。恢复旧的 // 矩阵数据以防止系统进入混乱状态。 *dstmatCud = olddstmatCud; return CUDA_ERROR; } } // 更新目标矩阵的 Padding 尺寸与源矩阵相同。注意,因为源矩阵也存储在 // Device 上,在 Device 上的数据都是经过 Padding 的,又因为 // cudaMemcpyPeer 方法没有提供 Pitch 版本接口,所以,我们这里直接借用源 // 矩阵的 Padding 尺寸。 dstmatCud->pitchWords = srcmatCud->pitchWords; // 使用 cudaMemcpyPeer 实现两个 Device (可以为同一个 Device)间的数据 // 拷贝,将源矩阵在 Device 上的数据信息复制到目标矩阵中。 cuerrcode = cudaMemcpyPeer(dstmat->matData, curdevid, srcmat->matData, srcmatCud->deviceId, datasize); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标矩阵数据恢复到目 // 标矩阵中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。 if (!reusedata) cudaFree(dstmat->matData); *dstmatCud = olddstmatCud; return CUDA_ERROR; } } // 到此步骤已经说明新的矩阵数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddstmatCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddstmatCud.matMeta.matData != NULL) { if (olddstmatCud.deviceId < 0) { // 如果旧数据空间是 Host 内存上的,则需要无条件释放。 delete[] olddstmatCud.matMeta.matData; } else if (olddstmatCud.deviceId != curdevid) { // 如果旧数据空间不是当前 
Device 内存上的其他 Device 内存上的数据, // 则也需要无条件的释放。 cudaSetDevice(olddstmatCud.deviceId); cudaFree(olddstmatCud.matMeta.matData); cudaSetDevice(curdevid); } else if (!reusedata) { // 如果旧数据就在当前的 Device 内存上,则对于 reusedata 未置位的情 // 况进行释放,因为一旦置位,旧的数据空间就被用于承载新的数据,则不 // 能释放。 cudaFree(olddstmatCud.matMeta.matData); } } return NO_ERROR; } // Host 静态方法:copyToHost(将矩阵拷贝到 Host 内存上) __host__ int MatrixBasicOp::copyToHost(Matrix *mat) { // 检查矩阵是否为 NULL。 if (mat == NULL) return NULL_POINTER; // 根据输入参数的 Matrix 型指针,得到对应的 MatrixCuda 型数据。 MatrixCuda *matCud = MATRIX_CUDA(mat); // 检查矩阵所在的地址空间是否合法,如果矩阵所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (matCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果矩阵是一个不包含数据的空矩阵,则报错。 if (mat->matData == NULL || mat->width == 0 || mat->height == 0 || matCud->pitchWords == 0) return UNMATCH_IMG; // 对于不同的情况,将矩阵数据拷贝到当前设备上。 if (matCud->deviceId < 0) { // 如果矩阵位于 Host 内存上,则不需要进行任何操作。 return NO_ERROR; } else { // 如果矩阵的数据位于 Device 内存上,则需要在 Host 的内存空间上申请空 // 间,然后将数据消除 Padding 后拷贝到 Host 上。 float *hostptr; // 新的数据空间,在 Host 上。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在 Host 上申请空间。 hostptr = new float[mat->width * mat->height]; if (hostptr == NULL) return OUT_OF_MEM; // 将设备切换到数据所在的 Device 上。 cudaSetDevice(matCud->deviceId); // 消除 Padding 并拷贝数据 cuerrcode = cudaMemcpy2D(hostptr, mat->width * sizeof (float), mat->matData, matCud->pitchWords * sizeof (float), mat->width * sizeof (float), mat->height, cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝失败,则需要释放掉刚刚申请的内存空间,以防止内存泄漏。之 // 后报错返回。 delete[] hostptr; return CUDA_ERROR; } // 释放掉原来存储于 Device 内存上的矩阵数据。 cudaFree(mat->matData); // 对 Device 内存的操作完毕,将设备切换回当前 Device。 cudaSetDevice(curdevid); // 更新矩阵数据,把新的在当前 Device 上申请的数据和相关数据写入矩阵元数 // 据中。 mat->matData = hostptr; matCud->deviceId = -1; matCud->pitchWords = mat->width; // 操作完毕,返回。 return NO_ERROR; } // 程序永远也不会到达这个分支,因此如果到达这个分支,则说明系统紊乱。对于多 // 数编译器来说,会对此句报出不可达语句的 Warning,因此这里将其注释掉,以防 // 止不必要的 Warning。 //return UNKNOW_ERROR; } // Host 静态方法:copyToHost(将矩阵拷贝到 Host 内存上) __host__ int MatrixBasicOp::copyToHost(Matrix *srcmat, Matrix *dstmat) { // 检查输入矩阵是否为 NULL。 if (srcmat == NULL) return NULL_POINTER; // 如果输出矩阵为 NULL 或者和输入矩阵同为一个矩阵,则调用对应的 In-place 版 // 本的函数。 if (dstmat == NULL || dstmat == srcmat) return copyToHost(srcmat); // 获取 srcmat 和 dstmat 对应的 MatrixCuda 型指针。 MatrixCuda *srcmatCud = MATRIX_CUDA(srcmat); MatrixCuda *dstmatCud = MATRIX_CUDA(dstmat); // 用来存放旧的 dstmat 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 MatrixCuda olddstmatCud = *dstmatCud; // 旧的 dstmat 数据 bool reusedata = true; // 记录是否重用了原来的矩阵数据空间。 // 该值为 true,则原来的数据空间被重 // 用,不需要在之后释放数据,否则需要 // 释放旧的空间。 // 如果源矩阵是一个空矩阵,则不进行任何操作,直接报错。 if (srcmat->matData == NULL || srcmat->width == 0 || srcmat->height == 0 || srcmatCud->pitchWords == 0) return INVALID_DATA; // 检查矩阵所在的地址空间是否合法,如果矩阵所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srcmatCud->deviceId >= devcnt || dstmatCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标矩阵中存在有数据,则需要根据情况,若原来的数据不存储在 Host 上, // 或者即使存储在 Host 上,但数据尺寸不匹配,则需要释放掉原来申请的空间,以 // 便重新申请合适的内存空间。此处不进行真正的释放操作,其目的在于当后续操作 // 出现错误时,可以很快的恢复 dstmat 中原来的信息,使得整个系统不会处于一个 // 混乱的状态,本函数会在最后,确定 dstmat 被成功的更换为了新的数据以后,才 // 会真正的将原来的矩阵数据释放掉。 if (dstmatCud->deviceId >= 0) { // 对于数据存在于 Device 上,则亦直接释放掉原来的数据空间。 reusedata = 0; dstmat->matData = NULL; } else if (!(srcmat->width == dstmat->width && srcmat->height == 
dstmat->height)) { // 对于数据存在于 Host 上,则需要检查数据的尺寸是否和源矩阵相匹配。检查 // 的标准:源矩阵和目标矩阵的尺寸相同时,可重用原来的空间。 reusedata = 0; dstmat->matData = NULL; } // 将目标矩阵的尺寸更改为源矩阵的尺寸。 dstmat->width = srcmat->width; dstmat->height = srcmat->height; // 将目标矩阵的 ROI 更改为源矩阵的 ROI。 dstmat->roiX1 = srcmat->roiX1; dstmat->roiY1 = srcmat->roiY1; dstmat->roiX2 = srcmat->roiX2; dstmat->roiY2 = srcmat->roiY2; // 更改目标矩阵的数据存储位置为 Host。 dstmatCud->deviceId = -1; // 由于 Host 内存上的数据不使用 Padding,因此设置 Padding 尺寸为矩阵的宽 // 度。 dstmatCud->pitchWords = dstmat->width; // 如果目标矩阵的 matData == NULL,说明目标矩阵原本要么是一个空矩阵,要么目 // 标矩阵原本的数据空间不合适,需要重新申请。这时,需要为目标矩阵重新在 // Host 上申请一个合适的数据空间。 if (dstmat->matData == NULL) { dstmat->matData = new float[srcmat->width * srcmat->height]; if (dstmat->matData == NULL) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标矩阵数据 // 恢复到目标矩阵中,以保证系统接下的操作不至于混乱。 *dstmatCud = olddstmatCud; return OUT_OF_MEM; } } // 将矩阵数据从源矩阵中拷贝到目标矩阵中。 if (srcmatCud->deviceId < 0) { // 如果源矩阵数据存储于 Host 内存,则直接使用 C 标准支持库中的 emcpy // 完成拷贝。 // 将 srcmat 内的矩阵数据拷贝到 dstmat 中。memcpy 不返回错误,因此,没 // 有进行错误检查。 memcpy(dstmat->matData, srcmat->matData, srcmat->width * srcmat->height * sizeof (float)); } else { // 如果源矩阵数据存储于 Device 内存(无论是当前 Device 还是其他的 // Device),都是 2D 形式的拷贝,并消除 Padding。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 首先切换到 srcmat 矩阵数据所在的 Device,以方便进行内存操作。 cudaSetDevice(srcmatCud->deviceId); // 这里使用 cudaMemcpy2D 将 srcmat 中处于 Device 上的数据拷贝到 dstmat // 中位于 Host 的内存空间上面,该拷贝会同时消除 Padding。 cuerrcode = cudaMemcpy2D(dstmat->matData, dstmatCud->pitchWords * sizeof (float), srcmat->matData, srcmatCud->pitchWords * sizeof (float), srcmat->width * sizeof (float), srcmat->height, cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标矩阵数据恢复到目 // 标矩阵中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。最后,还需要把 Device 切换回来,以免整个程序乱套。 if (!reusedata) delete[] dstmat->matData; *dstmatCud = olddstmatCud; cudaSetDevice(curdevid); return CUDA_ERROR; } // 对内存操作完毕后,将设备切换回当前的 Device。 cudaSetDevice(curdevid); } // 到此步骤已经说明新的矩阵数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddstmatCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddstmatCud.matMeta.matData != NULL) { if (olddstmatCud.deviceId > 0) { // 如果旧数据是存储于 Device 内存上的数据,则需要无条件的释放。 cudaSetDevice(olddstmatCud.deviceId); cudaFree(olddstmatCud.matMeta.matData); cudaSetDevice(curdevid); } else if (!reusedata) { // 如果旧数据就在 Host 内存上,则对于 reusedata 未置位的情况进行释 // 放,因为一旦置位,旧的数据空间就被用于承载新的数据,则不能释放。 delete[] olddstmatCud.matMeta.matData; } } // 处理完毕,退出。 return NO_ERROR; }
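// [Illustrative sketch, not part of the original source] The copy routines
// above revolve around one pattern: device rows are padded via cudaMallocPitch
// and moved with cudaMemcpy2D, while host buffers stay densely packed
// (pitch == width). The minimal standalone example below shows that round
// trip for a small float matrix; the names (hostBuf, devPtr, copyRoundTrip)
// are hypothetical and chosen only for this sketch.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

static bool copyRoundTrip(int width, int height)
{
    std::vector<float> hostBuf(width * height);
    for (int i = 0; i < width * height; ++i)
        hostBuf[i] = static_cast<float>(i);

    // Device rows are padded; the pitch is returned in bytes.
    float *devPtr = nullptr;
    size_t pitch = 0;
    if (cudaMallocPitch((void **)&devPtr, &pitch,
                        width * sizeof (float), height) != cudaSuccess)
        return false;

    // Host -> device: the source pitch is the dense row size in bytes.
    cudaMemcpy2D(devPtr, pitch,
                 hostBuf.data(), width * sizeof (float),
                 width * sizeof (float), height, cudaMemcpyHostToDevice);

    // Device -> host: the same call with swapped pitches removes the padding.
    std::vector<float> check(width * height, 0.0f);
    cudaMemcpy2D(check.data(), width * sizeof (float),
                 devPtr, pitch,
                 width * sizeof (float), height, cudaMemcpyDeviceToHost);

    cudaFree(devPtr);
    return check == hostBuf;
}

int main()
{
    std::printf("round trip ok: %d\n", copyRoundTrip(37, 5) ? 1 : 0);
    return 0;
}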
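// [Illustrative sketch, not part of the original source] writeToFile above
// pads each row of the 8-bit BMP to a multiple of 4 bytes and places the
// pixel data behind a 54-byte header plus a 256-entry, 4-bytes-per-entry
// grayscale palette. The helper below only reproduces that arithmetic;
// bmpRowStride/bmpFileSize are hypothetical names used for this sketch.
#include <cstdio>

static unsigned int bmpRowStride(unsigned int width)
{
    unsigned int dummy = (4 - (width & 3)) & 3;  // padding bytes per row
    return width + dummy;                        // 1 byte per pixel (8 bpp)
}

static unsigned int bmpFileSize(unsigned int width, unsigned int height)
{
    unsigned int dataoff = 54 + 4 * 256;         // header + palette
    return dataoff + bmpRowStride(width) * height;
}

int main()
{
    // A 10 x 4 grayscale image needs 2 padding bytes per row:
    // stride = 12, file size = 54 + 1024 + 12 * 4 = 1126 bytes.
    std::printf("stride=%u size=%u\n", bmpRowStride(10), bmpFileSize(10, 4));
    return 0;
}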
* Cooperative tile SOA (structure-of-arrays) scan within CTAs ******************************************************************************/ #pragma once #include "../../../util/srts_grid.cuh" #include "../../../util/reduction/soa/cooperative_soa_reduction.cuh" #include "../../../util/scan/soa/serial_soa_scan.cuh" #include "../../../util/scan/soa/warp_soa_scan.cuh" B40C_NS_PREFIX namespace b40c { namespace util { namespace scan { namespace soa { /** * Cooperative SOA reduction in raking smem grid hierarchies */ template < typename RakingSoaDetails, typename SecondaryRakingSoaDetails = typename RakingSoaDetails::SecondaryRakingSoaDetails> struct CooperativeSoaGridScan; /** * Cooperative SOA tile scan */ template < int VEC_SIZE, // Length of vector-loads (e.g, vec-1, vec-2, vec-4) bool EXCLUSIVE = true> // Whether or not this is an exclusive scan struct CooperativeSoaTileScan { //--------------------------------------------------------------------- // Iteration structures for extracting partials from raking lanes and // using them to seed scans of tile vectors //--------------------------------------------------------------------- // Next lane/load template <int LANE, int TOTAL_LANES> struct ScanLane { template < typename RakingSoaDetails, typename TileSoa, typename ReductionOp> static __device__ __forceinline__ void Invoke( RakingSoaDetails raking_soa_details, TileSoa tile_soa, ReductionOp scan_op) { // Retrieve partial reduction from raking grid typename RakingSoaDetails::TileTuple exclusive_partial; raking_soa_details.lane_partials.Get(exclusive_partial, LANE, 0); // Scan the partials in this lane/load SerialSoaScan<VEC_SIZE, EXCLUSIVE>::Scan( tile_soa, exclusive_partial, LANE, scan_op); // Next load ScanLane<LANE + 1, TOTAL_LANES>::Invoke( raking_soa_details, tile_soa, scan_op); } }; // Terminate template <int TOTAL_LANES> struct ScanLane<TOTAL_LANES, TOTAL_LANES> { template < typename RakingSoaDetails, typename TileSoa, typename ReductionOp> static __device__ __forceinline__ void Invoke( RakingSoaDetails raking_soa_details, TileSoa tile_soa, ReductionOp scan_op) {} }; //--------------------------------------------------------------------- // Interface //--------------------------------------------------------------------- /** * Scan a single tile where carry is assigned (or updated if REDUCE_INTO_CARRY is set) * with the total aggregate only in raking threads. * * No post-synchronization needed before grid reuse. */ template < bool REDUCE_INTO_CARRY, typename RakingSoaDetails, typename TileSoa, typename TileTuple, typename ReductionOp> static __device__ __forceinline__ void ScanTileWithCarry( RakingSoaDetails raking_soa_details, TileSoa tile_soa, TileTuple &carry, ReductionOp scan_op) { // Reduce vectors in tile, placing resulting partial into corresponding raking grid lanes reduction::soa::CooperativeSoaTileReduction<VEC_SIZE>::template ReduceLane<0, RakingSoaDetails::SCAN_LANES>::Invoke( raking_soa_details, tile_soa, scan_op); __syncthreads(); CooperativeSoaGridScan<RakingSoaDetails>::template ScanTileWithCarry<REDUCE_INTO_CARRY>( raking_soa_details, carry, scan_op); __syncthreads(); // Scan partials in tile, retrieving resulting partial from raking grid lane partial ScanLane<0, RakingSoaDetails::SCAN_LANES>::Invoke( raking_soa_details, tile_soa, scan_op); } /** * Scan a single tile. Total aggregate is computed and returned in all threads. * * No post-synchronization needed before grid reuse. 
*/ template < typename RakingSoaDetails, typename TileSoa, typename TileTuple, typename ReductionOp> static __device__ __forceinline__ void ScanTile( TileTuple &retval, RakingSoaDetails raking_soa_details, TileSoa tile_soa, ReductionOp scan_op) { // Reduce vectors in tile, placing resulting partial into corresponding raking grid lanes reduction::soa::CooperativeSoaTileReduction<VEC_SIZE>::template ReduceLane<0, RakingSoaDetails::SCAN_LANES>::Invoke( raking_soa_details, tile_soa, scan_op); __syncthreads(); CooperativeSoaGridScan<RakingSoaDetails>::ScanTile( raking_soa_details, scan_op); __syncthreads(); // Scan partials in tile, retrieving resulting partial from raking grid lane partial ScanLane<0, RakingSoaDetails::SCAN_LANES>::Invoke( raking_soa_details, tile_soa, scan_op); // Return last thread's inclusive partial retval = raking_soa_details.CumulativePartial(); } }; /****************************************************************************** * CooperativeSoaGridScan ******************************************************************************/ /** * Cooperative SOA raking grid reduction (specialized for last-level of raking grid) */ template <typename RakingSoaDetails> struct CooperativeSoaGridScan<RakingSoaDetails, NullType> { typedef typename RakingSoaDetails::TileTuple TileTuple; /** * Scan in last-level raking grid. */ template <typename ReductionOp> static __device__ __forceinline__ void ScanTile( RakingSoaDetails raking_soa_details, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>::Reduce( inclusive_partial, raking_soa_details.raking_segments, scan_op); // Exclusive warp scan TileTuple exclusive_partial = WarpSoaScan<RakingSoaDetails::LOG_RAKING_THREADS>::Scan( inclusive_partial, raking_soa_details.warpscan_partials, scan_op); // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } /** * Scan in last-level raking grid. 
Carry-in/out is updated only in raking threads (homogeneously) */ template < bool REDUCE_INTO_CARRY, typename ReductionOp> static __device__ __forceinline__ void ScanTileWithCarry( RakingSoaDetails raking_soa_details, TileTuple &carry, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>::Reduce( inclusive_partial, raking_soa_details.raking_segments, scan_op); // Exclusive warp scan, get total TileTuple warpscan_total; TileTuple exclusive_partial = WarpSoaScan< RakingSoaDetails::LOG_RAKING_THREADS>::Scan( inclusive_partial, warpscan_total, raking_soa_details.warpscan_partials, scan_op); // Seed exclusive partial with carry-in if (REDUCE_INTO_CARRY) { if (!ReductionOp::IDENTITY_STRIDES && (threadIdx.x == 0)) { // Thread-zero can't use the exclusive partial from the warpscan // because it contains garbage exclusive_partial = carry; } else { // Seed exclusive partial with the carry partial exclusive_partial = scan_op(carry, exclusive_partial); } // Update carry carry = scan_op(carry, warpscan_total); } else { // Set carry carry = warpscan_total; } // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } }; /** * Cooperative SOA raking grid reduction (specialized for last-level of raking grid) */ template < typename RakingSoaDetails, typename SecondaryRakingSoaDetails> struct CooperativeSoaGridScan { typedef typename RakingSoaDetails::TileTuple TileTuple; /** * Scan in last-level raking grid. */ template <typename ReductionOp> static __device__ __forceinline__ void ScanTile( RakingSoaDetails raking_soa_details, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>::Reduce( inclusive_partial, raking_soa_details.raking_segments, scan_op); // Store partial reduction into next raking grid raking_soa_details.secondary_details.lane_partials.Set(inclusive_partial, 0, 0); } __syncthreads(); // Collectively scan in next grid CooperativeSoaGridScan<SecondaryRakingSoaDetails>::ScanTile( raking_soa_details.secondary_details, scan_op); __syncthreads(); if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Retrieve partial reduction from next raking grid TileTuple exclusive_partial; raking_soa_details.secondary_details.lane_partials.Get(exclusive_partial, 0, 0); // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } /** * Scan in last-level raking grid. 
Carry-in/out is updated only in raking threads (homogeneously) */ template < bool REDUCE_INTO_CARRY, typename ReductionOp> static __device__ __forceinline__ void ScanTileWithCarry( RakingSoaDetails raking_soa_details, TileTuple &carry, ReductionOp scan_op) { if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Raking reduction TileTuple inclusive_partial; reduction::soa::SerialSoaReduce<RakingSoaDetails::PARTIALS_PER_SEG>::Reduce( inclusive_partial, raking_soa_details.raking_segments, scan_op); // Store partial reduction into next raking grid raking_soa_details.secondary_details.lane_partials.Set(inclusive_partial, 0, 0); } __syncthreads(); // Collectively scan in next grid CooperativeSoaGridScan<SecondaryRakingSoaDetails>::template ScanTileWithCarry<REDUCE_INTO_CARRY>( raking_soa_details.secondary_details, carry, scan_op); __syncthreads(); if (threadIdx.x < RakingSoaDetails::RAKING_THREADS) { // Retrieve partial reduction from next raking grid TileTuple exclusive_partial; raking_soa_details.secondary_details.lane_partials.Get(exclusive_partial, 0, 0); // Exclusive raking scan SerialSoaScan<RakingSoaDetails::PARTIALS_PER_SEG>::Scan( raking_soa_details.raking_segments, exclusive_partial, scan_op); } } }; } // namespace soa } // namespace scan } // namespace util } // namespace b40c B40C_NS_POSTFIX
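// [Illustrative sketch, not part of the original source] The kernels above
// perform exclusive scans over tuples of partials (structure-of-arrays) with
// a user-supplied reduction operator. The host-side reference below shows the
// intended semantics for a two-component tuple; PairT/scan_exclusive are
// hypothetical names, not part of the b40c interface.
#include <cstdio>
#include <vector>

struct PairT { int a; float b; };

static PairT reduce_op(const PairT &x, const PairT &y)
{
    // Component-wise sum; any associative operator would do.
    return PairT{ x.a + y.a, x.b + y.b };
}

static void scan_exclusive(std::vector<PairT> &data, PairT identity)
{
    PairT running = identity;
    for (std::size_t i = 0; i < data.size(); ++i) {
        PairT next = reduce_op(running, data[i]);
        data[i] = running;   // each slot receives the reduction of all earlier slots
        running = next;
    }
}

int main()
{
    std::vector<PairT> v = { {1, 1.f}, {2, 2.f}, {3, 3.f}, {4, 4.f} };
    scan_exclusive(v, PairT{0, 0.f});
    for (const PairT &p : v)
        std::printf("(%d, %.1f) ", p.a, p.b);   // (0,0.0) (1,1.0) (3,3.0) (6,6.0)
    std::printf("\n");
    return 0;
}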
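// [Illustrative sketch, not part of the original source] ScanTileWithCarry
// above follows the usual raking decomposition: serially reduce each raking
// segment, exclusively scan the segment totals, then rescan every segment
// seeded with its exclusive total (optionally combined with a running carry).
// The sequential sketch below mirrors those three phases for plain ints;
// raking_scan is a hypothetical name and the segment length is fixed to 4.
#include <cstdio>
#include <numeric>
#include <vector>

static int raking_scan(std::vector<int> &data, int carry_in)
{
    const std::size_t seg = 4;                       // partials per segment
    const std::size_t num_segs = data.size() / seg;

    // Phase 1: reduce each segment.
    std::vector<int> totals(num_segs, 0);
    for (std::size_t s = 0; s < num_segs; ++s)
        totals[s] = std::accumulate(data.begin() + s * seg,
                                    data.begin() + (s + 1) * seg, 0);

    // Phase 2: exclusive scan of the segment totals, seeded with the carry.
    int running = carry_in;
    for (std::size_t s = 0; s < num_segs; ++s) {
        int next = running + totals[s];
        totals[s] = running;
        running = next;
    }

    // Phase 3: rescan each segment, seeded with its exclusive total.
    for (std::size_t s = 0; s < num_segs; ++s) {
        int seed = totals[s];
        for (std::size_t i = 0; i < seg; ++i) {
            int next = seed + data[s * seg + i];
            data[s * seg + i] = seed;                // exclusive result
            seed = next;
        }
    }
    return running;                                  // carry-out (grand total)
}

int main()
{
    std::vector<int> v(8, 1);                        // two segments of four 1s
    int carry = raking_scan(v, 0);
    for (int x : v) std::printf("%d ", x);           // 0 1 2 3 4 5 6 7
    std::printf("| carry=%d\n", carry);              // carry=8
    return 0;
}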
* * This tutorial explains how to use the iterative solvers in ViennaCL in a matrix-free manner, i.e. without explicitly assembling a matrix. * * We consider the solution of the Poisson equation \f$ \Delta \varphi = -1 \f$ on the unit square \f$ [0,1] \times [0,1] \f$ with homogeneous Dirichlet boundary conditions using a finite-difference discretization. * A \f$ N \times N \f$ grid is used, where the first and the last points per dimensions represent the boundary. * For simplicity we only consider the host-backend here. Have a look at custom-kernels.hpp and custom-cuda.cu on how to use custom kernels in such a matrix-free setting. * * \note matrix-free.cpp and matrix-free.cu are identical, the latter being required for compilation using CUDA nvcc * * We start with including the necessary system headers: **/ // // include necessary system headers // #include <iostream> // // ViennaCL includes // #include "viennacl/scalar.hpp" #include "viennacl/vector.hpp" #include "viennacl/linalg/prod.hpp" #include "viennacl/linalg/cg.hpp" #include "viennacl/linalg/bicgstab.hpp" #include "viennacl/linalg/gmres.hpp" /** * ViennaCL imposes two type requirements on a user-provided operator to compute `y = prod(A, x)` for the iterative solvers: * - A member function `apply()`, taking two ViennaCL base vectors `x` and `y` as arguments. This member function carries out the action of the matrix to the vector. * - A member function `size1()` returning the length of the result vectors. * Keep in mind that you can always wrap your existing classes accordingly to fit ViennaCL's interface requirements. * * We define a simple class for dealing with the \f$ N \times N \f$ grid for solving Poisson's equation. * It only holds the number of grid points per coordinate direction and implements the `apply()` and `size1()` routines. * Depending on whether the host, OpenCL, or CUDA is used for the computation, the respective implementation is called. * We skip the details for now and discuss (and implement) them at the end of this tutorial. **/ template<typename NumericT> class MyOperator { public: MyOperator(std::size_t N) : N_(N) {} // Dispatcher for y = Ax void apply(viennacl::vector_base<NumericT> const & x, viennacl::vector_base<NumericT> & y) const { #if defined(VIENNACL_WITH_CUDA) if (viennacl::traits::active_handle_id(x) == viennacl::CUDA_MEMORY) apply_cuda(x, y); #endif #if defined(VIENNACL_WITH_OPENCL) if (viennacl::traits::active_handle_id(x) == viennacl::OPENCL_MEMORY) apply_opencl(x, y); #endif if (viennacl::traits::active_handle_id(x) == viennacl::MAIN_MEMORY) apply_host(x, y); } std::size_t size1() const { return N_ * N_; } private: #if defined(VIENNACL_WITH_CUDA) void apply_cuda(viennacl::vector_base<NumericT> const & x, viennacl::vector_base<NumericT> & y) const; #endif #if defined(VIENNACL_WITH_OPENCL) void apply_opencl(viennacl::vector_base<NumericT> const & x, viennacl::vector_base<NumericT> & y) const; #endif void apply_host(viennacl::vector_base<NumericT> const & x, viennacl::vector_base<NumericT> & y) const; std::size_t N_; }; /** * <h2>Main Program</h2> * * In the `main()` routine we create the right hand side vector, instantiate the operator, and then call the solver. **/ int main() { typedef float ScalarType; // feel free to change to double (and change OpenCL kernel argument types accordingly) std::size_t N = 10; viennacl::vector<ScalarType> rhs = viennacl::scalar_vector<ScalarType>(N*N, ScalarType(-1)); MyOperator<ScalarType> op(N); /** * Run the CG method with our on-the-fly operator. 
* Use `viennacl::linalg::bicgstab_tag()` or `viennacl::linalg::gmres_tag()` instead of `viennacl::linalg::cg_tag()` to solve using BiCGStab or GMRES, respectively. **/ viennacl::vector<ScalarType> result = viennacl::linalg::solve(op, rhs, viennacl::linalg::cg_tag()); /** * Pretty-Print solution vector to verify solution. * (We use a slow direct element-access via `operator[]` here for convenience.) **/ std::cout.precision(3); std::cout << std::fixed; std::cout << "Result value map: " << std::endl; std::cout << std::endl << "^ y " << std::endl; for (std::size_t i=0; i<N; ++i) { std::cout << "| "; for (std::size_t j=0; j<N; ++j) std::cout << result[i * N + j] << " "; std::cout << std::endl; } std::cout << "*---------------------------------------------> x" << std::endl; /** * That's it, print a completion message. Read on for details on how to implement the actual compute kernels. **/ std::cout << "!!!! TUTORIAL COMPLETED SUCCESSFULLY !!!!" << std::endl; return EXIT_SUCCESS; } /** * <h2> Implementation Details </h2> * * So far we have only looked at the code for the control logic. * In the following we define the actual 'worker' code for the matrix-free implementations. * * <h3> Execution on Host </h3> * * Since the execution on the host has the smallest amount of boilerplate code surrounding it, we use this case as a starting point. **/ template<typename NumericT> void MyOperator<NumericT>::apply_host(viennacl::vector_base<NumericT> const & x, viennacl::vector_base<NumericT> & y) const { NumericT const * values_x = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericT>(x.handle()); NumericT * values_y = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericT>(y.handle()); NumericT dx = NumericT(1) / NumericT(N_ + 1); NumericT dy = NumericT(1) / NumericT(N_ + 1); /** * In the following we iterate over all \f$ N \times N \f$ points and apply the five-point stencil directly. * This is done in a straightforward manner for illustration purposes. * Multi-threaded execution via OpenMP can be obtained by uncommenting the pragma below. * * Feel free to apply additional optimizations with respect to data access patterns and the like. **/ // feel free to use // #pragma omp parallel for // here for (std::size_t i=0; i<N_; ++i) for (std::size_t j=0; j<N_; ++j) { NumericT value_right = (j < N_ - 1) ? values_x[ i *N_ + j + 1] : 0; NumericT value_left = (j > 0 ) ? values_x[ i *N_ + j - 1] : 0; NumericT value_top = (i < N_ - 1) ? values_x[(i+1)*N_ + j ] : 0; NumericT value_bottom = (i > 0 ) ? values_x[(i-1)*N_ + j ] : 0; NumericT value_center = values_x[i*N_ + j]; values_y[i*N_ + j] = ((value_right - value_center) / dx - (value_center - value_left) / dx) / dx + ((value_top - value_center) / dy - (value_center - value_bottom) / dy) / dy; } } /** * <h3> Execution via CUDA </h3> * * The host-based kernel code serves as a basis for the CUDA kernel. * The only thing we have to adjust are the array bounds: * We assign one CUDA threadblock per index `i`. * For a fixed `i`, parallelization across all threads in the block is obtained with respect to `j`. * * Again, feel free to apply additional optimizations with respect to data access patterns and the like... 
**/ #if defined(VIENNACL_WITH_CUDA) template<typename NumericT> __global__ void apply_cuda_kernel(NumericT const * values_x, NumericT * values_y, std::size_t N) { NumericT dx = NumericT(1) / (N + 1); NumericT dy = NumericT(1) / (N + 1); for (std::size_t i = blockIdx.x; i < N; i += gridDim.x) for (std::size_t j = threadIdx.x; j < N; j += blockDim.x) { NumericT value_right = (j < N - 1) ? values_x[ i *N + j + 1] : 0; NumericT value_left = (j > 0 ) ? values_x[ i *N + j - 1] : 0; NumericT value_top = (i < N - 1) ? values_x[(i+1)*N + j ] : 0; NumericT value_bottom = (i > 0 ) ? values_x[(i-1)*N + j ] : 0; NumericT value_center = values_x[i*N + j]; values_y[i*N + j] = ((value_right - value_center) / dx - (value_center - value_left) / dx) / dx + ((value_top - value_center) / dy - (value_center - value_bottom) / dy) / dy; } } #endif #if defined(VIENNACL_WITH_CUDA) template<typename NumericT> void MyOperator<NumericT>::apply_cuda(viennacl::vector_base<NumericT> const & x, viennacl::vector_base<NumericT> & y) const { apply_cuda_kernel<<<128, 128>>>(viennacl::cuda_arg(x), viennacl::cuda_arg(y), N_); } #endif /** * <h3> Execution via OpenCL </h3> * * The OpenCL kernel is almost identical to the CUDA kernel: Only a couple of keywords need to be replaced. * Also, the sources need to be packed into a string: **/ #if defined(VIENNACL_WITH_OPENCL) static const char * my_compute_program = "typedef float NumericT; \n" "__kernel void apply_opencl_kernel(__global NumericT const * values_x, \n" " __global NumericT * values_y, \n" " unsigned int N) {\n" " NumericT dx = (NumericT)1 / (N + 1); \n" " NumericT dy = (NumericT)1 / (N + 1); \n" " for (unsigned int i = get_group_id(0); i < N; i += get_num_groups(0)) \n" " for (unsigned int j = get_local_id(0); j < N; j += get_local_size(0)) { \n" " NumericT value_right = (j < N - 1) ? values_x[ i *N + j + 1] : 0; \n" " NumericT value_left = (j > 0 ) ? values_x[ i *N + j - 1] : 0; \n" " NumericT value_top = (i < N - 1) ? values_x[(i+1)*N + j ] : 0; \n" " NumericT value_bottom = (i > 0 ) ? values_x[(i-1)*N + j ] : 0; \n" " NumericT value_center = values_x[i*N + j]; \n" " values_y[i*N + j] = ((value_right - value_center) / dx - (value_center - value_left) / dx) / dx \n" " + ((value_top - value_center) / dy - (value_center - value_bottom) / dy) / dy; \n" " } \n" " } \n"; #endif /** * Before the kernel is called for the first time, the OpenCL program containing the kernel needs to be compiled. * We use a simple singleton using a static variable to achieve that. * * Except for the kernel compilation at the first invocation, the OpenCL kernel launch is just one line of code just like for CUDA. * Refer to custom-kernels.cpp for some more details. **/ #if defined(VIENNACL_WITH_OPENCL) template<typename NumericT> void MyOperator<NumericT>::apply_opencl(viennacl::vector_base<NumericT> const & x, viennacl::vector_base<NumericT> & y) const { viennacl::ocl::context & ctx = const_cast<viennacl::ocl::context &>(viennacl::traits::opencl_handle(x).context()); static bool first_run = true; if (first_run) { ctx.add_program(my_compute_program, "my_compute_program"); first_run = false; } viennacl::ocl::kernel & my_kernel = ctx.get_kernel("my_compute_program", "apply_opencl_kernel"); viennacl::ocl::enqueue(my_kernel(x, y, static_cast<cl_uint>(N_))); } #endif
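/**
 * [Illustrative sketch, not part of the original tutorial] As a host-only
 * cross-check of the matrix-free idea, the snippet below runs an
 * unpreconditioned conjugate gradient on the same five-point stencil in plain
 * C++ (no ViennaCL). It uses the symmetric positive definite form
 * -laplace(phi) = 1, which has the same solution as the tutorial's
 * laplace(phi) = -1. The names (apply_stencil, cg_solve) are made up here.
 **/
#include <cmath>
#include <cstdio>
#include <vector>

typedef std::vector<double> Vec;

// y = A x with A the (negated, SPD) five-point Laplacian on an N x N grid,
// homogeneous Dirichlet boundary, grid spacing h = 1 / (N + 1).
static void apply_stencil(std::size_t N, const Vec &x, Vec &y)
{
    double h = 1.0 / double(N + 1);
    for (std::size_t i = 0; i < N; ++i)
        for (std::size_t j = 0; j < N; ++j) {
            double c = x[i * N + j];
            double l = (j > 0    ) ? x[i * N + j - 1]   : 0.0;
            double r = (j < N - 1) ? x[i * N + j + 1]   : 0.0;
            double b = (i > 0    ) ? x[(i - 1) * N + j] : 0.0;
            double t = (i < N - 1) ? x[(i + 1) * N + j] : 0.0;
            y[i * N + j] = (4.0 * c - l - r - t - b) / (h * h);
        }
}

static double dot(const Vec &a, const Vec &b)
{
    double s = 0.0;
    for (std::size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
    return s;
}

static Vec cg_solve(std::size_t N, const Vec &rhs, double tol, int maxit)
{
    Vec x(rhs.size(), 0.0), r = rhs, p = rhs, Ap(rhs.size());
    double rr = dot(r, r);
    for (int it = 0; it < maxit && std::sqrt(rr) > tol; ++it) {
        apply_stencil(N, p, Ap);
        double alpha = rr / dot(p, Ap);
        for (std::size_t i = 0; i < x.size(); ++i) {
            x[i] += alpha * p[i];
            r[i] -= alpha * Ap[i];
        }
        double rr_new = dot(r, r);
        double beta = rr_new / rr;
        rr = rr_new;
        for (std::size_t i = 0; i < p.size(); ++i)
            p[i] = r[i] + beta * p[i];
    }
    return x;
}

int main()
{
    std::size_t N = 10;
    Vec rhs(N * N, 1.0);                      // -laplace(phi) = 1
    Vec phi = cg_solve(N, rhs, 1e-10, 1000);
    std::printf("phi at grid center: %.6f\n", phi[(N / 2) * N + N / 2]);
    return 0;
}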
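// [Illustrative sketch, not part of the original source] The GetContourSet
// code below labels connected regions with a GPU union-find: every pixel
// starts out as its own label (its linear index), _findRootDev chases labels
// down to the smallest index in the set, and _unionDev links two roots by
// atomically taking the minimum. The sequential sketch here shows the same
// find/union-by-minimum idea without atomics; find_root/union_pixels are
// hypothetical names for this sketch only.
#include <cstdio>
#include <vector>

static int find_root(const std::vector<int> &label, int idx)
{
    // Follow labels until they stop decreasing (the root points to itself
    // or to a smaller, already-final index).
    int next;
    do {
        next = idx;
        idx = label[next];
    } while (idx < next);
    return idx;
}

static void union_pixels(std::vector<int> &label, int a, int b)
{
    int ra = find_root(label, a);
    int rb = find_root(label, b);
    if (ra < rb)
        label[rb] = ra;     // attach the larger root under the smaller one
    else if (rb < ra)
        label[ra] = rb;
}

int main()
{
    // Six "pixels", initially each in its own set.
    std::vector<int> label = { 0, 1, 2, 3, 4, 5 };
    union_pixels(label, 1, 2);
    union_pixels(label, 2, 5);
    union_pixels(label, 3, 4);
    for (int i = 0; i < 6; ++i)
        std::printf("%d->%d ", i, find_root(label, i));  // 0->0 1->1 2->1 3->3 4->3 5->1
    std::printf("\n");
    return 0;
}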
// GetContourSet.cu // 实现有连接性的闭合轮廓的获得算法 #include "GetContourSet.h" #include <iostream> #include <fstream> #include <cmath> using namespace std; #include "ErrorCode.h" // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // 宏:GET_CONTOUR_SET_INI_IFI // 定义了一个无穷大 #define GET_CONTOUR_SET_INI_IFI 0x7fffffff // 宏:OUTER_ORIGIN_CONTOUR // 定义了输入闭曲线外部区域的标记值。 #define OUTER_ORIGIN_CONTOUR 0 // 宏:INNER_ORIGIN_CONTOUR // 定义了输入闭曲线内部区域的标记值。 #define INNER_ORIGIN_CONTOUR 10 // 宏:DILATE_CIRCLE // 定义了膨胀后得到环上点的标记值。 #define DILATE_CIRCLE 50 // 宏:OUTER_CONTOUR // 定义了膨胀后得到环上外轮廓点的标记值。 #define OUTER_CONTOUR 100 // 宏:OUTER_CIRCLE // 定义了经过二分类后外部环状物上的点的标记值。 #define OUTER_CIRCLE 150 // 宏:INNER_CONTOUR // 定义了经过二分类后内外环状物交界处的点的标记值。 #define INNER_CONTOUR 200 // Device 全局变量:_eightNeighDev(八邻域点索引下标) // 存放当前点八邻域范围内对应点的索引下标。 // 应用此数组可以便于进行八邻域像素点的遍历。 static __device__ int _eightNeighDev[8][2] = { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 }, { -1, 0 } }; // Device 子程序:_findRootDev (查找根节点标记值) // 查找根节点标记值算法,根据给定的 label 数组和坐标值 // 返回该坐标对应的根节点坐标值。该函数是为了便于其他 Kernel 函数调用。 static __device__ int // 返回值:根节点标记值 _findRootDev( int label[], // 输入的标记数组 int idx // 输入点的标记值 ); // Device 子程序:_unionDev (合并两个像素点使其位于同一区域) // 合并两个不同像素点以使它们位于同一连通区域中 static __device__ void // 该函数无返回值 _unionDev( int label[], // 标记值数组 unsigned char elenum1, // 第一个像素点灰度值 unsigned char elenum2, // 第二个像素点灰度值 int elelabel1, // 第一个像素点标记值 int elelabel2, // 第二个像素点标记值 int *flag // 变换标记,当这两个输入像素点被合并到一个 // 区域后,该标记值将被设为 1。 ); // Kernel 函数:_imginitKer(初始化输入图像第一步) // 将输入图像所有点的灰度值置为 0 static __global__ void // Kernel 函数无返回值 _imginitKer( ImageCuda inimg // 输入图像 ); // Kernel 函数:_initInimgKer(初始化输入图像第二步) // 根据输入闭曲线的坐标值,将输入图像对应位置的灰度值修改为 255, // 从而得到对应的输入图像。 static __global__ void // Kernel 函数无返回值 _initInimgKer( CoordiSet incoordiset, // 输入闭曲线 int xmin, int ymin, // 输入闭曲线的最上,最左点坐标值 int radius, // 半径 ImageCuda inimg // 输入图像 ); // Kernel 函数:_initLabelPerBlockKer (初始化每个块内像素点的标记值) // 初始化每个线程块内点的标记值。该过程主要分为两个部分,首先, // 每个节点的标记值为其在源图像中的索引值,如对于坐标为 (c, r) 点, // 其初始标记值为 r * width + c ,其中 width 为图像宽; // 然后,将各点标记值赋值为该点满足阈值关系的八邻域点中的最小标记值。 // 该过程在一个线程块中进行。 static __global__ void // Kernel 函数无返回值 _initLabelPerBlockKer( ImageCuda inimg, // 输入图像 int label[] // 输入标记数组 ); // Kernel 函数:_mergeBordersKer (合并不同块内像素点的标记值) // 不同线程块的合并过程。该过程主要合并每两个线程块边界的点, // 在这里我们主要采用每次合并 4 × 4 个线程块的策略。 static __global__ void // Kernel 函数无返回值 _mergeBordersKer( ImageCuda inimg, // 输入图像 int *label, // 输入标记数组 int blockw, // 应合并线程块的长度 int blockh, // 应合并线程块的宽度 int threadz_z, // 合并水平方向线程块时,z 向线程最大值 int threadz_y // 合并竖直方向线程块时,z 向线程最大值 ); // Kernel 函数:_findFinalLabelKer (找到每个点对应的根节点标记值) // 找出每个点对应的根节点标记值,并将该值修改为当前点标记值。 static __global__ void // Kernel 函数无返回值 _findFinalLabelKer( int *label, // 输入标记值数组 int width, // 宽度 int height // 高度 ); // Kernel 函数:_initFlagSetKer(初始化标记值数组) // 初始化标记值数组,将环外点置为 0,环内点置为 10, // 同时使得生成的圆环上点的标记值置为 50 static __global__ void // Kernel 函数无返回值 _initFlagSetKer( ImageCuda inimg, // 输入图像 int inflagset[], // 输入标记值数组 int *outflagset // 输出标记值数组 ); // Kernel 函数:_findOuterContourKer(标记外轮廓点) // 将外环轮廓点标记值置为 100,同时将点的 class 值存入 classNum 变量中。 static __global__ void // Kernel 函数无返回值 _findOuterContourKer( int inflagset[], // 输入标记值数组 int *outflagset, // 输出标记值数组(存储外环轮廓点标记值) int devclassarr[], // 输入 class 数组 int width, int height, // 图像宽和图像高 int *classnum // 输出外环的 class 值 ); // Kernel 函数:_fillOuterCircleKer(将外环上点标记值置为 150) // 将属于外环的所有点的标记值置为 150。 static __global__ void // Kernel 函数无返回值 _fillOuterCircleKer( int inflagset[], // 输入标记值数组 int *outflagset, // 
中间标记值数组(存储外环所有点标记值) int devclassarr[], // 输入 class 数组 int classnum[], // 输入外环 class 值 int width, int height // 图像宽和高 ); // Kernel 函数:_findInnerContourKer (修改内外环交界处点标记值,第一步) // 将内外环交界处点标记值置为 200,第一步 static __global__ void // Kernel 函数无返回值 _findInnerContourKer( int outflagset[], // 输入标记值数组 int *devflagset, // 输出标记值数组 int width, int height // 图像宽和高 ); // Kernel 函数:_findInnerContourSecondKer(修改内外环交界处点标记值,第二步) // 将内外环交界处点标记值置为 200,第二步 static __global__ void // Kernel 函数无返回值 _findInnerContourSecondKer( int outflagset[], // 输入标记值数组 int *devflagset, // 输出标记值数组 int width, int height // 图像宽和高 ); // Kernel 函数:_contourSetToimgKer (输出轮廓坐标至图像中) // 将轮廓坐标输出到图像中。 static __global__ void // Kernel 函数无返回值 _contourSetToimgKer( int inflagset[], // 输入标记值数组 ImageCuda contourimg // 输出轮廓图像 ); // Kernel 函数:_innerConSetToimgKer (输出内环内点坐标至图像中) // 将内环内点坐标输出到图像中。 static __global__ void // Kernel 函数无返回值 _innerConSetToimgKer( int inflagset[], // 输入标记值数组 ImageCuda innerimg // 输出内环内点图像 ); // Kernel 函数:_imginitKer(初始化输入图像第一步) static __global__ void _imginitKer(ImageCuda inimg) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; int inidx = r * inimg.pitchBytes + c; // 将输入图像所有点的灰度值初始化为 0 。 inimg.imgMeta.imgData[inidx] = 0; } // Kernel 函数:_initInimgKer(初始化输入图像第二步) static __global__ void _initInimgKer(CoordiSet incoordiset, int xmin, int ymin, int radius, ImageCuda inimg) { // 计算该线程在块内的相对位置。 int inidx = blockIdx.x * blockDim.x + threadIdx.x; // 若线程在块内的相对位置大于输入坐标集大小,即点个数, // 则不执行任何操作,返回。 if (inidx >= incoordiset.count) return; // 计算坐标集中每个点的横纵坐标 int x = incoordiset.tplData[2 * inidx]; int y = incoordiset.tplData[2 * inidx + 1]; // 计算在新坐标系下的每个点的横纵坐标值 x = x - xmin + radius + 1; y = y - ymin + radius + 1; // 计算坐标点对应的图像数据数组下标。 int outidx = y * inimg.pitchBytes + x; // 将图像对应点的灰度值置为255。 inimg.imgMeta.imgData[outidx] = 255; } // Device 子程序:_findRootDev (查找根节点标记值) static __device__ int _findRootDev(int label[], int idx) { // 在 label 数组中查找 idx 下标对应的最小标记值, // 并将该值作为返回值。 int nexidx; do { nexidx = idx; idx = label[nexidx]; } while (idx < nexidx); // 处理完毕,返回根节点标记值。 return idx; } // Kernel 函数:_initLabelPerBlockKer (初始化各线程块内像素点的标记值) static __global__ void _initLabelPerBlockKer( ImageCuda inimg, int label[]) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量 (其中, c 表示 column; r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; int i, j, k; // 计算输入坐标点在label数组中对应的数组下标 int idx = r * inimg.imgMeta.width + c; // 计算输入坐标点对应的图像数据数组下标 int inidx = r * inimg.pitchBytes + c, newidx; // 计算应申请的 shared memory 的步长 int spitch = blockDim.x + 2; // 计算当前坐标点在 shared memory 中对应的下标 int localidx = (threadIdx.y + 1) * spitch + threadIdx.x + 1; // oldlabel 用来记录当前点未经过八邻域判断前的标记值, // newlabel 用来记录经过一轮判断后当前点的最新标记值, // 当一个点的 oldlabel 与 newlabel 一致时,当前点对应的标记值为最终标记 // 初始时,每个点的标记值设为其在 shared memory 中的对应下标 int oldlabel, newlabel = localidx; // curvalue 用来记录当前点的灰度值,newvalue 用来记录其八邻域点的灰度值 unsigned char curvalue, newvalue; curvalue = inimg.imgMeta.imgData[inidx]; // 共享内存数据区,该部分包含了存储在共享内存中的像素点的标记值。 // 由于未对 Kernel 的尺寸做出假设,这里使用动态申请的 Shared // Memory(共享内存)。 extern __shared__ int slabel[]; // 共享变量 sflag 数组用来存储是否应停止循环信息。 // 当 sflag[0] 的值为 0 时,表示块内的迭代已经完成。 
__shared__ int sflag[1]; // 由于 shared memory 的大小为 (blockDim.x + 2) * (blockDim.y + 2) // 在这里将 shared memory 的边界点(即 shared memory 中超出线程块的点) // 的标记值设为无穷大。 if (threadIdx.x == 0) slabel[localidx - 1] = GET_CONTOUR_SET_INI_IFI; if (threadIdx.x == blockDim.x - 1) slabel[localidx + 1] = GET_CONTOUR_SET_INI_IFI; if (threadIdx.y == 0) { slabel[localidx - spitch] = GET_CONTOUR_SET_INI_IFI; if (threadIdx.x == 0) slabel[localidx - spitch - 1] = GET_CONTOUR_SET_INI_IFI; if (threadIdx.x == blockDim.x - 1) slabel[localidx - spitch + 1] = GET_CONTOUR_SET_INI_IFI; } if (threadIdx.y == blockDim.y - 1) { slabel[localidx + spitch] = GET_CONTOUR_SET_INI_IFI; if (threadIdx.x == 0) slabel[localidx + spitch - 1] = GET_CONTOUR_SET_INI_IFI; if (threadIdx.x == blockDim.x - 1) slabel[localidx + spitch + 1] = GET_CONTOUR_SET_INI_IFI; } while (1) { // 将当前点的标记值设为其在 shared memory 中的数组下标 slabel[localidx] = newlabel; // 将 sflag[0] 标记值设为 0 if ((threadIdx.x | threadIdx.y) == 0) sflag[0] = 0; // 初始时,将 newlabel 值赋给 oldlabel oldlabel = newlabel; __syncthreads(); // 在当前点的八邻域范围内查找与其灰度值之差的绝对值小于阈值的点, // 并将这些点的最小标记值赋予记录在 newlabel 中 for (i = r - 1; i <= r + 1; i++) { for (j = c - 1; j <= c + 1; j++) { if (j == c && i == r) continue; newidx = i * inimg.pitchBytes + j; newvalue = inimg.imgMeta.imgData[newidx]; if ((i >= 0 && i < inimg.imgMeta.height && j >= 0 && j < inimg.imgMeta.width) && (curvalue == newvalue)) { k = localidx + (i - r) * spitch + j - c; newlabel = min(newlabel, slabel[k]); } } } __syncthreads(); // 若当前点的 oldlabel 值大于 newlabel 值, // 表明当前点的标记值不是最终的标记值 // 则将 sflag[0] 值设为 1,来继续进行循环判断,并通过原子操作 // 将 newlabel 与 slabel[oldlabel] 的较小值赋予 slabel[oldlabel] if (oldlabel > newlabel) { atomicMin(&slabel[oldlabel], newlabel); sflag[0] = 1; } __syncthreads(); // 当线程块内所有像素点对应的标记值不再改变, // 即 sflag[0] 的值为 0 时,循环结束。 if (sflag[0] == 0) break; // 计算 newlabel 对应的根节点标记值,并将该值赋给 newlabel newlabel = _findRootDev(slabel, newlabel); __syncthreads(); } // 将 newlabel 的值转换为其在 label 数组中的数组下标 j = newlabel / spitch; i = newlabel % spitch; i += blockIdx.x * blockDim.x - 1; j += blockIdx.y * blockDim.y - 1; newlabel = j * inimg.imgMeta.width + i; label[idx] = newlabel; } // Device 子程序:_unionDev (合并两个不同像素点以使它们位于同一连通区域中) static __device__ void _unionDev( int label[], unsigned char elenum1, unsigned char elenum2, int label1, int label2, int *flag) { int newlabel1, newlabel2; // 比较两个输入像素点的灰度值是否满足给定的阈值范围 if (elenum1 == elenum2) { // 若两个点满足指定条件,则分别计算这两个点的根节点标记值 // 计算第一个点的根节点标记值 newlabel1 = _findRootDev(label, label1); // 计算第二个点的根节点标记值 newlabel2 = _findRootDev(label, label2); // 将较小的标记值赋值给另一点在标记数组中的值 // 并将 flag[0] 置为 1 if (newlabel1 > newlabel2) { // 使用原子操作以保证操作的唯一性与正确性 atomicMin(&label[newlabel1], newlabel2); flag[0] = 1; } else if (newlabel2 > newlabel1) { atomicMin(&label[newlabel2], newlabel1); flag[0] = 1; } } } // Kernel 函数:_mergeBordersKer(合并不同块内像素点的标记值) static __global__ void _mergeBordersKer( ImageCuda inimg, int *label, int blockw, int blockh, int threadz_x, int threadz_y) { int idx, iterateTimes, i; int x, y; int curidx, newidx; unsigned char curvalue, newvalue; // 在这里以每次合并 4 * 4 = 16 个线程块的方式合并线程块 // 分别计算待合并线程块在 GRID 中的 x 和 y 向分量 int threadidx_x = blockDim.x * blockIdx.x + threadIdx.x; int threadidx_y = blockDim.y * blockIdx.y + threadIdx.y; // 共享数组变量,只含有一个元素,每当有两个像素点合并时,该数组 // 变量值置为 1。 __shared__ int sflag[1]; while (1) { // 设置 sflag[0] 的值为 0。 if ((threadIdx.x | threadIdx.y | threadIdx.z) == 0) sflag[0] = 0; __syncthreads(); // 合并上下相邻线程块的水平方向边界点 // 由于位于 GRID 中最后一行的线程块向下没有待合并的线程块 // 因而这里不处理最后一行的线程块 if ((threadIdx.y < blockDim.y - 1)) { // 
计算为了合并一行线程块的迭代次数 iterateTimes = blockw / threadz_x; // 计算待合并像素点在源图像中的像素点坐标 x = threadidx_x * blockw + threadIdx.z; y = threadidx_y * blockh + blockh - 1; // 根据迭代次数合并块内线程标记值 for (i = 0; i < iterateTimes; i++) { if (threadIdx.z < threadz_x && x < inimg.imgMeta.width && y < inimg.imgMeta.height) { idx = y * inimg.imgMeta.width + x; // 计算当前像素点灰度值 curidx = y * inimg.pitchBytes + x; curvalue = inimg.imgMeta.imgData[curidx]; // 计算位于当前像素点下方像素点的灰度值, // 其坐标值为 (x, y + 1)。 newidx = curidx + inimg.pitchBytes; newvalue = inimg.imgMeta.imgData[newidx]; // 合并这两个像素点 _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width, sflag); // 若当前像素点不为最左侧像素点时,即 x != 0 时,合并 // 位于当前像素点左下方像素点,其坐标值为 (x - 1, y + 1)。 if (x - 1 >= 0) { newidx -= 1; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width - 1, sflag); } // 若当前像素点不为最右侧像素点时,x != inimg.imgMeta.width // 时,合并位于当前像素点右下方像素点,其坐标值为 // (x + 1, y + 1)。 if (x + 1 < inimg.imgMeta.width) { newidx += 2; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width + 1, sflag); } } // 计算下次迭代的起始像素点的 x 坐标 x += threadz_x; } } // 合并左右相邻线程块的竖直方向边界点 // 由于位于 GRID 中最后一列的线程块向右没有待合并的线程块 // 因而这里不处理最后一列的线程块 if ((threadIdx.x < blockDim.x - 1)) { // 计算为了合并一列线程块的迭代次数 iterateTimes = blockh / threadz_y; // 计算待合并像素点在源图像中的像素点坐标, // 由于处理的是每个线程块的最右一列像素点, // 因此 x 坐标值因在原基础上加上线程块宽度 - 1 x = threadidx_x * blockw + blockw - 1; y = threadidx_y * blockh + threadIdx.z; // 根据迭代次数合并块内线程标记值 for (i = 0; i < iterateTimes; i++) { if (threadIdx.z < threadz_y && x < inimg.imgMeta.width && y < inimg.imgMeta.height) { idx = y * inimg.imgMeta.width + x; // 计算当前像素点灰度值 curidx = y * inimg.pitchBytes + x; curvalue = inimg.imgMeta.imgData[curidx]; // 计算位于当前像素点右侧像素点的灰度值, // 其坐标值为 (x + 1, y)。 newidx = curidx + 1; newvalue = inimg.imgMeta.imgData[newidx]; // 合并这两个像素点 _unionDev(label, curvalue, newvalue, idx, idx + 1, sflag); // 若当前像素点不为最上侧像素点时,即 y != 0 时,合并 // 位于当前像素点右上方像素点,其坐标值为 (x + 1, y - 1)。 if (y - 1 >= 0) { newidx -= inimg.pitchBytes; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx - inimg.imgMeta.width + 1, sflag); } // 若当前像素点不为最下侧像素点时, // 即 y != inimg.imgMeta.height时,合并位于当前像素点 // 右下方像素点,其坐标值为(x + 1, y + 1)。 if (y + 1 < inimg.imgMeta.height) { newidx = curidx + inimg.pitchBytes + 1; newvalue = inimg.imgMeta.imgData[newidx]; _unionDev(label, curvalue, newvalue, idx, idx + inimg.imgMeta.width + 1, sflag); } } // 计算下次迭代的起始像素点的 y 坐标 y += threadz_y; } } __syncthreads(); if (sflag[0] == 0) break; } } // Kernel 函数:_findFinalLabelKer (找到每个点对应的根节点标记值) static __global__ void _findFinalLabelKer(int *label, int width, int height) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的坐标 // 的 x 和 y 分量 (其中, c 表示 column; r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查第一个像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算输入坐标点在label数组中对应的数组下标 int inidx = r * width + c; // 计算当前像素点的标记值 int curlabel = label[inidx]; // 将当前像素点标记值的根节点值赋给原像素点 int newlabel = _findRootDev(label, curlabel); label[inidx] = newlabel; } // Kernel 函数:_initFlagSetKer(初始化标记值数组) static __global__ void _initFlagSetKer( ImageCuda inimg, int inflagset[], int *outflagset) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= 
inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // 计算输入坐标点在图像中对应的数组下标 int inidx = r * inimg.pitchBytes + c; // 计算输入坐标点在输入数组中对应的数组下标 int flagidx = r * inimg.imgMeta.width + c; unsigned char intemp; // 读取坐标点对应的像素值 intemp = inimg.imgMeta.imgData[inidx]; if (inflagset[flagidx] != OUTER_ORIGIN_CONTOUR) inflagset[flagidx] = INNER_ORIGIN_CONTOUR; // 若当前点像素点不为 0,则将标记值数组对应位置的值置为 50。 if (intemp) { inflagset[flagidx] = DILATE_CIRCLE; } outflagset[flagidx] = inflagset[flagidx]; } // Kernel 函数:_findOuterContourKer(修改外轮廓点标记值) static __global__ void _findOuterContourKer( int inflagset[], int *outflagset, int devclassarr[], int width, int height, int *classnum) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算输入坐标点在输入数组中对应的数组下标 int flagidx = r * width + c, newidx; if (inflagset[flagidx] == DILATE_CIRCLE) { for (int i = 0; i < 8; i++) { newidx = (_eightNeighDev[i][1] + r) * width + _eightNeighDev[i][0] + c; if (!inflagset[newidx]) { outflagset[flagidx] = OUTER_CONTOUR; *classnum = devclassarr[flagidx]; break; } } } } // Kernel 函数:_fillOuterCircleKer(将外环内所有点标记值置为 150) static __global__ void _fillOuterCircleKer( int inflagset[], int *outflagset, int devclassarr[], int classnum[], int width, int height) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算输入坐标点在输入数组中对应的数组下标 int flagidx = r * width + c, newidx; // num 用来存储当前点八邻域范围内标记值为 50 的点个数 // classtotalnum 用来存储当前点八邻域范围内 class 值为 *classNum 的点个数 int num = 0, classtotalnum = 0; if (inflagset[flagidx] == DILATE_CIRCLE) { for (int i = 0; i < 8; i++) { newidx = (_eightNeighDev[i][1] + r) * width + _eightNeighDev[i][0] + c; if (inflagset[newidx] == DILATE_CIRCLE) num++; if (devclassarr[newidx] == *classnum) classtotalnum++; } } // 若当前点八邻域范围内皆为膨胀环内点时,则设置当前点为外环上点。 if (num == 8 && classtotalnum == 8) { outflagset[flagidx] = OUTER_CIRCLE; } } // Kernel 函数:_findInnerContourKer(修改内外环交界处点标记值,第一步) static __global__ void _findInnerContourKer( int outflagset[], int *devflagset, int width, int height) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算输入坐标点在输入数组中对应的数组下标 int flagidx = r * width + c, newidx = 0; if (outflagset[flagidx]== DILATE_CIRCLE) { for (int i = 0; i < 8; i++) { newidx = (_eightNeighDev[i][1] + r) * width + _eightNeighDev[i][0] + c; if (outflagset[newidx] == OUTER_CIRCLE || outflagset[newidx] == OUTER_CONTOUR) { devflagset[flagidx] = INNER_CONTOUR; break; } } } } // Kernel 函数:_findInnerContourSecondKer(修改内外环交界处点标记值,第二步) static __global__ void _findInnerContourSecondKer( int outflagset[], int *devflagset, int width, int height) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= width || r >= height) return; // 计算输入坐标点在输入数组中对应的数组下标 int flagidx = r * width + c; if 
(devflagset[flagidx] == INNER_CONTOUR) outflagset[flagidx] = INNER_CONTOUR; } // Kernel 函数:_contourSetToimgKer(将轮廓坐标输出到图像中) static __global__ void _contourSetToimgKer( int inflagset[], ImageCuda contourimg) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= contourimg.imgMeta.width || r >= contourimg.imgMeta.height) return; // 计算输入坐标点在图像中对应的数组下标 int inidx = r * contourimg.pitchBytes + c; // 计算输入坐标点在输入数组中对应的数组下标 int flagidx = r * contourimg.imgMeta.width + c; contourimg.imgMeta.imgData[inidx] = 0; if (inflagset[flagidx] == INNER_CONTOUR) contourimg.imgMeta.imgData[inidx] = 255; } // Kernel 函数:_innerConSetToimgKer(将内环内点坐标输出到图像中) static __global__ void _innerConSetToimgKer(int inflagset[], ImageCuda innerimg) { // 计算线程对应的输出点的位置,其中 c 和 r 分别表示线程处理的像素点的 // 坐标的 x 和 y 分量(其中,c 表示 column;r 表示 row)。 int c = blockIdx.x * blockDim.x + threadIdx.x; int r = blockIdx.y * blockDim.y + threadIdx.y; // 检查像素点是否越界,如果越界,则不进行处理,一方面节省计算资源,一 // 方面防止由于段错误导致的程序崩溃。 if (c >= innerimg.imgMeta.width || r >= innerimg.imgMeta.height) return; // 计算输入坐标点在图像中对应的数组下标 int inidx = r * innerimg.pitchBytes + c; // 计算输入坐标点在输入数组中对应的数组下标 int flagidx = r * innerimg.imgMeta.width + c; innerimg.imgMeta.imgData[inidx] = 0; if (inflagset[flagidx] == INNER_ORIGIN_CONTOUR || inflagset[flagidx] == DILATE_CIRCLE) innerimg.imgMeta.imgData[inidx] = 255; } // Host 成员方法:findMinMaxCoordinates (计算最左最右最上最下点坐标值) // 根据输入闭曲线分别找到曲线最左,最右,最上,最下点的对应坐标值 __host__ int GetContourSet::findMinMaxCoordinates(CoordiSet *incoordiset, int *resultcs) { // 声明局部变量。 int i; int errcode = NO_ERROR; // 将 incoordiSet 坐标集拷贝至 host 端。 errcode = CoordiSetBasicOp::copyToHost(incoordiset); if (errcode != NO_ERROR) return errcode; // 初始化 x 和 y 方向上的最小最大值。 resultcs[0] = resultcs[1] = incoordiset->tplData[0]; resultcs[2] = resultcs[3] = incoordiset->tplData[1]; for (i = 1; i < incoordiset->count; i++) { // resultcs[0] 存储坐标点横坐标最大值 if (resultcs[0] > incoordiset->tplData[2 * i]) resultcs[0] = incoordiset->tplData[2 * i]; // resultcs[1] 存储坐标点横坐标最小值 if (resultcs[1] < incoordiset->tplData[2 * i]) resultcs[1] = incoordiset->tplData[2 * i]; // resultcs[2] 存储坐标点纵坐标最大值 if (resultcs[2] > incoordiset->tplData[2 * i + 1]) resultcs[2] = incoordiset->tplData[2 * i + 1]; // resultcs[3] 存储坐标点纵坐标最小值 if (resultcs[3] < incoordiset->tplData[2 * i + 1]) resultcs[3] = incoordiset->tplData[2 * i + 1]; } return errcode; } // Host 成员方法:sortContour (按序输出坐标点集) // 根据输入 inArray 按顺时针方向顺序输出有序的点集,并将结果 // 输出到一个坐标集 outcoordiset 中。 __host__ int GetContourSet::sortContour(int inarray[], CoordiSet *outcoordiset, int width, int height) { int errcode = NO_ERROR; // 局部变量,错误码 CoordiSetBasicOp::makeAtHost(outcoordiset, width * height); // bFindStartPoint 表示是否是否找到起始点及回到起始点 // bFindPoint 表示是否扫描到一个边界点 bool bFindStartPoint, bFindPoint; // startPW 和 startPH 分别表示起始点对应横坐标以及纵坐标 // curPW 和 curPH 分别表示当前点对应横坐标以及纵坐标 int startPW, startPH, curPW, curPH; // 定义扫描的八邻域方向坐标 static int direction[8][2] = { { -1, -1 }, { 0, -1 }, { 1, -1 }, { 1, 0 }, { 1, 1 }, { 0, 1 }, { -1, 1 }, { -1, 0 } }; int beginDirect; bFindStartPoint = false; int index = 0; int curvalue; // 找到最左下方的边界点 for (int j = 1; j < height - 1 && !bFindStartPoint; j++) { for (int i = 1; i < width - 1 && !bFindStartPoint; i++) { curvalue = inarray[j * width + i]; if (curvalue == INNER_CONTOUR) { bFindStartPoint = true; startPW = i; startPH = j; } } } // 由于起始点是在左下方,故起始扫描沿左上方向 beginDirect = 0; 
bFindStartPoint = false; curPW = startPW; curPH = startPH; while (!bFindStartPoint) { // 从起始点一直找边界,直到再次找到起始点为止 bFindPoint = false; while (!bFindPoint) { // 沿扫描方向,获取左上方向像素点灰度值 curvalue = inarray[(curPH + direction[beginDirect][1]) * width + curPW + direction[beginDirect][0]]; if (curvalue == INNER_CONTOUR) { bFindPoint = true; outcoordiset->tplData[2 * index] = curPW; outcoordiset->tplData[2 * index + 1] = curPH; index++; curPW = curPW + direction[beginDirect][0]; curPH = curPH + direction[beginDirect][1]; if (curPH == startPH && curPW == startPW) bFindStartPoint = true; // 扫描的方向逆时针旋转两格 beginDirect--; if (beginDirect == -1) beginDirect = 7; beginDirect--; if (beginDirect == -1) beginDirect = 7; } else { // 扫描的方向顺时针旋转一格 beginDirect++; if (beginDirect == 8) beginDirect = 0; } } } // 修改输出坐标集的大小为轮廓点个数,即 index outcoordiset->count = index; return errcode; } // 宏:FAIL_GET_CONTOUR_SET_FREE // 如果出错,就释放之前申请的内存。 #define FAIL_GET_CONTOUR_SET_FREE do { \ if (alldatadev != NULL) \ cudaFree(alldatadev); \ if (resultcs != NULL) \ delete [] resultcs; \ if (tmpimg != NULL) \ ImageBasicOp::deleteImage(tmpimg); \ if (outimg != NULL) \ ImageBasicOp::deleteImage(outimg); \ if (coorForTorus != NULL) \ CoordiSetBasicOp::deleteCoordiSet(coorForTorus); \ if (torusClass != NULL) \ delete [] torusClass; \ if (classArr != NULL) \ delete [] classArr; \ } while (0) // Host 成员方法:getContourSet(执行有连接性的封闭轮廓的获得算法) // 根据输入输入闭曲线坐标点集,生成内环内所有点坐标点集到 innerCoordiset 中, // 生成内外环交界处坐标点集并输出到 contourCoordiset 中。 __host__ int GetContourSet::getContourSet(CoordiSet *incoordiset, CoordiSet *innercoordiset, CoordiSet *contourcoordiset) { // 检查输入输出坐标集是否为 NULL,如果为 NULL 直接报错返回。 if (incoordiset == NULL || innercoordiset == NULL || contourcoordiset == NULL) return NULL_POINTER; // 这一段代码进行图像的预处理工作。图像的预处理主要完成在 Device 内存上为 // 输入和输出图像准备内存空间,以便盛放数据。 int errcode; // 局部变量,错误码 int *resultcs; // 声明需要的指针变量。 int *alldatadev = NULL; // 声明 Device 端需要的变量。 int *devFlagset = NULL, *outFlagset = NULL; int *devNum = NULL, *devClassArr = NULL; // 声明需要使用的中间图像 tmpimg 和 outimg Image *tmpimg, *outimg; // 声明调用二分类方法需要使用的临时变量, // 分别为 coorForTorus,torusClass 以及 classArr。 CoordiSet *coorForTorus; unsigned char *torusClass = NULL; int *classArr = NULL; // 为 resultcs 分配大小,用来存储输入闭曲线的最左最右最上最下点的坐标值。 resultcs = new int[4]; if (resultcs == NULL) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return OUT_OF_MEM; } // 在 host 端分配 tmpimg 以及 outimg errcode = ImageBasicOp::newImage(&tmpimg); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = ImageBasicOp::newImage(&outimg); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 分别找到输入闭曲线的最左最右最上最下点的坐标值, // 并将其输出到一个大小为 4 的数组中。 errcode = GetContourSet::findMinMaxCoordinates(incoordiset, resultcs); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 计算应生成图像的长和宽 int width = resultcs[1] - resultcs[0] + 2 * radiusForCurve + 3; int height = resultcs[3] - resultcs[2] + 2 * radiusForCurve + 3; // 将图像拷贝到 Device 内存中。 errcode = ImageBasicOp::makeAtCurrentDevice(tmpimg, width, height); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 将图像拷贝到 Host 内存中。 errcode = ImageBasicOp::makeAtHost(outimg, width, height); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 一次性申请 Device 端需要的所有空间。 errcode = cudaMalloc((void **)&alldatadev, (1 + 3 * width * height) * sizeof (int)); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 初始化 GetContourSet 累加器在 Device 
上的内存空间。 errcode = cudaMemset(alldatadev, 0, (1 + 3 * width * height) * sizeof (int)); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 通过偏移得到各指针的地址。 devNum = alldatadev; devFlagset = alldatadev + 1; outFlagset = alldatadev + 1 + width * height; devClassArr = alldatadev + 1 + 2 * width * height; // 将 incoordiset 拷贝到 Device 内存中。 errcode = CoordiSetBasicOp::copyToCurrentDevice(incoordiset); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 提取图像的 ROI 子图像。 ImageCuda tmpsubimgCud; errcode = ImageBasicOp::roiSubImage(tmpimg, &tmpsubimgCud); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } tmpsubimgCud.imgMeta.width = width; tmpsubimgCud.imgMeta.height = height; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 grid, block; block.x = DEF_BLOCK_X; block.y = DEF_BLOCK_Y; block.z = 1; grid.x = (width + block.x - 1) / block.x; grid.y = (height + block.y - 1) / block.y; grid.z = 1; _imginitKer<<<grid, block>>>(tmpsubimgCud); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = 1; blocksize.z = 1; gridsize.x = (incoordiset->count + blocksize.x - 1) / blocksize.x; gridsize.y = 1; gridsize.z = 1; _initInimgKer<<<gridsize, blocksize>>>(*incoordiset, resultcs[0], resultcs[2], radiusForCurve, tmpsubimgCud); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 将 tmpimg 拷贝至 host 端,便于执行后续的膨胀操作。 errcode = ImageBasicOp::copyToHost(tmpimg); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 调用已封装好的膨胀操作 morForCurve.dilate(tmpimg, outimg); // 声明 ImgConvert 类变量,进行图像转坐标集调用。 ImgConvert imgconForCurve; CoordiSetBasicOp::newCoordiSet(&coorForTorus); // 根据输入图像生成坐标集 coorForTorus errcode = imgconForCurve.imgConvertToCst(outimg, coorForTorus); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 为 torusClass 分配大小,用以存储生成的 class 数组。 torusClass = new unsigned char[coorForTorus->count]; if (torusClass == NULL) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return OUT_OF_MEM; } // 调用 TorusSegmentation 的 torusSegregate 操作,生成内外环 errcode = tsForCurve.torusSegregate(width, height, coorForTorus, torusClass); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = CoordiSetBasicOp::copyToHost(coorForTorus); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 为 classArr 分配大小,大小为 width * height 的整型数组, // 用以保存 torusClass 对应于图像索引的值 //(由于 torusClass 的大小为 coorForTorus->count) classArr = new int[width * height]; if (classArr == NULL) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return OUT_OF_MEM; } int x, y; for (int i = 0; i < coorForTorus->count; i++) { // 计算输入坐标集对应的横坐标与纵坐标 x = coorForTorus->tplData[2 * i]; y = coorForTorus->tplData[2 * i + 1]; // 将 torusClass 中的值赋值给 classArr 数组 classArr[y * width + x] = (int)torusClass[i]; } // 将 classArr 数组的值拷贝至 Device 端,便于进行 Device 端处理。 errcode = cudaMemcpy(devClassArr, classArr, width * height * sizeof (int), cudaMemcpyHostToDevice); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 将膨胀生成的图像 outimg 拷贝至 Device 端,便于进行 Device 端处理。 errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 提取生成图像的 ROI 子图像。 ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, 
&outsubimgCud); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } outsubimgCud.imgMeta.width = width; outsubimgCud.imgMeta.height = height; // 计算初始化块内内存时,共享内存的大小。 int smsize = sizeof (int) * (block.x + 2) * (block.y + 2); // 调用核函数,初始化每个线程块内标记值 _initLabelPerBlockKer<<<grid, block, smsize>>>( outsubimgCud, devFlagset); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 合并线程块时每次合并线程块的长、宽和高 int blockw, blockh, blockz; // 计算第一次合并时,应合并线程块的长、宽和高 // 第一次合并时,应合并线程块的长应为初始线程块长,宽为初始线程块宽 blockw = block.x; blockh = block.y; // 由于这里采用的是 3 维线程块,线程块的高设为初始线程块长和宽的较大者。 blockz = blockw; if (blockw < blockh) blockz = blockh; // 计算每次合并的线程块个数,在这里我们采用的是每次合并 4 × 4 的线程块, // 由于采用这种方式合并所需的迭代次数最少。 int xtiles = 4, ytiles = 4; // 计算合并线程块前 GRID 的长 int tilesizex = grid.x; // 计算合并线程块前 GRID 的宽 int tilesizey = grid.y; // 定义为进行线程块合并而采用的线程块与网格。 dim3 mrgblocksize, mrggridsize; // 由于每个线程块的大小限制为 1024,而 tilesizex * tilesizey * blockz 的值 // 为每次用来进行合并操作的三维线程块的最大大小,因此当该值不大于 1024 时, // 可将所有线程块放在一个三维线程块中合并,这样,我们就可以以该值是否 // 不大于 1024 来作为是否终止循环的判断条件。 while (tilesizex * tilesizey * blockz > 1024) { // 计算每次合并线程块时 GRID 的长,这里采用向上取整的方式 tilesizex = (tilesizex - 1) / xtiles + 1; // 计算每次合并线程块时 GRID 的宽,这里采用向上取整的方式 tilesizey = (tilesizey - 1) / ytiles + 1; // 设置为了合并而采用的三维线程块大小,这里采用的是 4 × 4 的方式, // 因此线程块的长为 4,宽也为 4,高则为 32。 mrgblocksize.x = xtiles; mrgblocksize.y = ytiles; mrgblocksize.z = blockz; // 设置为了合并而采用的二维网格的大小。 mrggridsize.x = tilesizex; mrggridsize.y = tilesizey; mrggridsize.z = 1; // 调用核函数,每次合并4 × 4 个线程块内的标记值 _mergeBordersKer<<<mrggridsize, mrgblocksize>>>( outsubimgCud, devFlagset, blockw, blockh, block.x, block.y); if (cudaGetLastError() != cudaSuccess) { FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 在每次迭代后,修改应合并线程块的长和宽,因为每次合并 4 * 4 个线程块, // 因此,经过迭代后,应合并线程块的长和宽应分别乘 4。 blockw *= xtiles; blockh *= ytiles; } // 进行最后一轮线程块的合并 // 计算该轮应采用的三维线程块大小 mrgblocksize.x = tilesizex; mrgblocksize.y = tilesizey; mrgblocksize.z = blockz; // 设置该论应采用的网格大小,长宽高分别为1。 mrggridsize.x = 1; mrggridsize.y = 1;mrggridsize.z = 1; // 调用核函数,进行最后一轮线程块合并 _mergeBordersKer<<<mrggridsize, mrgblocksize>>>( outsubimgCud, devFlagset, blockw, blockh, block.x, block.y); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 调用核函数,即找出每个结点对应的标记值, // 其中根节点的标记值与其自身在数组中的索引值一致 _findFinalLabelKer<<<grid, block>>>(devFlagset, width, height); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 调用核函数,初始化标记值数组,将环外点置为 0,环内点置为 10 _initFlagSetKer<<<grid, block>>>(outsubimgCud, devFlagset, outFlagset); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 调用核函数,将外环轮廓点标记值置为 100, // 同时将点的 class 值存入 classNum 变量中。 _findOuterContourKer<<<grid, block>>>(devFlagset, outFlagset, devClassArr,width, height, devNum); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 调用核函数,将外环上点标记值置为 150 _fillOuterCircleKer<<<grid, block>>>(devFlagset, outFlagset, devClassArr, devNum, width, height); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 调用核函数,将内外环交界处点标记值置为 200,第一步 _findInnerContourKer<<<grid, block>>>(outFlagset, devFlagset, width, height); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 调用核函数,将内外环交界处点标记值置为 200,第二步 _findInnerContourSecondKer<<<grid, block>>>(outFlagset, devFlagset, width, height); if (cudaGetLastError() != 
cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } // 调用核函数,将内环内点坐标输出到图像中。 _innerConSetToimgKer<<<grid, block>>>(outFlagset, outsubimgCud); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } errcode = ImageBasicOp::copyToHost(outimg); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 调用已封装好的图像转坐标集函数,生成内环内点坐标集。 errcode = imgconForCurve.clearAllConvertFlags(); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = imgconForCurve.setConvertFlag(255); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = imgconForCurve.imgConvertToCst(outimg, innercoordiset); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = CoordiSetBasicOp::copyToHost(innercoordiset); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 当 inorder 参数表示为有序时,调用 host 端 SortContour 函数输出数组, // 否则调用 reindex 实现乱序输出。 if (inorder) { int *hostFlagset = new int[width * height]; if (hostFlagset == NULL) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return OUT_OF_MEM; } errcode = cudaMemcpy(hostFlagset, outFlagset, width * height * sizeof (int), cudaMemcpyDeviceToHost); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } errcode = sortContour(hostFlagset, contourcoordiset, width, height); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; delete [] hostFlagset; return errcode; } if (hostFlagset != NULL) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; delete [] hostFlagset; return errcode; } } else { ImageBasicOp::copyToCurrentDevice(outimg); // 调用核函数,将轮廓点坐标输出到图像中。 _contourSetToimgKer<<<grid, block>>>(outFlagset, outsubimgCud); if (cudaGetLastError() != cudaSuccess) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return CUDA_ERROR; } errcode = ImageBasicOp::copyToHost(outimg); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = imgconForCurve.clearAllConvertFlags(); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = imgconForCurve.setConvertFlag(255); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } // 调用已封装好的图像转坐标集函数,生成内环内点坐标集。 errcode = imgconForCurve.imgConvertToCst(outimg, contourcoordiset); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } errcode = CoordiSetBasicOp::copyToHost(contourcoordiset); if (errcode != NO_ERROR) { // 释放内存空间。 FAIL_GET_CONTOUR_SET_FREE; return errcode; } } // 释放内存 FAIL_GET_CONTOUR_SET_FREE; // 处理完毕,退出。 return NO_ERROR; }
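// A minimal host-side sketch of the 8-neighbour boundary following performed at
// the top of this file: the scan direction rotates clockwise by one step while
// searching, and counter-clockwise by two steps after each hit, until the start
// pixel is reached again.  The direction table, the INNER_CONTOUR marker value
// (assumed to be 100 here) and the std::vector output are assumptions made for
// this sketch only; they are not taken from the class above, whose direction[]
// table is defined elsewhere.
#include <utility>
#include <vector>

namespace contour_sketch {

static const int kDirections[8][2] = {
    { 1,  0}, { 1,  1}, { 0,  1}, {-1,  1},
    {-1,  0}, {-1, -1}, { 0, -1}, { 1, -1}
};

// Trace one closed contour on a labelled image (row-major, width * height).
// Pixels on the contour are assumed to carry the value `marker`.
inline std::vector<std::pair<int, int> > traceContour(
        const int *image, int width, int height,
        int startX, int startY, int marker = 100 /* assumed INNER_CONTOUR */) {
    std::vector<std::pair<int, int> > points;
    int curX = startX, curY = startY;
    int dir = 0;                       // current scan direction
    bool backAtStart = false;

    while (!backAtStart) {
        bool found = false;
        int probes = 0;
        while (!found && probes < 8) { // at most 8 probes per pixel
            int nx = curX + kDirections[dir][0];
            int ny = curY + kDirections[dir][1];
            bool inside = nx >= 0 && nx < width && ny >= 0 && ny < height;
            if (inside && image[ny * width + nx] == marker) {
                found = true;
                points.emplace_back(curX, curY);   // record, then move
                curX = nx;
                curY = ny;
                if (curX == startX && curY == startY)
                    backAtStart = true;
                dir = (dir + 6) % 8;   // rotate two steps counter-clockwise
            } else {
                dir = (dir + 1) % 8;   // rotate one step clockwise
                ++probes;
            }
        }
        if (!found) break;             // isolated pixel: nothing to follow
    }
    return points;
}

}  // namespace contour_sketch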
namespace dgl { namespace transform { namespace impl { /*! * \brief Utility class used to avoid linker errors with extern * unsized shared memory arrays with templated type */ template <typename Type> struct SharedMemory { __device__ inline operator Type* () { extern __shared__ int __smem[]; return reinterpret_cast<Type*>(__smem); } __device__ inline operator const Type* () const { extern __shared__ int __smem[]; return reinterpret_cast<Type*>(__smem); } }; // specialize for double to avoid unaligned memory // access compile errors template <> struct SharedMemory<double> { __device__ inline operator double* () { extern __shared__ double __smem_d[]; return reinterpret_cast<double*>(__smem_d); } __device__ inline operator const double* () const { extern __shared__ double __smem_d[]; return reinterpret_cast<double*>(__smem_d); } }; /*! \brief Compute Euclidean distance between two vectors in a cuda kernel */ template <typename FloatType, typename IdType> __device__ FloatType EuclideanDist(const FloatType* vec1, const FloatType* vec2, const int64_t dim) { FloatType dist = 0; IdType idx = 0; for (; idx < dim - 3; idx += 4) { FloatType diff0 = vec1[idx] - vec2[idx]; FloatType diff1 = vec1[idx + 1] - vec2[idx + 1]; FloatType diff2 = vec1[idx + 2] - vec2[idx + 2]; FloatType diff3 = vec1[idx + 3] - vec2[idx + 3]; dist += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; } for (; idx < dim; ++idx) { FloatType diff = vec1[idx] - vec2[idx]; dist += diff * diff; } return dist; } /*! * \brief Compute Euclidean distance between two vectors in a cuda kernel, * return positive infinite value if the intermediate distance is greater * than the worst distance. */ template <typename FloatType, typename IdType> __device__ FloatType EuclideanDistWithCheck(const FloatType* vec1, const FloatType* vec2, const int64_t dim, const FloatType worst_dist) { FloatType dist = 0; IdType idx = 0; bool early_stop = false; for (; idx < dim - 3; idx += 4) { FloatType diff0 = vec1[idx] - vec2[idx]; FloatType diff1 = vec1[idx + 1] - vec2[idx + 1]; FloatType diff2 = vec1[idx + 2] - vec2[idx + 2]; FloatType diff3 = vec1[idx + 3] - vec2[idx + 3]; dist += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; if (dist > worst_dist) { early_stop = true; idx = dim; break; } } for (; idx < dim; ++idx) { FloatType diff = vec1[idx] - vec2[idx]; dist += diff * diff; if (dist > worst_dist) { early_stop = true; break; } } if (early_stop) { return std::numeric_limits<FloatType>::max(); } else { return dist; } } template <typename FloatType, typename IdType> __device__ void BuildHeap(IdType* indices, FloatType* dists, int size) { for (int i = size / 2 - 1; i >= 0; --i) { IdType idx = i; while (true) { IdType largest = idx; IdType left = idx * 2 + 1; IdType right = left + 1; if (left < size && dists[left] > dists[largest]) { largest = left; } if (right < size && dists[right] > dists[largest]) { largest = right; } if (largest != idx) { IdType tmp_idx = indices[largest]; indices[largest] = indices[idx]; indices[idx] = tmp_idx; FloatType tmp_dist = dists[largest]; dists[largest] = dists[idx]; dists[idx] = tmp_dist; idx = largest; } else { break; } } } } template <typename FloatType, typename IdType> __device__ void HeapInsert(IdType* indices, FloatType* dist, IdType new_idx, FloatType new_dist, int size, bool check_repeat = false) { if (new_dist > dist[0]) return; // check if we have it if (check_repeat) { for (IdType i = 0; i < size; ++i) { if (indices[i] == new_idx) return; } } IdType left = 0, right = 0, idx = 0, largest = 
0; dist[0] = new_dist; indices[0] = new_idx; while (true) { left = idx * 2 + 1; right = left + 1; if (left < size && dist[left] > dist[largest]) { largest = left; } if (right < size && dist[right] > dist[largest]) { largest = right; } if (largest != idx) { IdType tmp_idx = indices[idx]; indices[idx] = indices[largest]; indices[largest] = tmp_idx; FloatType tmp_dist = dist[idx]; dist[idx] = dist[largest]; dist[largest] = tmp_dist; idx = largest; } else { break; } } } template <typename FloatType, typename IdType> __device__ bool FlaggedHeapInsert(IdType* indices, FloatType* dist, bool* flags, IdType new_idx, FloatType new_dist, bool new_flag, int size, bool check_repeat = false) { if (new_dist > dist[0]) return false; // check if we have it if (check_repeat) { for (IdType i = 0; i < size; ++i) { if (indices[i] == new_idx) return false; } } IdType left = 0, right = 0, idx = 0, largest = 0; dist[0] = new_dist; indices[0] = new_idx; flags[0] = new_flag; while (true) { left = idx * 2 + 1; right = left + 1; if (left < size && dist[left] > dist[largest]) { largest = left; } if (right < size && dist[right] > dist[largest]) { largest = right; } if (largest != idx) { IdType tmp_idx = indices[idx]; indices[idx] = indices[largest]; indices[largest] = tmp_idx; FloatType tmp_dist = dist[idx]; dist[idx] = dist[largest]; dist[largest] = tmp_dist; bool tmp_flag = flags[idx]; flags[idx] = flags[largest]; flags[largest] = tmp_flag; idx = largest; } else { break; } } return true; } /*! * \brief Brute force kNN kernel. Compute distance for each pair of input points and get * the result directly (without a distance matrix). */ template <typename FloatType, typename IdType> __global__ void BruteforceKnnKernel(const FloatType* data_points, const IdType* data_offsets, const FloatType* query_points, const IdType* query_offsets, const int k, FloatType* dists, IdType* query_out, IdType* data_out, const int64_t num_batches, const int64_t feature_size) { const IdType q_idx = blockIdx.x * blockDim.x + threadIdx.x; if (q_idx >= query_offsets[num_batches]) return; IdType batch_idx = 0; for (IdType b = 0; b < num_batches + 1; ++b) { if (query_offsets[b] > q_idx) { batch_idx = b - 1; break; } } const IdType data_start = data_offsets[batch_idx], data_end = data_offsets[batch_idx + 1]; for (IdType k_idx = 0; k_idx < k; ++k_idx) { query_out[q_idx * k + k_idx] = q_idx; dists[q_idx * k + k_idx] = std::numeric_limits<FloatType>::max(); } FloatType worst_dist = std::numeric_limits<FloatType>::max(); for (IdType d_idx = data_start; d_idx < data_end; ++d_idx) { FloatType tmp_dist = EuclideanDistWithCheck<FloatType, IdType>( query_points + q_idx * feature_size, data_points + d_idx * feature_size, feature_size, worst_dist); IdType out_offset = q_idx * k; HeapInsert<FloatType, IdType>(data_out + out_offset, dists + out_offset, d_idx, tmp_dist, k); worst_dist = dists[q_idx * k]; } } /*! * \brief Same as BruteforceKnnKernel, but use shared memory as buffer. * This kernel divides query points and data points into blocks. For each * query block, it will make a loop over all data blocks and compute distances. * This kernel is faster when the dimension of input points is not large. 
*/ template <typename FloatType, typename IdType> __global__ void BruteforceKnnShareKernel(const FloatType* data_points, const IdType* data_offsets, const FloatType* query_points, const IdType* query_offsets, const IdType* block_batch_id, const IdType* local_block_id, const int k, FloatType* dists, IdType* query_out, IdType* data_out, const int64_t num_batches, const int64_t feature_size) { const IdType block_idx = static_cast<IdType>(blockIdx.x); const IdType block_size = static_cast<IdType>(blockDim.x); const IdType batch_idx = block_batch_id[block_idx]; const IdType local_bid = local_block_id[block_idx]; const IdType query_start = query_offsets[batch_idx] + block_size * local_bid; const IdType query_end = min(query_start + block_size, query_offsets[batch_idx + 1]); if (query_start >= query_end) return; const IdType query_idx = query_start + threadIdx.x; const IdType data_start = data_offsets[batch_idx]; const IdType data_end = data_offsets[batch_idx + 1]; // shared memory: points in block + distance buffer + result buffer FloatType* data_buff = SharedMemory<FloatType>(); FloatType* query_buff = data_buff + block_size * feature_size; FloatType* dist_buff = query_buff + block_size * feature_size; IdType* res_buff = reinterpret_cast<IdType*>(dist_buff + block_size * k); FloatType worst_dist = std::numeric_limits<FloatType>::max(); // initialize dist buff with inf value for (auto i = 0; i < k; ++i) { dist_buff[threadIdx.x * k + i] = std::numeric_limits<FloatType>::max(); } // load query data to shared memory if (query_idx < query_end) { for (auto i = 0; i < feature_size; ++i) { // to avoid bank conflict, we use transpose here query_buff[threadIdx.x + i * block_size] = query_points[query_idx * feature_size + i]; } } // perform computation on each tile for (auto tile_start = data_start; tile_start < data_end; tile_start += block_size) { // each thread load one data point into the shared memory IdType load_idx = tile_start + threadIdx.x; if (load_idx < data_end) { for (auto i = 0; i < feature_size; ++i) { data_buff[threadIdx.x * feature_size + i] = data_points[load_idx * feature_size + i]; } } __syncthreads(); // compute distance for one tile IdType true_block_size = min(data_end - tile_start, block_size); if (query_idx < query_end) { for (IdType d_idx = 0; d_idx < true_block_size; ++d_idx) { FloatType tmp_dist = 0; bool early_stop = false; IdType dim_idx = 0; for (; dim_idx < feature_size - 3; dim_idx += 4) { FloatType diff0 = query_buff[threadIdx.x + block_size * (dim_idx)] - data_buff[d_idx * feature_size + dim_idx]; FloatType diff1 = query_buff[threadIdx.x + block_size * (dim_idx + 1)] - data_buff[d_idx * feature_size + dim_idx + 1]; FloatType diff2 = query_buff[threadIdx.x + block_size * (dim_idx + 2)] - data_buff[d_idx * feature_size + dim_idx + 2]; FloatType diff3 = query_buff[threadIdx.x + block_size * (dim_idx + 3)] - data_buff[d_idx * feature_size + dim_idx + 3]; tmp_dist += diff0 * diff0 + diff1 * diff1 + diff2 * diff2 + diff3 * diff3; if (tmp_dist > worst_dist) { early_stop = true; dim_idx = feature_size; break; } } for (; dim_idx < feature_size; ++dim_idx) { const FloatType diff = query_buff[threadIdx.x + dim_idx * block_size] - data_buff[d_idx * feature_size + dim_idx]; tmp_dist += diff * diff; if (tmp_dist > worst_dist) { early_stop = true; break; } } if (early_stop) continue; HeapInsert<FloatType, IdType>( res_buff + threadIdx.x * k, dist_buff + threadIdx.x * k, d_idx + tile_start, tmp_dist, k); worst_dist = dist_buff[threadIdx.x * k]; } } } // copy result to global memory if 
(query_idx < query_end) { for (auto i = 0; i < k; ++i) { dists[query_idx * k + i] = dist_buff[threadIdx.x * k + i]; data_out[query_idx * k + i] = res_buff[threadIdx.x * k + i]; query_out[query_idx * k + i] = query_idx; } } } /*! \brief determine the number of blocks for each segment */ template <typename IdType> __global__ void GetNumBlockPerSegment(const IdType* offsets, IdType* out, const int64_t batch_size, const int64_t block_size) { const IdType idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < batch_size) { out[idx] = (offsets[idx + 1] - offsets[idx] - 1) / block_size + 1; } } /*! \brief Get the batch index and local index in segment for each block */ template <typename IdType> __global__ void GetBlockInfo(const IdType* num_block_prefixsum, IdType* block_batch_id, IdType* local_block_id, size_t batch_size, size_t num_blocks) { const IdType idx = blockIdx.x * blockDim.x + threadIdx.x; IdType i = 0; if (idx < num_blocks) { for (; i < batch_size; ++i) { if (num_block_prefixsum[i] > idx) break; } i--; block_batch_id[idx] = i; local_block_id[idx] = idx - num_block_prefixsum[i]; } } /*! * \brief Brute force kNN. Compute distance for each pair of input points and get * the result directly (without a distance matrix). * * \tparam FloatType The type of input points. * \tparam IdType The type of id. * \param data_points NDArray of dataset points. * \param data_offsets offsets of point index in data points. * \param query_points NDArray of query points * \param query_offsets offsets of point index in query points. * \param k the number of nearest points * \param result output array */ template <typename FloatType, typename IdType> void BruteForceKNNCuda(const NDArray& data_points, const IdArray& data_offsets, const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = data_points->ctx; auto device = runtime::DeviceAPI::Get(ctx); const int64_t batch_size = data_offsets->shape[0] - 1; const int64_t feature_size = data_points->shape[1]; const IdType* data_offsets_data = data_offsets.Ptr<IdType>(); const IdType* query_offsets_data = query_offsets.Ptr<IdType>(); const FloatType* data_points_data = data_points.Ptr<FloatType>(); const FloatType* query_points_data = query_points.Ptr<FloatType>(); IdType* query_out = result.Ptr<IdType>(); IdType* data_out = query_out + k * query_points->shape[0]; FloatType* dists = static_cast<FloatType*>(device->AllocWorkspace( ctx, k * query_points->shape[0] * sizeof(FloatType))); const int64_t block_size = cuda::FindNumThreads(query_points->shape[0]); const int64_t num_blocks = (query_points->shape[0] - 1) / block_size + 1; CUDA_KERNEL_CALL(BruteforceKnnKernel, num_blocks, block_size, 0, thr_entry->stream, data_points_data, data_offsets_data, query_points_data, query_offsets_data, k, dists, query_out, data_out, batch_size, feature_size); device->FreeWorkspace(ctx, dists); } /*! * \brief Brute force kNN with shared memory. * This function divides query points and data points into blocks. For each * query block, it will make a loop over all data blocks and compute distances. * It will be faster when the dimension of input points is not large. * * \tparam FloatType The type of input points. * \tparam IdType The type of id. * \param data_points NDArray of dataset points. * \param data_offsets offsets of point index in data points. * \param query_points NDArray of query points * \param query_offsets offsets of point index in query points. 
* \param k the number of nearest points * \param result output array */ template <typename FloatType, typename IdType> void BruteForceKNNSharedCuda(const NDArray& data_points, const IdArray& data_offsets, const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = data_points->ctx; auto device = runtime::DeviceAPI::Get(ctx); const int64_t batch_size = data_offsets->shape[0] - 1; const int64_t feature_size = data_points->shape[1]; const IdType* data_offsets_data = data_offsets.Ptr<IdType>(); const IdType* query_offsets_data = query_offsets.Ptr<IdType>(); const FloatType* data_points_data = data_points.Ptr<FloatType>(); const FloatType* query_points_data = query_points.Ptr<FloatType>(); IdType* query_out = result.Ptr<IdType>(); IdType* data_out = query_out + k * query_points->shape[0]; // get max shared memory per block in bytes // determine block size according to this value int max_sharedmem_per_block = 0; CUDA_CALL(cudaDeviceGetAttribute( &max_sharedmem_per_block, cudaDevAttrMaxSharedMemoryPerBlock, ctx.device_id)); const int64_t single_shared_mem = (k + 2 * feature_size) * sizeof(FloatType) + k * sizeof(IdType); const int64_t block_size = cuda::FindNumThreads(max_sharedmem_per_block / single_shared_mem); // Determine the number of blocks. We first get the number of blocks for each // segment. Then we get the block id offset via prefix sum. IdType* num_block_per_segment = static_cast<IdType*>( device->AllocWorkspace(ctx, batch_size * sizeof(IdType))); IdType* num_block_prefixsum = static_cast<IdType*>( device->AllocWorkspace(ctx, batch_size * sizeof(IdType))); // block size for GetNumBlockPerSegment computation int64_t temp_block_size = cuda::FindNumThreads(batch_size); int64_t temp_num_blocks = (batch_size - 1) / temp_block_size + 1; CUDA_KERNEL_CALL(GetNumBlockPerSegment, temp_num_blocks, temp_block_size, 0, thr_entry->stream, query_offsets_data, num_block_per_segment, batch_size, block_size); size_t prefix_temp_size = 0; CUDA_CALL(cub::DeviceScan::ExclusiveSum( nullptr, prefix_temp_size, num_block_per_segment, num_block_prefixsum, batch_size)); void* prefix_temp = device->AllocWorkspace(ctx, prefix_temp_size); CUDA_CALL(cub::DeviceScan::ExclusiveSum( prefix_temp, prefix_temp_size, num_block_per_segment, num_block_prefixsum, batch_size, thr_entry->stream)); device->FreeWorkspace(ctx, prefix_temp); int64_t num_blocks = 0, final_elem = 0, copyoffset = (batch_size - 1) * sizeof(IdType); device->CopyDataFromTo( num_block_prefixsum, copyoffset, &num_blocks, 0, sizeof(IdType), ctx, DLContext{kDLCPU, 0}, query_offsets->dtype, thr_entry->stream); device->CopyDataFromTo( num_block_per_segment, copyoffset, &final_elem, 0, sizeof(IdType), ctx, DLContext{kDLCPU, 0}, query_offsets->dtype, thr_entry->stream); num_blocks += final_elem; device->FreeWorkspace(ctx, num_block_per_segment); device->FreeWorkspace(ctx, num_block_prefixsum); // get batch id and local id in segment temp_block_size = cuda::FindNumThreads(num_blocks); temp_num_blocks = (num_blocks - 1) / temp_block_size + 1; IdType* block_batch_id = static_cast<IdType*>(device->AllocWorkspace( ctx, num_blocks * sizeof(IdType))); IdType* local_block_id = static_cast<IdType*>(device->AllocWorkspace( ctx, num_blocks * sizeof(IdType))); CUDA_KERNEL_CALL( GetBlockInfo, temp_num_blocks, temp_block_size, 0, thr_entry->stream, num_block_prefixsum, block_batch_id, local_block_id, batch_size, num_blocks); FloatType* dists = 
static_cast<FloatType*>(device->AllocWorkspace( ctx, k * query_points->shape[0] * sizeof(FloatType))); CUDA_KERNEL_CALL(BruteforceKnnShareKernel, num_blocks, block_size, single_shared_mem * block_size, thr_entry->stream, data_points_data, data_offsets_data, query_points_data, query_offsets_data, block_batch_id, local_block_id, k, dists, query_out, data_out, batch_size, feature_size); device->FreeWorkspace(ctx, dists); device->FreeWorkspace(ctx, local_block_id); device->FreeWorkspace(ctx, block_batch_id); } /*! \brief Setup rng state for nn-descent */ __global__ void SetupRngKernel(curandState* states, const uint64_t seed, const size_t n) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; if (id < n) { curand_init(seed, id, 0, states + id); } } /*! * \brief Randomly initialize neighbors (sampling without replacement) * for each nodes */ template <typename FloatType, typename IdType> __global__ void RandomInitNeighborsKernel(const FloatType* points, const IdType* offsets, IdType* central_nodes, IdType* neighbors, FloatType* dists, bool* flags, const int k, const int64_t feature_size, const int64_t batch_size, const uint64_t seed) { const IdType point_idx = blockIdx.x * blockDim.x + threadIdx.x; IdType batch_idx = 0; if (point_idx >= offsets[batch_size]) return; curandState state; curand_init(seed, point_idx, 0, &state); // find the segment location in the input batch for (IdType b = 0; b < batch_size + 1; ++b) { if (offsets[b] > point_idx) { batch_idx = b - 1; break; } } const IdType segment_size = offsets[batch_idx + 1] - offsets[batch_idx]; IdType* current_neighbors = neighbors + point_idx * k; IdType* current_central_nodes = central_nodes + point_idx * k; bool* current_flags = flags + point_idx * k; FloatType* current_dists = dists + point_idx * k; IdType segment_start = offsets[batch_idx]; // reservoir sampling for (IdType i = 0; i < k; ++i) { current_neighbors[i] = i + segment_start; current_central_nodes[i] = point_idx; } for (IdType i = k; i < segment_size; ++i) { const IdType j = static_cast<IdType>(curand(&state) % (i + 1)); if (j < k) current_neighbors[j] = i + segment_start; } // compute distances and set flags for (IdType i = 0; i < k; ++i) { current_flags[i] = true; current_dists[i] = EuclideanDist<FloatType, IdType>( points + point_idx * feature_size, points + current_neighbors[i] * feature_size, feature_size); } // build heap BuildHeap<FloatType, IdType>(neighbors + point_idx * k, current_dists, k); } /*! 
\brief Randomly select candidates from current knn and reverse-knn graph for nn-descent */ template <typename IdType> __global__ void FindCandidatesKernel(const IdType* offsets, IdType* new_candidates, IdType* old_candidates, IdType* neighbors, bool* flags, const uint64_t seed, const int64_t batch_size, const int num_candidates, const int k) { const IdType point_idx = blockIdx.x * blockDim.x + threadIdx.x; IdType batch_idx = 0; if (point_idx >= offsets[batch_size]) return; curandState state; curand_init(seed, point_idx, 0, &state); // find the segment location in the input batch for (IdType b = 0; b < batch_size + 1; ++b) { if (offsets[b] > point_idx) { batch_idx = b - 1; break; } } IdType segment_start = offsets[batch_idx], segment_end = offsets[batch_idx + 1]; IdType* current_neighbors = neighbors + point_idx * k; bool* current_flags = flags + point_idx * k; // reset candidates IdType* new_candidates_ptr = new_candidates + point_idx * (num_candidates + 1); IdType* old_candidates_ptr = old_candidates + point_idx * (num_candidates + 1); new_candidates_ptr[0] = 0; old_candidates_ptr[0] = 0; // select candidates from current knn graph // here we use candidate[0] for reservoir sampling temporarily for (IdType i = 0; i < k; ++i) { IdType candidate = current_neighbors[i]; IdType* candidate_array = current_flags[i] ? new_candidates_ptr : old_candidates_ptr; IdType curr_num = candidate_array[0]; IdType* candidate_data = candidate_array + 1; // reservoir sampling if (curr_num < num_candidates) { candidate_data[curr_num] = candidate; } else { IdType pos = static_cast<IdType>(curand(&state) % (curr_num + 1)); if (pos < num_candidates) candidate_data[pos] = candidate; } ++candidate_array[0]; } // select candidates from current reverse knn graph // here we use candidate[0] for reservoir sampling temporarily IdType index_start = segment_start * k, index_end = segment_end * k; for (IdType i = index_start; i < index_end; ++i) { if (neighbors[i] == point_idx) { IdType reverse_candidate = (i - index_start) / k + segment_start; IdType* candidate_array = flags[i] ? new_candidates_ptr : old_candidates_ptr; IdType curr_num = candidate_array[0]; IdType* candidate_data = candidate_array + 1; // reservoir sampling if (curr_num < num_candidates) { candidate_data[curr_num] = reverse_candidate; } else { IdType pos = static_cast<IdType>(curand(&state) % (curr_num + 1)); if (pos < num_candidates) candidate_data[pos] = reverse_candidate; } ++candidate_array[0]; } } // set candidate[0] back to length if (new_candidates_ptr[0] > num_candidates) new_candidates_ptr[0] = num_candidates; if (old_candidates_ptr[0] > num_candidates) old_candidates_ptr[0] = num_candidates; // mark new_candidates as old IdType num_new_candidates = new_candidates_ptr[0]; for (IdType i = 0; i < k; ++i) { IdType neighbor_idx = current_neighbors[i]; if (current_flags[i]) { for (IdType j = 1; j < num_new_candidates + 1; ++j) { if (new_candidates_ptr[j] == neighbor_idx) { current_flags[i] = false; break; } } } } } /*! 
\brief Update knn graph according to selected candidates for nn-descent */ template <typename FloatType, typename IdType> __global__ void UpdateNeighborsKernel(const FloatType* points, const IdType* offsets, IdType* neighbors, IdType* new_candidates, IdType* old_candidates, FloatType* distances, bool* flags, IdType* num_updates, const int64_t batch_size, const int num_candidates, const int k, const int64_t feature_size) { const IdType point_idx = blockIdx.x * blockDim.x + threadIdx.x; if (point_idx >= offsets[batch_size]) return; IdType* current_neighbors = neighbors + point_idx * k; bool* current_flags = flags + point_idx * k; FloatType* current_dists = distances + point_idx * k; IdType* new_candidates_ptr = new_candidates + point_idx * (num_candidates + 1); IdType* old_candidates_ptr = old_candidates + point_idx * (num_candidates + 1); IdType num_new_candidates = new_candidates_ptr[0]; IdType num_old_candidates = old_candidates_ptr[0]; IdType current_num_updates = 0; // process new candidates for (IdType i = 1; i <= num_new_candidates; ++i) { IdType new_c = new_candidates_ptr[i]; // new/old candidates of the current new candidate IdType* twohop_new_ptr = new_candidates + new_c * (num_candidates + 1); IdType* twohop_old_ptr = old_candidates + new_c * (num_candidates + 1); IdType num_twohop_new = twohop_new_ptr[0]; IdType num_twohop_old = twohop_old_ptr[0]; FloatType worst_dist = current_dists[0]; // new - new for (IdType j = 1; j <= num_twohop_new; ++j) { IdType twohop_new_c = twohop_new_ptr[j]; FloatType new_dist = EuclideanDistWithCheck<FloatType, IdType>( points + point_idx * feature_size, points + twohop_new_c * feature_size, feature_size, worst_dist); if (FlaggedHeapInsert<FloatType, IdType>( current_neighbors, current_dists, current_flags, twohop_new_c, new_dist, true, k, true)) { ++current_num_updates; worst_dist = current_dists[0]; } } // new - old for (IdType j = 1; j <= num_twohop_old; ++j) { IdType twohop_old_c = twohop_old_ptr[j]; FloatType new_dist = EuclideanDistWithCheck<FloatType, IdType>( points + point_idx * feature_size, points + twohop_old_c * feature_size, feature_size, worst_dist); if (FlaggedHeapInsert<FloatType, IdType>( current_neighbors, current_dists, current_flags, twohop_old_c, new_dist, true, k, true)) { ++current_num_updates; worst_dist = current_dists[0]; } } } // process old candidates for (IdType i = 1; i <= num_old_candidates; ++i) { IdType old_c = old_candidates_ptr[i]; // new candidates of the current old candidate IdType* twohop_new_ptr = new_candidates + old_c * (num_candidates + 1); IdType num_twohop_new = twohop_new_ptr[0]; FloatType worst_dist = current_dists[0]; // old - new for (IdType j = 1; j <= num_twohop_new; ++j) { IdType twohop_new_c = twohop_new_ptr[j]; FloatType new_dist = EuclideanDistWithCheck<FloatType, IdType>( points + point_idx * feature_size, points + twohop_new_c * feature_size, feature_size, worst_dist); if (FlaggedHeapInsert<FloatType, IdType>( current_neighbors, current_dists, current_flags, twohop_new_c, new_dist, true, k, true)) { ++current_num_updates; worst_dist = current_dists[0]; } } } num_updates[point_idx] = current_num_updates; } } // namespace impl template <DLDeviceType XPU, typename FloatType, typename IdType> void KNN(const NDArray& data_points, const IdArray& data_offsets, const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result, const std::string& algorithm) { if (algorithm == std::string("bruteforce")) { impl::BruteForceKNNCuda<FloatType, IdType>( data_points, data_offsets, 
query_points, query_offsets, k, result); } else if (algorithm == std::string("bruteforce-sharemem")) { impl::BruteForceKNNSharedCuda<FloatType, IdType>( data_points, data_offsets, query_points, query_offsets, k, result); } else { LOG(FATAL) << "Algorithm " << algorithm << " is not supported on CUDA."; } } template <DLDeviceType XPU, typename FloatType, typename IdType> void NNDescent(const NDArray& points, const IdArray& offsets, IdArray result, const int k, const int num_iters, const int num_candidates, const double delta) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = points->ctx; auto device = runtime::DeviceAPI::Get(ctx); const int64_t num_nodes = points->shape[0]; const int64_t feature_size = points->shape[1]; const int64_t batch_size = offsets->shape[0] - 1; const IdType* offsets_data = offsets.Ptr<IdType>(); const FloatType* points_data = points.Ptr<FloatType>(); IdType* central_nodes = result.Ptr<IdType>(); IdType* neighbors = central_nodes + k * num_nodes; uint64_t seed; int warp_size = 0; CUDA_CALL(cudaDeviceGetAttribute( &warp_size, cudaDevAttrWarpSize, ctx.device_id)); // We don't need large block sizes, since there's not much inter-thread communication int64_t block_size = warp_size; int64_t num_blocks = (num_nodes - 1) / block_size + 1; // allocate space for candidates, distances and flags // we use the first element in candidate array to represent length IdType* new_candidates = static_cast<IdType*>( device->AllocWorkspace(ctx, num_nodes * (num_candidates + 1) * sizeof(IdType))); IdType* old_candidates = static_cast<IdType*>( device->AllocWorkspace(ctx, num_nodes * (num_candidates + 1) * sizeof(IdType))); IdType* num_updates = static_cast<IdType*>( device->AllocWorkspace(ctx, num_nodes * sizeof(IdType))); FloatType* distances = static_cast<FloatType*>( device->AllocWorkspace(ctx, num_nodes * k * sizeof(IdType))); bool* flags = static_cast<bool*>( device->AllocWorkspace(ctx, num_nodes * k * sizeof(IdType))); size_t sum_temp_size = 0; IdType total_num_updates = 0; IdType* total_num_updates_d = static_cast<IdType*>( device->AllocWorkspace(ctx, sizeof(IdType))); CUDA_CALL(cub::DeviceReduce::Sum( nullptr, sum_temp_size, num_updates, total_num_updates_d, num_nodes)); IdType* sum_temp_storage = static_cast<IdType*>( device->AllocWorkspace(ctx, sum_temp_size)); // random initialize neighbors seed = RandomEngine::ThreadLocal()->RandInt<uint64_t>( std::numeric_limits<uint64_t>::max()); CUDA_KERNEL_CALL( impl::RandomInitNeighborsKernel, num_blocks, block_size, 0, thr_entry->stream, points_data, offsets_data, central_nodes, neighbors, distances, flags, k, feature_size, batch_size, seed); for (int i = 0; i < num_iters; ++i) { // select candidates seed = RandomEngine::ThreadLocal()->RandInt<uint64_t>( std::numeric_limits<uint64_t>::max()); CUDA_KERNEL_CALL( impl::FindCandidatesKernel, num_blocks, block_size, 0, thr_entry->stream, offsets_data, new_candidates, old_candidates, neighbors, flags, seed, batch_size, num_candidates, k); // update CUDA_KERNEL_CALL( impl::UpdateNeighborsKernel, num_blocks, block_size, 0, thr_entry->stream, points_data, offsets_data, neighbors, new_candidates, old_candidates, distances, flags, num_updates, batch_size, num_candidates, k, feature_size); total_num_updates = 0; CUDA_CALL(cub::DeviceReduce::Sum( sum_temp_storage, sum_temp_size, num_updates, total_num_updates_d, num_nodes)); device->CopyDataFromTo( total_num_updates_d, 0, &total_num_updates, 0, sizeof(IdType), ctx, DLContext{kDLCPU, 0}, offsets->dtype, thr_entry->stream); 
if (total_num_updates <= static_cast<IdType>(delta * k * num_nodes)) { break; } } device->FreeWorkspace(ctx, new_candidates); device->FreeWorkspace(ctx, old_candidates); device->FreeWorkspace(ctx, num_updates); device->FreeWorkspace(ctx, distances); device->FreeWorkspace(ctx, flags); device->FreeWorkspace(ctx, total_num_updates_d); device->FreeWorkspace(ctx, sum_temp_storage); } template void KNN<kDLGPU, float, int32_t>( const NDArray& data_points, const IdArray& data_offsets, const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result, const std::string& algorithm); template void KNN<kDLGPU, float, int64_t>( const NDArray& data_points, const IdArray& data_offsets, const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result, const std::string& algorithm); template void KNN<kDLGPU, double, int32_t>( const NDArray& data_points, const IdArray& data_offsets, const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result, const std::string& algorithm); template void KNN<kDLGPU, double, int64_t>( const NDArray& data_points, const IdArray& data_offsets, const NDArray& query_points, const IdArray& query_offsets, const int k, IdArray result, const std::string& algorithm); template void NNDescent<kDLGPU, float, int32_t>( const NDArray& points, const IdArray& offsets, IdArray result, const int k, const int num_iters, const int num_candidates, const double delta); template void NNDescent<kDLGPU, float, int64_t>( const NDArray& points, const IdArray& offsets, IdArray result, const int k, const int num_iters, const int num_candidates, const double delta); template void NNDescent<kDLGPU, double, int32_t>( const NDArray& points, const IdArray& offsets, IdArray result, const int k, const int num_iters, const int num_candidates, const double delta); template void NNDescent<kDLGPU, double, int64_t>( const NDArray& points, const IdArray& offsets, IdArray result, const int k, const int num_iters, const int num_candidates, const double delta); } // namespace transform } // namespace dgl
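// A host-side illustration of the fixed-size max-heap maintained per query by
// BuildHeap / HeapInsert above: dist[0] always holds the largest of the k best
// distances, which is why the kernels can read it back as the pruning threshold
// (`worst_dist`) for EuclideanDistWithCheck.  This is a plain C++ restatement
// for clarity, not the device code itself; the sample sizes and values in main()
// are made up.
#include <cstdio>
#include <limits>
#include <utility>

namespace knn_heap_sketch {

// Sift the root down so the heap property (parent >= children on dist) holds.
template <typename FloatType, typename IdType>
void SiftDown(IdType *indices, FloatType *dists, int size) {
  int idx = 0;
  while (true) {
    int largest = idx, left = 2 * idx + 1, right = left + 1;
    if (left < size && dists[left] > dists[largest]) largest = left;
    if (right < size && dists[right] > dists[largest]) largest = right;
    if (largest == idx) break;
    std::swap(dists[idx], dists[largest]);
    std::swap(indices[idx], indices[largest]);
    idx = largest;
  }
}

// Replace the current worst candidate if the new one is better.  This assumes
// the heap was initialised full of +inf sentinels, as the kernels above do.
template <typename FloatType, typename IdType>
void HeapInsert(IdType *indices, FloatType *dists, IdType new_idx,
                FloatType new_dist, int size) {
  if (new_dist > dists[0]) return;   // not better than the current worst
  dists[0] = new_dist;
  indices[0] = new_idx;
  SiftDown(indices, dists, size);
}

}  // namespace knn_heap_sketch

// Tiny usage example: keep the 3 nearest of a stream of candidate distances.
int main() {
  const int k = 3;
  float dists[k];
  int ids[k];
  for (int i = 0; i < k; ++i) {
    dists[i] = std::numeric_limits<float>::max();
    ids[i] = -1;
  }
  const float candidates[] = {4.0f, 1.5f, 9.0f, 0.5f, 3.0f};
  for (int i = 0; i < 5; ++i)
    knn_heap_sketch::HeapInsert(ids, dists, i, candidates[i], k);
  // dists[0] is the worst of the kept k distances (here 3.0).
  std::printf("worst kept distance: %f\n", dists[0]);
  return 0;
}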
#include "nnbilinearsampler.hpp" #include "datacu.hpp" #include "impl/dispatcher.hpp" #include <cassert> // ------------------------------------------------------------------- // Helpers // ------------------------------------------------------------------- // maximum size of each grid dimension: #define MAX_GRID_DIM 65535 // this is probably a bad idea.. /* 2D grid of 1D blocks. */ __device__ int getGlobalIdx_2D_1D() { int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; return threadId ; } // todo: fix such assumptions either in doc or by clearing memory // probably all these functions should have the option to accumulate, so... // assumption: derInputData is cleared before calling this code template<typename type, bool backwardData> __global__ void forward_backward_kernel (type* output, type* derInputData, type const* data, type const* grid, type const* derOutput, int outHeight, int outWidth, int outDepth, int outCardinality, int inHeight, int inWidth, int inCardinality) { const int offset = getGlobalIdx_2D_1D(); const int nOut = outWidth * outHeight * outDepth * outCardinality ; if (offset >= nOut) { return ; } bool backward = backwardData; // get the index of the output image, feature channel, and pixel int k = offset ; int c = k / (outHeight * outWidth) ; int n = c / outDepth ; // out image index k %= (outHeight * outWidth) ; // out spatial index c %= outDepth ; // out channel index // get the index of the input image int groupSize = outCardinality / inCardinality ; // num of transformations/image int nInputImage = n / groupSize ; // index of the input image int inputOffset = (inHeight * inWidth)*(outDepth * nInputImage + c) ; // location of the start of the input image int gridOffset = 2 * ((outHeight * outWidth) * n + k) ; //+ 1; // location of the first grid coordinate for this output pixel //int gridOffset = 2*k+1 ; // get the grid for this output image type py = grid[gridOffset + 0] ; type px = grid[gridOffset + 1] ; py = type(0.5)*(py + type(1.0)) * (inHeight - 1) ; px = type(0.5)*(px + type(1.0)) * (inWidth - 1) ; const int sx = floor(px); // todo: check floor vs floorf const int sy = floor(py); type acc = 0 ; type dy ; if (!backward) { data += inputOffset ; } if (backwardData) { derInputData += inputOffset ; } if (backward) { dy = derOutput[offset] ; } if (-1 <= sy && sy < inHeight && -1 <= sx && sx < inWidth) { // get the interpolation weights const type wx = px - sx ; const type wy = py - sy ; #pragma unroll for (int j=0; j < 2; j++) { #pragma unroll for (int i=0; i < 2; i++) { int ssy = sy + i ; int ssx = sx + j ; if (ssy < 0 || ssy >= inHeight || ssx < 0 || ssx >= inWidth) { continue ; } type wwx = (1-j)*(1-wx) + j*wx ; type wwy = (1-i)*(1-wy) + i*wy ; type ww = wwx * wwy ; if (!backward) { acc += ww * data[ssy + ssx * inHeight]; } else { if (backwardData) { atomicAdd(derInputData + ssy + ssx * inHeight, ww * dy) ; } } } } if (!backward) { output[offset] = acc ; } } } template<typename type> __global__ void grid_backward_kernel (type* derGrid, type const* data, type const* grid, type const* derOutput, int outHeight, int outWidth, int outDepth, int outCardinality, int inHeight, int inWidth, int inCardinality) { const int offset = getGlobalIdx_2D_1D(); const int nOut = outWidth * outHeight * outCardinality ; if (offset >= nOut) { return ; } // get the index of the output image, feature channel, and pixel int k = offset ; int n = k / (outHeight * outWidth) ; // out image index k %= (outHeight * outWidth) ; // out spatial index 
// get the grid offset: // --> location of the first grid coordinate for this output pixel int gridOffset = 2 * ((outHeight * outWidth) * n + k) ; //+ 1; // get the index of the input image const int groupSize = outCardinality / inCardinality ; // num of transformations/image const int nInputImage = n / groupSize ; // index of the input image const int inputOffset = inHeight * inWidth * outDepth * nInputImage ; // location of the start of the input image // get the grid for this output image type py = grid[gridOffset + 0] ; type px = grid[gridOffset + 1] ; py = type(0.5)*(py + type(1.0)) * (inHeight - 1) ; px = type(0.5)*(px + type(1.0)) * (inWidth - 1) ; const int sx = floor(px); // todo: check floor vs floorf const int sy = floor(py); type dgridx = 0 ; type dgridy = 0 ; data += inputOffset ; derOutput += k + n * outWidth * outHeight * outDepth ; if (-1 <= sy && sy < inHeight && -1 <= sx && sx < inWidth) { // get the interpolation weights const type wx = px - sx ; const type wy = py - sy ; #pragma unroll for (int j=0; j < 2; j++) { #pragma unroll for (int i=0; i < 2; i++) { int ssy = sy + i ; int ssx = sx + j ; if (ssy < 0 || ssy >= inHeight || ssx < 0 || ssx >= inWidth) { continue ; } const type wwx = (2*i-1) * ( (1-j)*(1-wx) + j*wx ) ; const type wwy = (2*j-1) * ( (1-i)*(1-wy) + i*wy ) ; for (int ic=0; ic < outDepth; ic++) { const type dy = derOutput[ic * outHeight * outWidth]; const type x = data[ssy + ssx * inHeight + ic * inHeight * inWidth]; dgridy += wwx * dy * x ; dgridx += wwy * dy * x ; } } } derGrid[gridOffset + 0] = type(0.5)*(inHeight - 1) * dgridy ; derGrid[gridOffset + 1] = type(0.5)*(inWidth - 1) * dgridx ; } } /** get the number of threads (1D) and blocks (2D). **/ vl::ErrorCode get_launch_params(const int& N, int& nTh, int& nGx, int& nGy) { nGx = vl::divideAndRoundUp(N, VL_CUDA_NUM_THREADS); if (nGx == 1) { nTh = N; nGy = 1; } else { nTh = VL_CUDA_NUM_THREADS; if (nGx <= MAX_GRID_DIM) { nGy = 1; } else { nGy = vl::divideAndRoundUp(nGx, MAX_GRID_DIM); nGx = MAX_GRID_DIM; if (nGy > MAX_GRID_DIM) { // the following print statement is probably not // shown in the matlab JVM console: std::printf("BilinearSamper: output volume should be smaller."); return vl::VLE_Cuda; } } } return vl::VLE_Success; } // use a template to define both directions as they are nearly identical code-wise template<typename type, bool backwardData, bool backwardGrid> static vl::ErrorCode forward_backward_gpu (vl::Context& context, type* output, type* derInputData, type* derGrid, type const* data, type const* grid, type const* derOutput, size_t outHeight, size_t outWidth, size_t outDepth, size_t outCardinality, size_t inHeight, size_t inWidth, size_t inCardinality) { //bool backward = backwardData || backwardGrid ; // common conditions assert(grid) ; assert(divides(inCardinality, outCardinality)) ; // forward conditions //assert(backward || data) ; //assert(backward || output) ; // backward conditions //assert(!backward || derOutput) ; assert(!backwardData || derInputData) ; assert(!backwardGrid || derGrid) ; assert(!backwardGrid || data) ; // if (backwardData) { // //memset(derInputData, 0, inHeight * inWidth * outDepth * inCardinality * sizeof(type)) ; // } // setup and launch the kernel for DER-DATA: int nTh, nGx, nGy; const int outVolume = outHeight * outWidth * outDepth * outCardinality ; vl::ErrorCode volume_ok = get_launch_params(outVolume, nTh, nGx, nGy); if (volume_ok != vl::VLE_Success) { return volume_ok;} dim3 gridDim(nGx,nGy); // grid-dimensions forward_backward_kernel <type, backwardData> 
<<< gridDim, nTh >>> (output, derInputData, data, grid, derOutput, outHeight, outWidth, outDepth, outCardinality, inHeight, inWidth, inCardinality) ; cudaError_t status = cudaPeekAtLastError() ; if (status != cudaSuccess) { return vl::VLE_Cuda; } if (backwardGrid) { // setup and launch kernel for DER-GRID: const int outN = outHeight * outWidth * outCardinality; volume_ok = get_launch_params(outN, nTh, nGx, nGy); if (volume_ok != vl::VLE_Success) { return volume_ok;} gridDim.x = nGx; gridDim.y = nGy; // grid-dimensions grid_backward_kernel <type> <<< gridDim, nTh >>> ( derGrid, data, grid, derOutput, outHeight, outWidth, outDepth, outCardinality, inHeight, inWidth, inCardinality ) ; status = cudaPeekAtLastError() ; } // catch any errors: return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } // ------------------------------------------------------------------- // Forward // ------------------------------------------------------------------- template<DataType dataType> struct BilinearSamplerForward<VLDT_GPU,dataType> { vl::ErrorCode operator() (BilinearSampler &op, Tensor &output, Tensor const &input, Tensor const &grid) { typedef typename DataTypeTraits<dataType>::type type ; auto outHeight = output.getHeight() ; auto outWidth = output.getWidth() ; auto outDepth = output.getDepth() ; auto outCardinality = output.getSize() ; auto inHeight = input.getHeight() ; auto inWidth = input.getWidth() ; auto inCardinality = input.getSize() ; auto outputData = (type*)output.getMemory() ; auto inputData = (type const*)input.getMemory() ; auto gridData = (type const*)grid.getMemory() ; return forward_backward_gpu<type, false, false> (op.context, outputData, NULL, NULL, inputData, gridData, NULL, outHeight, outWidth, outDepth, outCardinality, inHeight, inWidth,inCardinality) ; } } ; // ------------------------------------------------------------------- // Backward // ------------------------------------------------------------------- #undef DISPATCH #define DISPATCH(bwData, bwGrid) \ error = forward_backward_gpu<type, bwData, bwGrid> \ (op.context, NULL, derInputData, derGridData, inputData, gridData, derOutputData, \ outHeight, outWidth, outDepth, outCardinality, \ inHeight, inWidth,inCardinality) ; template<DataType dataType> struct BilinearSamplerBackward<VLDT_GPU,dataType> { vl::ErrorCode operator() (BilinearSampler &op, Tensor &derInput, Tensor &derGrid, Tensor const &input, Tensor const &grid, Tensor const &derOutput) { typedef typename DataTypeTraits<dataType>::type type ; auto outHeight = derOutput.getHeight() ; auto outWidth = derOutput.getWidth() ; auto outDepth = derOutput.getDepth() ; auto outCardinality = derOutput.getSize() ; auto inHeight = input.getHeight() ; auto inWidth = input.getWidth() ; auto inCardinality = input.getSize() ; auto derInputData = (type*)derInput.getMemory() ; auto derGridData = (type*)derGrid.getMemory() ; auto inputData = (type const*)input.getMemory() ; auto gridData = (type const*)grid.getMemory() ; auto derOutputData = (type const*)derOutput.getMemory() ; vl::ErrorCode error = VLE_Success ; // optimized codepaths depending on what needs to be comptued if (derInput && !derGrid) { DISPATCH(true, false) ; } else if (!derInput && derGrid) { DISPATCH(false, true) ; } else if (derInput && derGrid) { DISPATCH(true, true) ; } return error ; } } ;
#include <algorithm> #include <numeric> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/context.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/tensor.h" namespace { // TODO(haowen): may move below functions to some file like `test_utils.h`, // in case other Tests may use it? template <typename T> static void CheckArrayData(const k2::Array1<T> &array, const std::vector<T> &target) { ASSERT_EQ(array.Dim(), target.size()); const T *array_data = array.Data(); // copy data from CPU/GPU to CPU auto kind = k2::GetMemoryCopyKind(*array.Context(), *k2::GetCpuContext()); std::vector<T> cpu_data(array.Dim()); k2::MemoryCopy(static_cast<void *>(cpu_data.data()), static_cast<const void *>(array_data), array.Dim() * array.ElementSize(), kind, nullptr); EXPECT_EQ(cpu_data, target); } static void CheckRowSplits(k2::RaggedShape &shape, const std::vector<std::vector<int32_t>> &target) { for (int32_t i = 1; i < shape.NumAxes(); ++i) { k2::Array1<int32_t> curr_row_splits = shape.RowSplits(i); CheckArrayData<int32_t>(curr_row_splits, target[i - 1]); } } // check if `array` and `target` have the same values template <typename T> static void CheckArrayData(const k2::Array1<T> &array, const k2::Array1<T> &target) { ASSERT_EQ(array.Dim(), target.Dim()); int32_t dim = array.Dim(); k2::ContextPtr cpu = k2::GetCpuContext(); k2::Array1<T> cpu_array = array.To(cpu); k2::Array1<T> cpu_target = target.To(cpu); std::vector<T> array_data(cpu_array.Data(), cpu_array.Data() + dim); std::vector<T> target_data(cpu_target.Data(), cpu_target.Data() + dim); EXPECT_EQ(array_data, target_data); } } // namespace namespace k2 { class RaggedShapeOpsSuiteTest : public ::testing::Test { protected: RaggedShapeOpsSuiteTest() { ContextPtr context = GetCpuContext(); const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; std::vector<RaggedShapeDim> axes; axes.emplace_back(RaggedShapeDim{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back(RaggedShapeDim{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back(RaggedShapeDim{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); simple_shape_ = RaggedShape(axes, true); // random_shape_ is on CPU random_shape_ = RandomRaggedShape(true, // set_row_ids 3, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements } RaggedShape simple_shape_; RaggedShape random_shape_; }; template <typename T, DeviceType d> void TestMaxPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // empty case const std::vector<int32_t> row_splits = {0}; RaggedShapeDim shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = 0; std::vector<RaggedShapeDim> axes = {shape_dim}; RaggedShape shape(axes, 
true); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> max_values(context, num_rows); // just run to check if there's any error MaxPerSublist(ragged, 1, &max_values); EXPECT_EQ(max_values.Dim(), 0); } { const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeDim shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeDim> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 2, 8, 0, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> max_values(context, num_rows); T default_value = 2; MaxPerSublist(ragged, default_value, &max_values); // copy memory from GPU/CPU to CPU std::vector<T> cpu_data(max_values.Dim()); auto kind = GetMemoryCopyKind(*max_values.Context(), *cpu); MemoryCopy(static_cast<void *>(cpu_data.data()), static_cast<const void *>(max_values.Data()), max_values.Dim() * max_values.ElementSize(), kind, nullptr); std::vector<T> expected_data = {3, default_value, 8, default_value}; EXPECT_EQ(cpu_data, expected_data); } { // test with random large size const int32_t min_num_elements = 2000; // not random shape is on CPU RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_elements, 5000); ASSERT_EQ(shape.NumAxes(), 2); RaggedShape gpu_shape; if (d == kCuda) { // copy shape to GPU const Array1<T> &row_splits = shape.RowSplits(1); RaggedShapeDim shape_dim; shape_dim.row_splits = row_splits.To(GetCudaContext()); shape_dim.cached_tot_size = shape.NumElements(); std::vector<RaggedShapeDim> axes = {shape_dim}; gpu_shape = RaggedShape(axes, true); } int32_t num_elems = shape.NumElements(); std::vector<T> data(num_elems); for (int32_t i = 0; i != 10; ++i) { std::iota(data.begin(), data.end(), 0); // randomly set data[pos] = num_elems which is // greater than any element in data int32_t pos = RandInt(0, num_elems - 1); data[pos] = num_elems; // find the corresponding row int32_t num_rows = shape.Dim0(); const int32_t *row_splits_data = shape.RowSplits(1).Data(); int32_t row = 0; for (int32_t i = 0; i < num_rows; ++i) { if (pos >= row_splits_data[i] && pos < row_splits_data[i + 1]) { row = i; break; } } Array1<T> values(context, data); Ragged<T> ragged(d == kCuda ? 
gpu_shape : shape, values); Array1<T> max_values(context, num_rows); T default_value = 0; MaxPerSublist(ragged, default_value, &max_values); EXPECT_EQ(max_values[row], num_elems); } } } TEST(RaggedShapeOpsTest, MaxPerSubListTest) { TestMaxPerSubListTest<int32_t, kCpu>(); TestMaxPerSubListTest<int32_t, kCuda>(); } template <typename T, DeviceType d> void TestMinPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // empty case std::vector<int32_t> row_splits_vec = {0}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> min_values(context, num_rows); // just run to check if there's any error MinPerSublist(ragged, 1, &min_values); EXPECT_EQ(min_values.Dim(), 0); } { std::vector<int32_t> row_splits_vec = {0, 2, 2, 5, 6}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); const std::vector<T> values_vec = {1, 3, 3, 8, 4, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> min_values(context, num_rows); T default_value = 2; MinPerSublist(ragged, default_value, &min_values); // copy memory from GPU/CPU to CPU min_values = min_values.To(cpu); std::vector<T> cpu_data(min_values.Data(), min_values.Data() + min_values.Dim()); std::vector<T> expected_data = {1, default_value, default_value, -1}; EXPECT_EQ(cpu_data, expected_data); } // May add tests for random large size? (but maybe it's fine to not add as we // have tested large cases in MaxPerSubList) } TEST(RaggedShapeOpsTest, MinPerSubListTest) { TestMinPerSubListTest<int32_t, kCpu>(); TestMinPerSubListTest<int32_t, kCuda>(); } template <typename T, DeviceType d> void TestAndOrPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // And const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeDim shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeDim> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 6, 11, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = -1; AndPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {1, -1, 2, 0}; EXPECT_EQ(cpu_data, expected_data); } { // Or const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeDim shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeDim> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 4, 6, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = 0; OrPerSublist(ragged, default_value, &dst); // copy memory from 
GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {3, 0, 7, 0}; EXPECT_EQ(cpu_data, expected_data); } } TEST(RagedShapeOpsTest, AndOrPerSubListTest) { TestAndOrPerSubListTest<int32_t, kCpu>(); TestAndOrPerSubListTest<int32_t, kCuda>(); } void TestUnsqueeze(ContextPtr context, const RaggedShape &input_shape) { RaggedShape src_shape = input_shape.To(context); src_shape.Populate(); // set row_ids { // axis = 0. RaggedShape shape = Unsqueeze(src_shape, 0); int32_t dim0 = src_shape.Dim0(); const std::vector<RaggedShapeDim> &src_axes = src_shape.Axes(); const std::vector<RaggedShapeDim> &dest_axes = shape.Axes(); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, dim0}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data(dim0, 0); CheckArrayData(row_ids0, data); } { for (auto i = 0; i != src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = Unsqueeze(src_shape, axis); int32_t tot_size = shape.TotSize(axis); const std::vector<RaggedShapeDim> &src_axes = src_shape.Axes(); const std::vector<RaggedShapeDim> &dest_axes = shape.Axes(); { for (auto i = 0; i < axis; ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i].row_ids); } } { const Array1<int32_t> &row_splits = dest_axes[axis].row_splits; std::vector<int32_t> data(tot_size + 1); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_splits, data); } { const Array1<int32_t> &row_ids = dest_axes[axis].row_ids; std::vector<int32_t> data(tot_size); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_ids, data); } { for (auto i = axis; i < src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestUnsqueezeCpu) { TestUnsqueeze(GetCpuContext(), simple_shape_); TestUnsqueeze(GetCpuContext(), random_shape_); } TEST_F(RaggedShapeOpsSuiteTest, TestUnsqueezeGpu) { TestUnsqueeze(GetCudaContext(), simple_shape_); TestUnsqueeze(GetCudaContext(), random_shape_); } void TestRemoveAxis(ContextPtr context, const RaggedShape &input_shape) { RaggedShape src_shape = input_shape.To(context); ASSERT_EQ(src_shape.NumAxes(), 4); { // axis = 0. 
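// Removing the outermost axis just drops the leading RaggedShapeDim: for this 4-axis shape
// the result has 3 axes, and each remaining axis reuses the row_splits/row_ids of the
// corresponding src axis shifted up by one (verified below).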
int32_t axis = 0; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeDim> &src_axes = src_shape.Axes(); const std::vector<RaggedShapeDim> &dest_axes = shape.Axes(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (auto i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeDim> &src_axes = src_shape.Axes(); const std::vector<RaggedShapeDim> &dest_axes = shape.Axes(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, 3, 7, 10}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data = {0, 0, 0, 1, 1, 1, 1, 2, 2, 2}; CheckArrayData(row_ids0, data); } { for (auto i = 1; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 3 int32_t axis = 3; // the last axis RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeDim> &src_axes = src_shape.Axes(); const std::vector<RaggedShapeDim> &dest_axes = shape.Axes(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (auto i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i].row_ids); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRemoveAxisCpu) { TestRemoveAxis(GetCpuContext(), simple_shape_); } TEST_F(RaggedShapeOpsSuiteTest, TestRemoveAxisGpu) { TestRemoveAxis(GetCudaContext(), simple_shape_); } void TestGetOffsets(ContextPtr context) { for (int32_t i = 0; i != 2; ++i) { int32_t num_shape = RandInt(10, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } RaggedShape **shapes_ptr = shapes.data(); Array2<int32_t> offsets = GetOffsets(num_shape, shapes_ptr); ASSERT_EQ(offsets.Dim0(), num_axes + 1); ASSERT_EQ(offsets.Dim1(), num_shape + 1); auto acc = offsets.Accessor(); for (int32_t axis = 0; axis <= num_axes; ++axis) { int32_t sum = 0; for (int32_t j = 0; j <= num_shape; ++j) { EXPECT_EQ(acc(axis, j), sum); if (j < num_shape) { sum += (axis == 0 ? 1 : shape_vec[j].TotSize(axis - 1)); } } } } } TEST(RaggedShapeOpsTest, TestGetOffsets) { TestGetOffsets(GetCpuContext()); TestGetOffsets(GetCudaContext()); } // returns a random ragged shape where the dims on axis 1 are all the same // (so: can be transposed). 
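// Sketch of the construction below: take the largest divisor d of random.Dim0() with
// d * d <= Dim0(), use d as the new top-level Dim0 and give every new row exactly Dim0()/d
// sub-lists (e.g. Dim0() == 12 gives row_splits {0, 4, 8, 12}); composing that regular shape
// on top of `random` makes axis 1 of the result regular, hence transposable.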
RaggedShape RandomRaggedShapeToTranspose(ContextPtr c) { ContextPtr c_cpu = GetCpuContext(); RaggedShape random = RandomRaggedShape(false, 2, 4, 0, 5000).To(c); int32_t input_dim0 = random.Dim0(), divisor = 1; for (int32_t i = 1; i * i <= input_dim0; i++) { if (input_dim0 % i == 0 && i > divisor) divisor = i; } int32_t output_dim0 = divisor, output_dim1 = input_dim0 / divisor; Array1<int32_t> row_splits = Range<int32_t>(c, output_dim0 + 1, 0, output_dim1); int32_t cached_tot_size = input_dim0; RaggedShape top_level_shape = RaggedShape2(&row_splits, nullptr, cached_tot_size); return ComposeRaggedShapes(top_level_shape, random); } template <DeviceType d> void TestTranspose() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); RaggedShape shape = Transpose(src_shape); EXPECT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); RaggedShape transposed = Transpose(to_transpose); if (d != kCpu) { to_transpose = to_transpose.To(cpu); transposed = transposed.To(cpu); } for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t i = transposed[index]; // Just make sure this doesn't crash, // dont need the value. std::swap(index[0], index[1]); i = to_transpose[index]; // don't need the value, just need to make // sure it's an allowable index. } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); std::swap(index[0], index[1]); int32_t i = transposed[index]; // don't need the value, just need to // make sure it's an allowable index. 
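// Swapping the first two indices maps any valid index of one shape to a valid index of the
// other; both loops only exercise operator[] in each direction to catch indexing errors.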
} } } } TEST(RaggedShapeOpsTest, TestTranspose) { TestTranspose<kCpu>(); TestTranspose<kCuda>(); } template <DeviceType d, typename T> void TestTransposeRagged() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); std::vector<T> values = {0, 1, 2, 3, 4, 5, 8, 7, 6, 9, 10, 15}; ASSERT_EQ(values.size(), src_shape.NumElements()); Array1<T> values_array(context, values); Ragged<T> ragged(src_shape, values_array); Ragged<T> ans = Transpose(ragged); RaggedShape shape = ans.shape; // Check shape ASSERT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); // Check values CheckArrayData(ans.values, {0, 1, 2, 4, 5, 8, 6, 9, 3, 7, 10, 15}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); int32_t num_elems = to_transpose.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); Ragged<T> src(to_transpose, src_values); Ragged<T> ans = Transpose(src); if (d != kCpu) { src = src.To(cpu); ans = ans.To(cpu); to_transpose = to_transpose.To(cpu); } RaggedShape transposed = ans.shape; for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = ans[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, src[index]); } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = src[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, ans[index]); } } } } TEST(RaggedTest, TestTransposeRagged) { TestTransposeRagged<kCpu, int32_t>(); TestTransposeRagged<kCuda, int32_t>(); TestTransposeRagged<kCpu, double>(); TestTransposeRagged<kCuda, double>(); } template <DeviceType d> void TestRowSplitsPtr() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } RaggedShape shape = RandomRaggedShape().To(context); ASSERT_GE(shape.NumAxes(), 2); Array1<int32_t *> ptrs = GetRowSplitsPtr(shape); ASSERT_EQ(ptrs.Dim(), shape.NumAxes() - 1); // as num_axes is not so big, access (may copy memory) it in a loop is fine. 
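// Each entry of `ptrs` should alias the Data() pointer of the row_splits array of axis i+1,
// so comparing raw pointers is sufficient here.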
for (int32_t i = 0; i != ptrs.Dim(); ++i) { EXPECT_EQ(ptrs[i], shape.RowSplits(i + 1).Data()); } } TEST(RaggedShapeOpsTest, TestRowSplitsPtr) { TestRowSplitsPtr<kCpu>(); TestRowSplitsPtr<kCuda>(); } void TestRaggedShape2(ContextPtr context, const RaggedShape &shape) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 2); Array1<int32_t> row_splits = src_shape.RowSplits(1); Array1<int32_t> row_ids = src_shape.RowIds(1); int32_t cached_tot_size = src_shape.TotSize(1); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape2(&row_splits, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // both row_splits and row_ids are non-null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null RaggedShape result = RaggedShape2(&row_splits, nullptr, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } // note if row_splits == null, then we suppose there's no empty rows after the // last row-id in row_ids if (row_splits.Dim() == (row_ids.Dim() == 0 ? 1 : row_ids.Back() + 2)) { { // row_splits is null RaggedShape result = RaggedShape2(nullptr, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_splits is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(nullptr, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape2Cpu) { TestRaggedShape2(GetCpuContext(), simple_shape_); TestRaggedShape2(GetCpuContext(), random_shape_); } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape2Gpu) { TestRaggedShape2(GetCudaContext(), simple_shape_); TestRaggedShape2(GetCudaContext(), random_shape_); } void TestRaggedShape3(ContextPtr context, const RaggedShape &shape) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); int32_t cached_tot_size1 = src_shape.TotSize(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); int32_t cached_tot_size2 = src_shape.TotSize(2); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape3(&row_splits1, &row_ids1, cached_tot_size1, &row_splits2, &row_ids2, cached_tot_size2); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } { // row_ids is non-null, cached_tot_size = -1 RaggedShape result = RaggedShape3(&row_splits1, nullptr, -1, 
&row_splits2, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // note if row_splits == null, then we suppose there's no empty rows after the // last row-id in row_ids bool valid1 = (row_splits1.Dim() == (row_ids1.Dim() == 0 ? 1 : row_ids1.Back() + 2)); bool valid2 = (row_splits2.Dim() == (row_ids2.Dim() == 0 ? 1 : row_ids2.Back() + 2)); if (valid1 && valid2) { RaggedShape result = RaggedShape3(nullptr, &row_ids1, -1, nullptr, &row_ids2, -1); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // TODO(haowen): add more cases for other branches } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape3Cpu) { TestRaggedShape3(GetCpuContext(), simple_shape_); TestRaggedShape3(GetCpuContext(), random_shape_); } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape3Gpu) { TestRaggedShape3(GetCudaContext(), simple_shape_); TestRaggedShape3(GetCudaContext(), random_shape_); } void TestComposeShape(ContextPtr context, const RaggedShape &shape) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); RaggedShape shape1 = RaggedShape2(&row_splits1, nullptr, -1); RaggedShape shape2 = RaggedShape2(&row_splits2, nullptr, -1); RaggedShape result = ComposeRaggedShapes(shape1, shape2); ASSERT_EQ(result.NumAxes(), 3); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); } TEST_F(RaggedShapeOpsSuiteTest, TestComposeShapeCpu) { TestComposeShape(GetCpuContext(), simple_shape_); TestComposeShape(GetCpuContext(), random_shape_); } TEST_F(RaggedShapeOpsSuiteTest, TestComposeShapeGpu) { TestComposeShape(GetCudaContext(), simple_shape_); TestComposeShape(GetCudaContext(), random_shape_); } void TestShapeFromTotSize(ContextPtr context, const RaggedShape &shape) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 2); int32_t num_axes = src_shape.NumAxes(); std::vector<int32_t> tot_sizes(num_axes); for (int32_t i = 0; i != num_axes; ++i) { tot_sizes[i] = src_shape.TotSize(i); } RaggedShape result = RaggedShapeFromTotSizes(context, num_axes, tot_sizes.data()); ASSERT_EQ(result.NumAxes(), num_axes); for (int32_t i = 0; i < num_axes; ++i) { EXPECT_EQ(result.TotSize(i), src_shape.TotSize(i)); if (i > 0) { EXPECT_EQ(result.RowSplits(i).Dim(), src_shape.RowSplits(i).Dim()); EXPECT_EQ(result.RowIds(i).Dim(), src_shape.RowIds(i).Dim()); } } } TEST_F(RaggedShapeOpsSuiteTest, TestShapeFromTotSizeCpu) { TestShapeFromTotSize(GetCpuContext(), simple_shape_); TestShapeFromTotSize(GetCpuContext(), random_shape_); } TEST_F(RaggedShapeOpsSuiteTest, TestShapeFromTotSizeGpu) { TestShapeFromTotSize(GetCudaContext(), simple_shape_); TestShapeFromTotSize(GetCudaContext(), random_shape_); } template <typename T, DeviceType d> void TestRagged() { ContextPtr cpu = GetCpuContext(); // will use to copy 
data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // constructed with row_splits and row_ids // RaggedTensor4 t = [ // [ [[ 1, 2], [4]], [[3, 0]] ], // [ [[7, 8, 9]], [[6], [3, 5, 7]], [[2]] ], // [ [[3, 4], [], [8]] ] // ] const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; const std::vector<T> values_vec = {1, 2, 4, 3, 0, 7, 8, 9, 6, 3, 5, 7, 2, 3, 4, 8}; std::vector<RaggedShapeDim> axes; axes.emplace_back(RaggedShapeDim{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back(RaggedShapeDim{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back(RaggedShapeDim{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); RaggedShape shape(axes, true); Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); // test Index(axis, i) { // values: [[[ 1, 2], [4]], [[3, 0]]] Ragged<T> sub_raggged = ragged.Index(0, 0); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 2, 3}, {0, 2, 3, 5}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {1, 2, 4, 3, 0}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[7, 8, 9]], [[6], [3, 5, 7]], [[2]]] Ragged<T> sub_raggged = ragged.Index(0, 1); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 1, 3, 4}, {0, 3, 4, 7, 8}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {7, 8, 9, 6, 3, 5, 7, 2}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[3, 4], [], [8]]] Ragged<T> sub_raggged = ragged.Index(0, 2); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 3}, {0, 2, 2, 3}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {3, 4, 8}; CheckArrayData<T>(sub_values, sub_values_vec); } // test operator[](const std::vector<int32_t> &indexes) if (d == kCpu) { { std::vector<int32_t> indexes = {0, 0, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 0); EXPECT_EQ(ragged[indexes], 1); } { std::vector<int32_t> indexes = {0, 1, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 3); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {1, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 6); EXPECT_EQ(ragged[indexes], 8); } { std::vector<int32_t> indexes = {1, 1, 1, 0}; EXPECT_EQ(ragged.shape[indexes], 9); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {2, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 14); EXPECT_EQ(ragged[indexes], 4); } { std::vector<int32_t> indexes = {2, 0, 2, 0}; EXPECT_EQ(ragged.shape[indexes], 15); 
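// linear offset 15 is the last entry of values_vec, i.e. 8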
EXPECT_EQ(ragged[indexes], 8); } } const std::vector<std::vector<int32_t>> row_splits_vec = { row_splits1, row_splits2, row_splits3}; // test To(ctx) { // to GPU Ragged<T> other = ragged.To(GetCudaContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } { // to CPU Ragged<T> other = ragged.To(GetCpuContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } } } template <typename T, typename OP = LessThan<T>> static void CpuSortSublists(const Array1<int32_t> &row_splits, Array1<T> *src) { K2_CHECK(src->Context()->GetDeviceType() == kCpu); T *p = src->Data(); OP comp = OP(); for (int32_t i = 0; i < row_splits.Dim() - 1; ++i) { int32_t cur = row_splits[i]; int32_t next = row_splits[i + 1]; std::sort(p + cur, p + next, comp); } } template <typename T, typename OP = LessThan<T>> static void TestSortSublists() { auto cpu_context = GetCpuContext(); auto cuda_context = GetCudaContext(); RaggedShape shape = RandomRaggedShape(false, // set_row_ids 2, // min_num_axes 4, // max_num_axes 1, // min_num_elements 2000); // max_num_elements Array1<T> values = RandUniformArray1<T>(shape.Context(), shape.NumElements(), -2000, 2000); Ragged<T> ragged(shape, values); ragged = ragged.To(cuda_context); values = values.To(cpu_context); // to be sorted by cpu // TODO(fangjun): add a `Clone` method to Array1<T> Array1<T> unsorted = values.To(cuda_context).To(cpu_context); Array1<int32_t> order(ragged.Context(), ragged.values.Dim()); SortSublists<T, OP>(&ragged, &order); Array1<int32_t> &segment = ragged.shape.RowSplits(ragged.NumAxes() - 1); CpuSortSublists<T, OP>(segment, &values); int32_t n = order.Dim(); for (int i = 0; i != n; ++i) { EXPECT_EQ(values[i], ragged.values[i]); EXPECT_EQ(ragged.values[i], unsorted[order[i]]); } } TEST(RaggedTest, Ragged) { TestRagged<int32_t, kCuda>(); TestRagged<int32_t, kCpu>(); TestRagged<double, kCuda>(); TestRagged<double, kCpu>(); TestSortSublists<int32_t>(); TestSortSublists<double>(); } template <DeviceType d> void TestAppend() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); RaggedShape shape = 
RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes[1] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[1] = &shapes[1]; } { // axis == 1 RaggedShape result = Append(1, 2, shapes_ptr.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } } { // axis == 0 RaggedShape result = Append(0, 2, shapes_ptr.data()); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto i = 0; i < 2; ++i) { std::vector<const Array1<int32_t> *> splits_ptr = { &row_splits_vec[i][0], &row_splits_vec[i][1]}; Array1<int32_t> curr_row_splits = SpliceRowSplits(2, splits_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), result_splits[i]); CheckArrayData(result.RowIds(i + 1), result_ids[i]); } } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } // only test case axis == 0, test axis==1 with simple case is good enough // as it just calls Stack RaggedShape result = Append(0, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (int32_t axis = 1; axis < num_axes; ++axis) { std::vector<Array1<int32_t>> splits_vec(num_shape); std::vector<const Array1<int32_t> *> splits_vec_ptr(num_shape); for (int32_t n = 0; n != num_shape; ++n) { splits_vec[n] = shape_vec[n].RowSplits(axis); splits_vec_ptr[n] = &splits_vec[n]; } Array1<int32_t> curr_row_splits = SpliceRowSplits(num_shape, splits_vec_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } // check data for (int32_t axis = 1; axis < num_axes; ++axis) { CheckArrayData(result.RowSplits(axis), result_splits[axis - 1]); CheckArrayData(result.RowIds(axis), result_ids[axis - 1]); } } } } TEST(RaggedShapeOpsTest, TestAppend) { TestAppend<kCpu>(); TestAppend<kCuda>(); } template <DeviceType d, typename T> void TestAppendRagged() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } // TODO(haowen): remove duplicate code in TestAppend above. // test with simple case could be good enough, as we have tested // Append(RaggedShape&) already. 
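// Two small 3-axis ragged tensors are built below. Append(0, ...) concatenates them along
// the top axis (values simply concatenated), while Append(1, ...) concatenates, row by row,
// the sub-lists of corresponding rows; the expected_data vectors encode exactly these layouts.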
std::vector<Ragged<T>> ragged_vec(2); std::vector<Ragged<T> *> ragged(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<T> values_vec = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[0] = Ragged<T>(shape, values); ragged[0] = &ragged_vec[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; const std::vector<T> values_vec = {20, 21, 23, 28, 30, 32, 35}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[1] = Ragged<T>(shape, values); ragged[1] = &ragged_vec[1]; } { // axis == 0 Ragged<T> result = Append(0, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 1, 1, 1, 2, 3, 4, 4, 5}, {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18, 20, 21, 23, 28, 30, 32, 35}; CheckArrayData(result.values, expected_data); } { // axis == 1 Ragged<T> result = Append(1, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 20, 21, 23, 7, 9, 10, 12, 28, 30, 14, 15, 18, 32, 35}; CheckArrayData(result.values, expected_data); } } TEST(RaggedTest, TestAppendRagged) { TestAppendRagged<kCpu, int32_t>(); TestAppendRagged<kCuda, int32_t>(); TestAppendRagged<kCpu, double>(); TestAppendRagged<kCuda, double>(); } void CheckResultOfRenumber(const ContextPtr &context, RaggedShape &shape, Array1<int32_t> &new2old, RaggedShape &result) { ContextPtr cpu = GetCpuContext(); // will use to copy data int32_t num_axes = shape.NumAxes(); int32_t dim0 = shape.Dim0(); if (dim0 == 0) { std::vector<int32_t> empty_row_splits = {0}; for (int32_t i = 0; i < num_axes - 1; ++i) { CheckArrayData(result.RowSplits(i + 1), empty_row_splits); EXPECT_EQ(result.RowIds(i + 1).Dim(), 0); } return; } Array2<int32_t> old_offsets(context, num_axes, dim0 + 1); auto old_offsets_acc = old_offsets.Accessor(); Array1<int32_t *> row_splits_ptrs = 
GetRowSplitsPtr(shape); int32_t **row_splits_ptrs_data = row_splits_ptrs.Data(); // Set old_offsets auto lambda_get_old_offsets = [=] __host__ __device__(int32_t i) { // 0 <= i <= dim0 int32_t cur_offset = i; for (int32_t axis = 0; axis < num_axes; axis++) { old_offsets_acc(axis, i) = cur_offset; if (axis + 1 == num_axes) return; cur_offset = row_splits_ptrs_data[axis][cur_offset]; } }; Eval(context, dim0 + 1, lambda_get_old_offsets); old_offsets = old_offsets.To(cpu); auto cpu_offsets_acc = old_offsets.Accessor(); shape = shape.To(cpu); new2old = new2old.To(cpu); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto axis = 0; axis < num_axes - 1; ++axis) { Array1<int32_t> curr_row_splits = shape.RowSplits(axis + 1); std::vector<Array1<int32_t>> splits_vec(dim0); std::vector<const Array1<int32_t> *> splits_vec_ptr(dim0); for (int32_t m = 0; m != dim0; ++m) { int32_t old_idx = new2old[m]; int32_t start = cpu_offsets_acc(axis, old_idx); int32_t end = cpu_offsets_acc(axis, old_idx + 1); Array1<int32_t> sub_list = curr_row_splits.Range(start, end - start + 1); Array1<int32_t> copy_sub_list(cpu, sub_list.Dim()); copy_sub_list.CopyFrom(sub_list); int32_t *data = copy_sub_list.Data(); int32_t init = data[0]; for (int32_t n = 0; n != copy_sub_list.Dim(); ++n) { data[n] -= init; } splits_vec[m] = copy_sub_list; splits_vec_ptr[m] = &splits_vec[m]; } Array1<int32_t> result_row_splits = SpliceRowSplits(dim0, splits_vec_ptr.data()); result_splits.push_back(result_row_splits); Array1<int32_t> result_row_ids(cpu, result_row_splits.Back()); RowSplitsToRowIds(result_row_splits, &result_row_ids); result_ids.push_back(result_row_ids); } for (int32_t i = 0; i < num_axes - 1; ++i) { CheckArrayData(result.RowSplits(i + 1), result_splits[i]); CheckArrayData(result.RowIds(i + 1), result_ids[i]); } } template <DeviceType d> void TestRenumber() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; // std::vector<std::vector<int32_t>> expected_row_splits = { // {0, 3, 4, 6}, {0, 1, 3, 4, 7, 9, 10}}; // std::vector<std::vector<int32_t>> expected_row_ids = { // {0, 0, 0, 1, 2, 2}, {0, 1, 1, 2, 3, 3, 3, 4, 4, 5}}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); std::vector<int32_t> new2old_vec = {1, 2, 0}; Array1<int32_t> new2old(context, new2old_vec); RaggedShape result = Renumber(shape, new2old); CheckResultOfRenumber(context, shape, new2old, result); } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); std::vector<int32_t> new2old_vec(dim0); std::iota(new2old_vec.begin(), new2old_vec.end(), 0); std::random_device rd; std::mt19937 g(rd()); std::shuffle(new2old_vec.begin(), new2old_vec.end(), g); 
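// new2old is now a random permutation of [0, dim0); Renumber should reorder the top-level
// rows accordingly, and CheckResultOfRenumber rebuilds the expected row_splits/row_ids on
// the CPU for comparison.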
Array1<int32_t> new2old(context, new2old_vec); RaggedShape result = Renumber(shape, new2old); CheckResultOfRenumber(context, shape, new2old, result); } } } TEST(RaggedShapeOpsTest, TestRenumber) { TestRenumber<kCpu>(); TestRenumber<kCuda>(); } TEST(GetTransposeReordering, NoDuplicates) { // 0 0 0 9 2 // 5 8 0 0 1 // 0 0 3 0 0 // 0 6 0 0 0 std::vector<int32_t> col_indexes{3, 4, 0, 1, 4, 2, 1}; std::vector<int32_t> _row_splits{0, 2, 5, 6, 7}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 5); // index 0 1 2 3 4 5 6 // it maps 9 2 5 8 1 3 6 to // 5 8 6 3 9 2 1 // so it returns // 2 3 6 5 0 1 4 CheckArrayData(order, {2, 3, 6, 5, 0, 1, 4}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicates) { // 0 0 0 (9,9,9) // 5 8 0 0 // 0 0 (3,3) 0 // 0 6 0 0 std::vector<int32_t> col_indexes{3, 3, 3, 0, 1, 2, 2, 1}; std::vector<int32_t> _row_splits{0, 3, 5, 7, 8}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 4); // index 0 1 2 3 4 5 6 7 // it maps 9 9 9 5 8 3 3 6 to // 5 8 6 3 3 9 9 9 // so it returns // 3 4 7 5 6 0 1 2 Note that it is stable CheckArrayData(order, {3, 4, 7, 5, 6, 0, 1, 2}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } template <DeviceType d> void TestGetCountsPartitioned() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } // Testing with simple case is good enough as we have tested GetCounts() with // random large size and GetCountsPartitioned just calls GetCounts. 
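// The src values below ({0,1,0,2,5,5,7,7,9,7}) contain two 0s, one 1, one 2, two 5s, three 7s
// and one 9, so the counts for values 0..9 are {2,1,1,0,0,2,0,3,0,1}; ans_shape only controls
// how those counts are partitioned into rows ({0,2,4,7,10} -> rows of sizes 2, 2, 3, 3).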
std::vector<int32_t> src_row_splits_vec = {0, 3, 4, 6, 10}; Array1<int32_t> src_row_splits(context, src_row_splits_vec); RaggedShape src_shape = RaggedShape2(&src_row_splits, nullptr, -1); std::vector<int32_t> src_values_vec = {0, 1, 0, 2, 5, 5, 7, 7, 9, 7}; Array1<int32_t> src_values(context, src_values_vec); Ragged<int32_t> src(src_shape, src_values); std::vector<int32_t> ans_row_splits_vec = {0, 2, 4, 7, 10}; Array1<int32_t> ans_row_splits(context, ans_row_splits_vec); RaggedShape ans_shape = RaggedShape2(&ans_row_splits, nullptr, -1); Ragged<int32_t> result = GetCountsPartitioned(src, ans_shape); ASSERT_EQ(result.NumAxes(), 2); // Check row_splits Array1<int32_t> row_splits = result.shape.RowSplits(1).To(cpu); std::vector<int32_t> result_row_splits(row_splits.Data(), row_splits.Data() + row_splits.Dim()); EXPECT_EQ(result_row_splits, ans_row_splits_vec); // check values std::vector<int32_t> expected_data = {2, 1, 1, 0, 0, 2, 0, 3, 0, 1}; Array1<int32_t> values = result.values.To(cpu); std::vector<int32_t> data(values.Data(), values.Data() + values.Dim()); EXPECT_EQ(data, expected_data); } TEST(RaggedShapeOpsTest, TestGetCountsPartitioned) { TestGetCountsPartitioned<kCpu>(); TestGetCountsPartitioned<kCuda>(); } template <DeviceType d> void TestStack() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[1] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[1] = &shapes[1]; } std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6}, {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); RaggedShape transpose = Transpose(result); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(transpose.RowSplits(i + 1), expected_row_splits[i]); } } } { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } 
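// Fixing the endpoints at 0 and src_dim0 and sorting the random interior cut points yields a
// valid non-decreasing row_splits vector for the extra top-level axis prepended below.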
row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); shape_vec[j] = new_shape; shapes[j] = &shape_vec[j]; } std::vector<RaggedShape> cpu_shapes(num_shape); for (auto i = 0; i != num_shape; ++i) { cpu_shapes[i] = shape_vec[i].To(cpu); } { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to make // sure it's an allowable index. } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to make // sure it's an allowable index. } } } } } TEST(RaggedShapeOpsTest, TestStack) { TestStack<kCpu>(); TestStack<kCuda>(); } template <DeviceType d, typename T> void TestStackRagged() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<Ragged<T>> ragged_vec(num_shape); std::vector<Ragged<T> *> ragged(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); int32_t num_elems = new_shape.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); ragged_vec[j] = Ragged<T>(new_shape, src_values); ragged[j] = &ragged_vec[j]; } std::vector<Ragged<T>> cpu_ragged_vec(num_shape); for (auto j = 0; j != num_shape; ++j) { cpu_ragged_vec[j] = ragged_vec[j].To(cpu); } { // axis == 0 int32_t axis = 0; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); 
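// After stacking on axis 0, result[i, j, k, ...] must equal the i-th source tensor indexed
// with [j, k, ...]; the loop below strips the leading index and compares element by element
// against the CPU copies.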
RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] EXPECT_EQ(value, cpu_ragged_vec[i][index]); } } { // axis == 1 int32_t axis = 1; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t j = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] EXPECT_EQ(value, cpu_ragged_vec[j][index]); } } } } TEST(RaggedTest, TestStackRagged) { TestStackRagged<kCpu, int32_t>(); TestStackRagged<kCuda, int32_t>(); TestStackRagged<kCpu, double>(); TestStackRagged<kCuda, double>(); } template <DeviceType d> void TestMakeTransposable() { ContextPtr cpu = GetCpuContext(); // will use to copy data ContextPtr context = nullptr; if (d == kCpu) { context = GetCpuContext(); } else { K2_CHECK_EQ(d, kCuda); context = GetCudaContext(); } { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2, 3, 3}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, // 6, 7}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6, 9, 12}, {0, 2, 3, 3, 4, 6, 7, 10, 10, 10, 12, 13, 13}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}, {0, 0, 1, 3, 4, 4, 5, 6, 6, 6, 9, 9, 10}}; RaggedShape result = MakeTransposable(shape); for (int32_t i = 1; i != 3; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); CheckArrayData(result.RowIds(i), expected_row_ids[i - 1]); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t max_size = shape.MaxSize(1); RaggedShape result = MakeTransposable(shape); shape = shape.To(cpu); result = result.To(cpu); EXPECT_EQ(result.Dim0(), dim0); EXPECT_EQ(result.TotSize(1), dim0 * max_size); // check if every sub list in axis 1 has the same size int32_t *row_splits1 = result.RowSplits(1).Data(); for (int32_t j = 0; j != dim0 + 1; ++j) { EXPECT_EQ(row_splits1[j], j * max_size); } if (num_axes > 2) { for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); EXPECT_EQ(shape[index], result[index]); } } } } } TEST(RaggedShapeOpsTest, TestMakeTransposable) { TestMakeTransposable<kCpu>(); TestMakeTransposable<kCuda>(); } } // namespace k2
#include <cstdio> #include <utility_kernels_pose.h> namespace pose { // number of median reductions performed within each block // implies that we have 3^MED_BLOCK_LEVELS threads const int MED_BLOCK_LEVELS = 5; const int MED_BLOCK_SIZE = 243; texture<float, cudaTextureType2D, cudaReadModeElementType> d_Zbuffer_texture; texture<unsigned int, 1, cudaReadModeElementType> labelsTexture; // Convert Zbuffer for initial Z model construction static __global__ void convert_Zbuffer_to_Z_GPU(float *d_Z, int n_cols, int n_rows, float Z_conv1, float Z_conv2, float floatnan) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < n_cols) & (y < n_rows)) // are we in the image? { // determine output linear index unsigned int ind = x + y * n_cols; // determine gl coord float xt = (float)x + 0.5f; float yt = (float)y + 0.5f; float Zbuffer = tex2D(d_Zbuffer_texture, xt, yt); d_Z[ind] = (Zbuffer > 0.0f) ? (__fdividef(Z_conv1, Zbuffer + Z_conv2)) : floatnan; } } // Convert Zbuffer to disparity static __global__ void convert_Zbuffer_to_Disparity_GPU(float *d_Disparity, int n_cols, int n_rows, int pitch, float D_conv1, float D_conv2, float floatnan) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < n_cols) & (y < n_rows)) // are we in the image? { // determine output linear index // unsigned int ind = x + y*n_cols; // determine gl coord float xt = (float)x + 0.5f; float yt = (float)y + 0.5f; float Zbuffer = tex2D(d_Zbuffer_texture, xt, yt); *((float *)((char *)d_Disparity + y *pitch) + x) = (Zbuffer > 0.0f) ? (D_conv1 * Zbuffer + D_conv2) : floatnan; } } __global__ void convertPointCloudToDepthImage_kernel( unsigned int *depth_image, const float4 *point_cloud, int n_cols, int n_rows, int n_points, float nodal_point_x, float nodal_point_y, float focal_length_x, float focal_length_y, const float *T, const float *R) { const int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind < n_points) { // fetch point float4 point = point_cloud[ind]; // transform to camera frame float x = R[0] * point.x + R[1] * point.y + R[2] * point.z + T[0]; float y = R[3] * point.x + R[4] * point.y + R[5] * point.z + T[1]; float z = R[6] * point.x + R[7] * point.y + R[8] * point.z + T[2]; float inv_z = 1.0f / z; // project in image int x_pix = __float2int_rn(focal_length_x * x * inv_z + nodal_point_x); int y_pix = __float2int_rn(focal_length_y * y * inv_z + nodal_point_y); // check if inside image bool valid = ((x_pix >= 0) && (x_pix < n_cols) && (y_pix >= 0) && (y_pix < n_rows)); if (valid) { int ind_out = y_pix * n_cols + x_pix; // depth_image[ind_out] = (unsigned int)(point.z * 1000.0f); atomicMin(depth_image + ind_out, (unsigned int)(point.z * 1000.0f)); } } } __global__ void initializeToValue_kernel(unsigned int *data, unsigned int value, int width, int height) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < width && y < height) { data[y * width + x] = value; } } __global__ void convertDepthImageToMeter_kernel(float *d_depth_image_meter, const unsigned int *d_depth_image_millimeter, int n_rows, int n_cols) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n_cols && y < n_rows) { int ind = y * n_cols + x; unsigned int depth = d_depth_image_millimeter[ind]; d_depth_image_meter[ind] = (depth == 4294967295) ? 
nanf("") : (float)depth / 1000.0f; } } __global__ void colorValidationDepthImageMatches_kernel( uchar4 *out_image, const float *depth_image, int width, int height, float Z_conv1, float Z_conv2, float max_error, float llim_depth, float ulim_depth) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if ((x < width) & (y < height)) // are we in the image? { // determine i/o linear index unsigned int ind = x + y * width; // determine gl coord float xt = (float)x + 0.5f; float yt = (float)y + 0.5f; float Zbuffer = tex2D(d_Zbuffer_texture, xt, yt); bool model_present = Zbuffer > 0.0f; float Z_measured = depth_image[ind]; float Z_estimated = __fdividef(Z_conv1, Zbuffer + Z_conv2); bool validated = fabsf(Z_estimated - Z_measured) <= max_error; unsigned char output_intensity = 255 * (Z_measured - llim_depth) / (ulim_depth - llim_depth); uchar4 outpixel; outpixel.x = output_intensity; outpixel.y = output_intensity; outpixel.z = output_intensity; outpixel.w = 255; if (model_present) { outpixel.x = validated ? 0 : output_intensity; outpixel.y = validated ? output_intensity : 0; outpixel.z = 0; } out_image[ind] = outpixel; } } // Approximate median with data shuffling // The input data is shuffled (with replacement) according to the random // numbers, n_in is the size of the input data (no limit) __global__ void median_reduce_shuffle_gpu(const float *d_in, float *d_out, float *d_random_numbers, int n_in) { /**************/ /* initialize */ /**************/ // compute indices int t_ind = threadIdx.x; int g_ind = blockIdx.x * MED_BLOCK_SIZE + t_ind; // allocate shared memory __shared__ float DATA[MED_BLOCK_SIZE]; /**************/ /* load stage */ /**************/ int sample_ind = floorf(d_random_numbers[g_ind] * (float)n_in); DATA[t_ind] = d_in[sample_ind]; __syncthreads(); /*******************/ /* reduction stage */ /*******************/ for (int s = 1; s < MED_BLOCK_SIZE; s *= 3) { int index = 3 * s * t_ind; if (index < MED_BLOCK_SIZE) { // fetch three values float value1 = DATA[index]; float value2 = DATA[index + s]; float value3 = DATA[index + 2 * s]; // extract the middle value (median) float smallest = fminf(value1, value2); value2 = fmaxf(value1, value2); value1 = smallest; value3 = fmaxf(value1, value3); value2 = fminf(value2, value3); DATA[index] = value2; } __syncthreads(); } /***************/ /* write stage */ /***************/ // write this block's approx median (first element) if (t_ind == 0) { d_out[blockIdx.x] = DATA[0]; } } // Approximate median with data shuffling // The input data is shuffled (with replacement) according to the random // numbers, n_in is the size of the input data (no limit) __global__ void multiple_median_reduce_shuffle_gpu(const float *d_in, float *d_out, const float *d_random_numbers, const int *d_start_inds, const int *d_n_in) { /**************/ /* initialize */ /**************/ int segment = blockIdx.y; // compute indices int t_ind = threadIdx.x; int g_ind = blockIdx.x * MED_BLOCK_SIZE + t_ind; // means that every row of blocks uses the same random numbers // allocate shared memory // __shared__ float DATA[MED_BLOCK_SIZE]; __shared__ float DATA[256]; /**************/ /* load stage */ /**************/ if (t_ind < MED_BLOCK_SIZE) { int sample_ind = d_start_inds[segment] + floorf(d_random_numbers[g_ind] * (float)d_n_in[segment]); DATA[t_ind] = d_in[sample_ind]; } __syncthreads(); /*******************/ /* reduction stage */ /*******************/ for (int s = 1; s < MED_BLOCK_SIZE; s *= 3) { int index = 3 * s 
* t_ind; if (index < MED_BLOCK_SIZE) { // fetch three values float value1 = DATA[index]; float value2 = DATA[index + s]; float value3 = DATA[index + 2 * s]; // extract the middle value (median) float smallest = fminf(value1, value2); value2 = fmaxf(value1, value2); value1 = smallest; value3 = fmaxf(value1, value3); value2 = fminf(value2, value3); DATA[index] = value2; } __syncthreads(); } /***************/ /* write stage */ /***************/ // write this block's approx median (first element) if (t_ind == 0) { d_out[gridDim.x * blockIdx.y + blockIdx.x] = DATA[0]; } } // writes the index of the first occurence of label l in labels into // starting_indices __global__ void extractLabelStartingIndicesGPU(int *starting_indices, unsigned int *labels, int n_labels) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < n_labels) { unsigned int curr_label = tex1Dfetch(labelsTexture, x); if ((x == 0) || (curr_label != tex1Dfetch(labelsTexture, x - 1))) starting_indices[curr_label] = x; } } /////////////////////// // // // Calling functions // // // /////////////////////// int divUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } // void get_GL_conv_constants(float &Z_conv1, float &Z_conv2, int n_cols, int // n_rows, float nodal_point_x, float nodal_point_y, float far_plane, float // near_plane) { void get_GL_conv_constants(float &Z_conv1, float &Z_conv2, float far_plane, float near_plane) { double f = (double)(far_plane); double n = (double)(near_plane); Z_conv1 = (float)((-f * n) / (f - n)); Z_conv2 = (float)(-(f + n) / (2 * (f - n)) - 0.5); } void convertZbufferToZ(float *d_Z, cudaArray *d_ZbufferArray, int n_cols, int n_rows, float nodal_point_x, float nodal_point_y, float near_plane, float far_plane) { // Determine Zbuffer conversion constants // depth = Z_conv1/(Zbuffer+Z_conv2) float Z_conv1, Z_conv2; get_GL_conv_constants(Z_conv1, Z_conv2, far_plane, near_plane); // Bind textures to arrays cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>(); cudaBindTextureToArray(d_Zbuffer_texture, d_ZbufferArray, channelFloat); // Generate nan; // unsigned long raw = 0x7FFFFFFF; // float floatnan = *(float *)&raw; float floatnan = nanf(""); // Convert Zbuffer dim3 TB(16, 16, 1); dim3 BG(divUp(n_cols, TB.x), divUp(n_rows, TB.y)); convert_Zbuffer_to_Z_GPU << <BG, TB>>> (d_Z, n_cols, n_rows, Z_conv1, Z_conv2, floatnan); } void convertZbufferToDisparity(float *d_Disparity, cudaArray *d_ZbufferArray, int n_cols, int n_rows, int pitch, float nodal_point_x, float nodal_point_y, float near_plane, float far_plane, float focal_length, float baseline) { // Determine Zbuffer conversion constants // depth = Z_conv1/(Zbuffer+Z_conv2) float Z_conv1, Z_conv2; get_GL_conv_constants(Z_conv1, Z_conv2, far_plane, near_plane); // modify conversion constants float D_conv1 = (float)((double)(-focal_length * baseline) / (double)Z_conv1); float D_conv2 = D_conv1 * Z_conv2; // Bind textures to arrays cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>(); cudaBindTextureToArray(d_Zbuffer_texture, d_ZbufferArray, channelFloat); // Generate nan; unsigned long raw = 0x7FFFFFFF; float floatnan = *(float *)&raw; // Convert Zbuffer dim3 TB(16, 16, 1); dim3 BG(divUp(n_cols, TB.x), divUp(n_rows, TB.y)); convert_Zbuffer_to_Disparity_GPU << <BG, TB>>> (d_Disparity, n_cols, n_rows, pitch, D_conv1, D_conv2, floatnan); } void save_device_var_to_file(const char *file_name, const void *device_var, int elem_size, int n_elements) { void *host_var = malloc(n_elements * elem_size); FILE *fout = 
fopen(file_name, "wb"); cudaMemcpy(host_var, device_var, n_elements * elem_size, cudaMemcpyDeviceToHost); fwrite(host_var, elem_size, n_elements, fout); fclose(fout); free(host_var); } // Compute the 1D approximate median using the first power of 3 elements // d_data gets overwritten! // the data gets shuffled (with replacement) at each stage float approx_median_shuffle_cuda(float *d_data, float *d_tmp, float *d_random_numbers, int pp) { // Make sure we're working with a power of 3 number of elements // (select the first power of 3 elements) int n_levels = (int)floor(log(double(pp)) / log(3.0)); // Define block size (grid size defined later) dim3 threadBlock(MED_BLOCK_SIZE, 1); int n_blocks; float *d_out; float *d_in; // d_data and d_tmp are constantly swapped -> d_data gets overwritten! d_in = d_data; // initial input is the data matrix d_out = d_tmp; // initial output is d_tmp int n_in = pp; // initially all data can be used for sampling while (n_levels > 0) { // Number of blocks and elements in block (only smaller for last block) n_blocks = 1; if (n_levels >= MED_BLOCK_LEVELS) n_blocks = (int)pow(3.0, n_levels - MED_BLOCK_LEVELS); dim3 blockGrid(n_blocks, 1); median_reduce_shuffle_gpu << <blockGrid, threadBlock>>> (d_in, d_out, d_random_numbers, n_in); // Update variables for next iteration n_levels -= MED_BLOCK_LEVELS; n_in = (int)pow(3.0, n_levels); // Swap input and output pointers float *tmp = d_in; d_in = d_out; d_out = tmp; } // Copy result back to host // (the last reduction output was swapped into d_in) float median; cudaMemcpy(&median, d_in, sizeof(float), cudaMemcpyDeviceToHost); return (median); } // Compute the 1D approximate medians using the first power of 3 elements // d_data gets overwritten! // the data gets shuffled (with replacement) at each stage // this version handles multiple datastreams packed in d_data // pp contains the number of values for each segment void approx_multiple_medians_shuffle_cuda(float *medians, float *d_data, float *d_tmp, const float *d_random_numbers, const int *pp, int n_segments, int *d_n_in, int *d_start_inds) { // Make sure we're working with a power of 3 number of elements // Use n_levels determined by largest stream int pp_max = pp[0]; for (int i = 1; i < n_segments; i++) if (pp[i] > pp_max) pp_max = pp[i]; int n_levels = (int)floor(log(double(pp_max)) / log(3.0)); // never process more than 3^9 elements per segment = 19683 n_levels = (n_levels > 9) ? 9 : n_levels; int start_inds[n_segments]; start_inds[0] = 0; for (int i = 1; i < n_segments; i++) start_inds[i] = start_inds[i - 1] + pp[i - 1]; // Define block size (grid size defined later) // dim3 threadBlock(MED_BLOCK_SIZE,1); dim3 threadBlock(256, 1); int n_blocks; float *d_out; float *d_in; // d_data and d_tmp are constantly swapped -> d_data gets overwritten! 
d_in = d_data; // initial input is the data matrix d_out = d_tmp; // initial output is d_tmp // initially all data can be used for sampling int n_in[n_segments]; memcpy(n_in, pp, n_segments * sizeof(int)); // int *d_n_in, *d_start_inds; // cudaMalloc((void**)&d_n_in,n_segments*sizeof(int)); // cudaMalloc((void**)&d_start_inds,n_segments*sizeof(int)); cudaMemcpy(d_n_in, n_in, n_segments * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_start_inds, start_inds, n_segments * sizeof(int), cudaMemcpyHostToDevice); // printf("starting med it\n"); while (n_levels > 0) { // Number of blocks and elements in block (only smaller for last block) n_blocks = 1; if (n_levels > MED_BLOCK_LEVELS) n_blocks = (int)pow(3.0, n_levels - MED_BLOCK_LEVELS); dim3 blockGrid(n_blocks, n_segments); // printf("blockgrid: %d %d\n",blockGrid.x,blockGrid.y); multiple_median_reduce_shuffle_gpu << <blockGrid, threadBlock>>> (d_in, d_out, d_random_numbers, d_start_inds, d_n_in); // Update variables for next iteration n_levels -= MED_BLOCK_LEVELS; int n = (int)pow(3.0, n_levels); for (int i = 0; i < n_segments; i++) { n_in[i] = n; start_inds[i] = i * n; } cudaMemcpy(d_n_in, n_in, n_segments * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_start_inds, start_inds, n_segments * sizeof(int), cudaMemcpyHostToDevice); // Swap input and output pointers float *tmp = d_in; d_in = d_out; d_out = tmp; } // Copy result back to host // (the last reduction output was swapped into d_in) cudaMemcpy(medians, d_in, n_segments * sizeof(float), cudaMemcpyDeviceToHost); // cudaFree(d_start_inds); // cudaFree(d_n_in); } void convertPointCloudToDepthImage(unsigned int *d_depth_image, const float4 *d_point_cloud, int n_cols, int n_rows, int n_points, float nodal_point_x, float nodal_point_y, float focal_length_x, float focal_length_y, const float *d_translation_vector, const float *d_rotation_matrix) { // initialize depth_image to max value dim3 dim_block_init(16, 8); dim3 dim_grid_init(divUp(n_cols, dim_block_init.x), divUp(n_rows, dim_block_init.y)); initializeToValue_kernel << <dim_grid_init, dim_block_init>>> (d_depth_image, 4294967295, n_cols, n_rows); dim3 dim_block(128); dim3 dim_grid(divUp(n_points, dim_block.x)); convertPointCloudToDepthImage_kernel << <dim_grid, dim_block>>> (d_depth_image, d_point_cloud, n_cols, n_rows, n_points, nodal_point_x, nodal_point_y, focal_length_x, focal_length_y, d_translation_vector, d_rotation_matrix); } void convertDepthImageToMeter(float *d_depth_image_meter, const unsigned int *d_depth_image_millimeter, int n_cols, int n_rows) { dim3 dim_block(16, 8); dim3 dim_grid(divUp(n_cols, dim_block.x), divUp(n_rows, dim_block.y)); convertDepthImageToMeter_kernel << <dim_grid, dim_block>>> (d_depth_image_meter, d_depth_image_millimeter, n_rows, n_cols); } void colorValidationDepthImageMatches(uchar4 *out_image, const float *d_depth_image, cudaArray *d_z_buffer_array, int width, int height, float near_plane, float far_plane, float max_error, float llim_depth, float ulim_depth) { dim3 dimBlock(16, 8); dim3 dimGrid(divUp(width, dimBlock.x), divUp(height, dimBlock.y)); // Determine Zbuffer conversion constants // depth = Z_conv1/(Zbuffer+Z_conv2) float Z_conv1, Z_conv2; get_GL_conv_constants(Z_conv1, Z_conv2, far_plane, near_plane); // Bind textures to arrays cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>(); cudaBindTextureToArray(d_Zbuffer_texture, d_z_buffer_array, channelFloat); colorValidationDepthImageMatches_kernel << <dimGrid, dimBlock>>> (out_image, d_depth_image, width, height, Z_conv1, 
      Z_conv2, max_error, llim_depth, ulim_depth);
}

void extractLabelStartingIndices(int *starting_indices, unsigned int *labels,
                                 int n_labels, int max_label) {
  cudaMemset(starting_indices, -1, (max_label + 1) * sizeof(int));

  labelsTexture.normalized = 0;
  labelsTexture.filterMode = cudaFilterModePoint;
  labelsTexture.addressMode[0] = cudaAddressModeClamp;
  cudaChannelFormatDesc channelUINT = cudaCreateChannelDesc<unsigned int>();
  cudaBindTexture(0, &labelsTexture, labels, &channelUINT);

  dim3 threads(256);
  dim3 blocks(divUp(n_labels, threads.x));
  extractLabelStartingIndicesGPU<<<blocks, threads>>>(starting_indices, labels,
                                                      n_labels);
  cudaUnbindTexture(labelsTexture);
}
}
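// Editor's sketch (not part of the original file): a host-side check of the
// Z-buffer conversion used above. get_GL_conv_constants defines
// depth = Z_conv1 / (Zbuffer + Z_conv2); the snippet below verifies, for an
// assumed near/far pair, that a Z-buffer value of 0 maps back to the near
// plane and 1 maps to the far plane. Function and variable names here are
// illustrative only.
#include <cassert>
#include <cmath>

static void check_gl_conv_constants(float near_plane, float far_plane) {
  double f = far_plane, n = near_plane;
  float Z_conv1 = (float)((-f * n) / (f - n));
  float Z_conv2 = (float)(-(f + n) / (2 * (f - n)) - 0.5);
  auto depth_of = [&](float zbuffer) { return Z_conv1 / (zbuffer + Z_conv2); };
  // Zbuffer == 0 (closest) should give the near plane, 1 (farthest) the far plane.
  assert(std::fabs(depth_of(0.0f) - near_plane) < 1e-3f);
  assert(std::fabs(depth_of(1.0f) - far_plane) < 1e-2f);
}
// e.g. check_gl_conv_constants(0.1f, 10.0f);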
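// Editor's sketch (not part of the original file): a host-side version of the
// median-of-3 tournament performed by median_reduce_shuffle_gpu above. The
// kernel repeatedly replaces triples of (randomly resampled) values by their
// median until one value remains; this sketch applies the same reduction to
// the first power-of-3 prefix of the input, without the random resampling or
// the block/grid decomposition. Names are illustrative, not from the
// original code.
#include <algorithm>
#include <cmath>
#include <vector>

static float approx_median_of3_reference(std::vector<float> v) {
  if (v.empty()) return 0.0f;  // no data
  int n_levels = (int)std::floor(std::log((double)v.size()) / std::log(3.0));
  size_t n = (size_t)std::pow(3.0, n_levels);  // first power-of-3 prefix
  v.resize(n);
  while (v.size() > 1) {
    std::vector<float> next(v.size() / 3);
    for (size_t i = 0; i < next.size(); ++i) {
      float a = v[3 * i], b = v[3 * i + 1], c = v[3 * i + 2];
      // median of three, written as the same min/max network the kernel uses
      next[i] = std::min(std::max(a, b), std::max(std::min(a, b), c));
    }
    v.swap(next);
  }
  return v[0];
}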
typedef long long ll_t;

typedef struct __builtin_align__(8) {
  float value;
  int index;
} pair;

#if (__CUDA_ARCH__ < 700)
__device__ void __nanosleep(unsigned int ns){
  clock_t start_clock = clock();
  clock_t clock_offset = 0;
  while (clock_offset < ns) {
    clock_offset = clock() - start_clock;
  }
}
#endif

__device__ __forceinline__ unsigned int bfe(
  unsigned int source,
  unsigned int bitIndex
) {
  unsigned int bit;
  asm volatile("bfe.u32 %0, %1, %2, %3;" : "=r"(bit) : "r"((unsigned int) source), "r"(bitIndex), "r"(1));
  return bit;
}

__device__ __forceinline__ void warp_comparator(
  float &value,
  int &index,
  const int stride,
  const int direction
){
  const float otherValue = __shfl_xor_sync(0xFFFFFFFF, value, stride);
  const int otherIndex = __shfl_xor_sync(0xFFFFFFFF, index, stride);
  bool condition = value < otherValue == direction;
  index = condition ? otherIndex : index;
  value = condition ? otherValue : value;
}

__device__ __forceinline__ void block_comparator(
  float &value,
  int &index,
  const int stride,
  const int direction,
  const int laneID,
  _VOLATILE_ float valSmem[_TPB_],
  _VOLATILE_ int idxSmem[_TPB_]
){
  valSmem[laneID] = value;
  idxSmem[laneID] = index;
  __syncthreads();

  float otherValue = valSmem[laneID ^ stride];
  // the index must stay integral; storing it in a float loses exactness for large N
  int otherIndex = idxSmem[laneID ^ stride];
  __syncthreads();

  bool condition = value < otherValue == direction;
  value = condition ? otherValue : value;
  index = condition ? otherIndex : index;
}

__device__ __forceinline__ void block_comparator_noop(
){
  __syncthreads();
  __syncthreads();
}

__device__ __forceinline__ void thread_comparator(
  float &value,
  int &index,
  float otherValue,
  int otherIndex,
  const int direction
){
  bool condition = value > otherValue == direction;
  if (condition){
    value = otherValue;
    index = otherIndex;
  }
}

__device__ void bitonic_sort_2(
  float &value,
  int &index,
  int laneID
){
  warp_comparator(value, index, 1, bfe(laneID, 1) ^ bfe(laneID, 0));
}

__device__ void bitonic_sort_4(
  float &value,
  int &index,
  int laneID
){
  bitonic_sort_2(value, index, laneID);
  warp_comparator(value, index, 2, bfe(laneID, 2) ^ bfe(laneID, 1));
  warp_comparator(value, index, 1, bfe(laneID, 2) ^ bfe(laneID, 0));
}

__device__ void bitonic_sort_8(
  float &value,
  int &index,
  int laneID
){
  bitonic_sort_4(value, index, laneID);
  warp_comparator(value, index, 4, bfe(laneID, 3) ^ bfe(laneID, 2));
  warp_comparator(value, index, 2, bfe(laneID, 3) ^ bfe(laneID, 1));
  warp_comparator(value, index, 1, bfe(laneID, 3) ^ bfe(laneID, 0));
}

__device__ void bitonic_sort_16(
  float &value,
  int &index,
  int laneID
){
  bitonic_sort_8(value, index, laneID);
  warp_comparator(value, index, 8, bfe(laneID, 4) ^ bfe(laneID, 3));
  warp_comparator(value, index, 4, bfe(laneID, 4) ^ bfe(laneID, 2));
  warp_comparator(value, index, 2, bfe(laneID, 4) ^ bfe(laneID, 1));
  warp_comparator(value, index, 1, bfe(laneID, 4) ^ bfe(laneID, 0));
}

__device__ void bitonic_sort_32(
  float &value,
  int &index,
  int laneID
){
  bitonic_sort_16(value, index, laneID);
  warp_comparator(value, index, 16, bfe(laneID, 5) ^ bfe(laneID, 4));
  warp_comparator(value, index, 8, bfe(laneID, 5) ^ bfe(laneID, 3));
  warp_comparator(value, index, 4, bfe(laneID, 5) ^ bfe(laneID, 2));
  warp_comparator(value, index, 2, bfe(laneID, 5) ^ bfe(laneID, 1));
  warp_comparator(value, index, 1, bfe(laneID, 5) ^ bfe(laneID, 0));
}

__device__ void bitonic_sort_global_2(
  float &value,
  int &index,
  float otherValue,
  int otherIndex,
  int laneID
) {
  if (_TPB_ - 32 <= threadIdx.x){
    thread_comparator(value, index, otherValue, otherIndex, 0);
    warp_comparator(value, index, 1, !bfe(laneID, 0));
  }
}
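// Editor's sketch (not part of the original file): a host-side simulation of
// the warp-wide bitonic network above, values only (indices omitted). Each
// array element stands in for one lane; partner selection via (lane ^ stride)
// mirrors __shfl_xor_sync, and the directions are the same
// bfe(lane, hi) ^ bfe(lane, lo) pattern used by bitonic_sort_32. Running the
// schedule should leave the 32 values in ascending order across lanes.
// Names are illustrative only.
#include <algorithm>

static unsigned bfe_ref(unsigned source, unsigned bitIndex) {
  return (source >> bitIndex) & 1u;  // what the bfe.u32 instruction extracts
}

static void bitonic_sort_32_reference(float (&v)[32]) {
  // (stride, hiBit, loBit) schedule of bitonic_sort_2 .. bitonic_sort_32
  const int schedule[][3] = {
    {1, 1, 0},
    {2, 2, 1}, {1, 2, 0},
    {4, 3, 2}, {2, 3, 1}, {1, 3, 0},
    {8, 4, 3}, {4, 4, 2}, {2, 4, 1}, {1, 4, 0},
    {16, 5, 4}, {8, 5, 3}, {4, 5, 2}, {2, 5, 1}, {1, 5, 0},
  };
  for (const auto &step : schedule) {
    int stride = step[0];
    float old[32];
    std::copy(v, v + 32, old);  // all lanes exchange simultaneously
    for (int lane = 0; lane < 32; ++lane) {
      float other = old[lane ^ stride];
      unsigned direction = bfe_ref(lane, step[1]) ^ bfe_ref(lane, step[2]);
      // same rule as warp_comparator: direction == 1 keeps the larger value
      if ((old[lane] < other) == (direction != 0)) v[lane] = other;
    }
  }
}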
__device__ void bitonic_sort_global_4( float &value, int &index, float otherValue, int otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } __device__ void bitonic_sort_global_8( float &value, int &index, float otherValue, int otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } __device__ void bitonic_sort_global_16( float &value, int &index, float otherValue, int otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } __device__ void bitonic_sort_global_32( float &value, int &index, float otherValue, int otherIndex, int laneID ) { if (_TPB_ - 32 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } } #if _TPB_ >= 64 __device__ void bitonic_sort_64( float &value, int &index, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ){ bitonic_sort_32(value, index, laneID); block_comparator(value, index, 32, bfe(laneID, 6) ^ bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, bfe(laneID, 6) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 6) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 6) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 6) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 6) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_64( float &value, int &index, float otherValue, int otherIndex, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ) { if (_TPB_ - 64 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); } } #if _TPB_ >= 128 __device__ void bitonic_sort_128( float &value, int &index, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ){ bitonic_sort_64(value, index, valSmem, idxSmem, laneID); block_comparator(value, index, 64, bfe(laneID, 7) ^ bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, bfe(laneID, 7) ^ bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, bfe(laneID, 7) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 7) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 7) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 7) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 7) ^ 
bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_128( float &value, int &index, float otherValue, int otherIndex, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ) { if (_TPB_ - 128 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); } } #if _TPB_ >= 256 __device__ void bitonic_sort_256( float &value, int &index, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ){ bitonic_sort_128(value, index, valSmem, idxSmem, laneID); block_comparator(value, index, 128, bfe(laneID, 8) ^ bfe(laneID, 7), laneID, valSmem, idxSmem); block_comparator(value, index, 64, bfe(laneID, 8) ^ bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, bfe(laneID, 8) ^ bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, bfe(laneID, 8) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 8) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 8) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 8) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 8) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_256( float &value, int &index, float otherValue, int otherIndex, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ) { if (_TPB_ - 256 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 128, !bfe(laneID, 7), laneID, valSmem, idxSmem); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); } } #if _TPB_ >= 512 __device__ void bitonic_sort_512( float &value, int &index, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ){ bitonic_sort_256(value, index, valSmem, idxSmem, laneID); block_comparator(value, index, 256, bfe(laneID, 9) ^ bfe(laneID, 8), laneID, valSmem, idxSmem); block_comparator(value, index, 128, bfe(laneID, 9) ^ bfe(laneID, 7), laneID, valSmem, idxSmem); block_comparator(value, index, 64, bfe(laneID, 9) ^ bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, bfe(laneID, 9) ^ bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, bfe(laneID, 9) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 9) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 9) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 9) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 9) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_512( float &value, int &index, float otherValue, int otherIndex, _VOLATILE_ float valSmem[_TPB_], 
_VOLATILE_ int idxSmem[_TPB_], int laneID ) { if (_TPB_ - 512 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 256, !bfe(laneID, 8), laneID, valSmem, idxSmem); block_comparator(value, index, 128, !bfe(laneID, 7), laneID, valSmem, idxSmem); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); } } #if _TPB_ >= 1024 __device__ void bitonic_sort_1024( float &value, int &index, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ){ bitonic_sort_512(value, index, valSmem, idxSmem, laneID); block_comparator(value, index, 512, bfe(laneID, 10) ^ bfe(laneID, 9), laneID, valSmem, idxSmem); block_comparator(value, index, 256, bfe(laneID, 10) ^ bfe(laneID, 8), laneID, valSmem, idxSmem); block_comparator(value, index, 128, bfe(laneID, 10) ^ bfe(laneID, 7), laneID, valSmem, idxSmem); block_comparator(value, index, 64, bfe(laneID, 10) ^ bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, bfe(laneID, 10) ^ bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, bfe(laneID, 10) ^ bfe(laneID, 4)); warp_comparator(value, index, 8, bfe(laneID, 10) ^ bfe(laneID, 3)); warp_comparator(value, index, 4, bfe(laneID, 10) ^ bfe(laneID, 2)); warp_comparator(value, index, 2, bfe(laneID, 10) ^ bfe(laneID, 1)); warp_comparator(value, index, 1, bfe(laneID, 10) ^ bfe(laneID, 0)); } #endif __device__ void bitonic_sort_global_1024( float &value, int &index, float otherValue, int otherIndex, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int laneID ) { if (_TPB_ - 1024 <= threadIdx.x){ thread_comparator(value, index, otherValue, otherIndex, 0); block_comparator(value, index, 512, !bfe(laneID, 9), laneID, valSmem, idxSmem); block_comparator(value, index, 256, !bfe(laneID, 8), laneID, valSmem, idxSmem); block_comparator(value, index, 128, !bfe(laneID, 7), laneID, valSmem, idxSmem); block_comparator(value, index, 64, !bfe(laneID, 6), laneID, valSmem, idxSmem); block_comparator(value, index, 32, !bfe(laneID, 5), laneID, valSmem, idxSmem); warp_comparator(value, index, 16, !bfe(laneID, 4)); warp_comparator(value, index, 8, !bfe(laneID, 3)); warp_comparator(value, index, 4, !bfe(laneID, 2)); warp_comparator(value, index, 2, !bfe(laneID, 1)); warp_comparator(value, index, 1, !bfe(laneID, 0)); } else { block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); block_comparator_noop(); } } __device__ __inline__ bool is_queue_full( int queueFront, int queueRear ){ return ((queueFront - queueRear) == 1 || (queueFront == 0 && queueRear == _QCAP_ - 1)); //return (queueRear + 1) % _QCAP_ == queueFront; } __device__ __inline__ bool is_queue_empty( int queueFront, int queueRear ){ return queueFront == -1; } __device__ void push_queue( _VOLATILE_ pair queueSmem[_TPB_][_QCAP_], pair newPair, int &queueFront, int &queueRear ) { const int tid = threadIdx.x; if (is_queue_full(queueFront, queueRear)){ return; } else if (is_queue_empty(queueFront, queueRear)){ queueFront = 0; queueRear = 0; 
queueSmem[tid][queueRear] = newPair; } else { queueRear = (queueRear + 1) % _QCAP_; queueSmem[tid][queueRear] = newPair; } } __device__ void pop_queue( _VOLATILE_ pair queueSmem[_TPB_][_QCAP_], pair &oldPair, int &queueFront, int &queueRear ) { const int tid = threadIdx.x; if (is_queue_empty(queueFront, queueRear)){ return; } else if (queueFront == queueRear){ pair poppedPair = queueSmem[tid][queueFront]; oldPair.value = poppedPair.value; oldPair.index = poppedPair.index; queueFront = -1; queueRear = -1; } else { pair poppedPair = queueSmem[tid][queueFront]; oldPair.value = poppedPair.value; oldPair.index = poppedPair.index; //oldPair = queueSmem[tid][queueFront]; queueFront = (queueFront + 1) % _QCAP_; } } __device__ void push_pop_queue( _VOLATILE_ pair queueSmem[_TPB_][_QCAP_], pair newPair, pair &oldPair, int &queueFront, int &queueRear ) { const int tid = threadIdx.x; if (is_queue_empty(queueFront, queueRear)){ return; } else if (queueFront == queueRear){ oldPair = queueSmem[tid][queueFront]; queueSmem[tid][queueRear] = newPair; } else { oldPair = queueSmem[tid][queueFront]; queueFront = (queueFront + 1) % _QCAP_; queueRear = (queueRear + 1) % _QCAP_; queueSmem[tid][queueRear] = newPair; } } __device__ void init_queue( _VOLATILE_ pair queueSmem[_TPB_][_QCAP_] ){ const int tid = threadIdx.x; pair emptyPair; emptyPair.value = -INFINITY; emptyPair.index = -1; #pragma unroll for (int i=0; i<_QCAP_; i++){ queueSmem[tid][i] = emptyPair; } } __device__ void sort( float &finalValue, int &finalIndex, float value, int index, _VOLATILE_ float valSmem[_TPB_], _VOLATILE_ int idxSmem[_TPB_], int K ){ int tid = threadIdx.x; #if _TPB_ == 32 bitonic_sort_32(value, index, tid); #elif _TPB_ == 64 bitonic_sort_64(value, index, valSmem, idxSmem, tid); #elif _TPB_ == 128 bitonic_sort_128(value, index, valSmem, idxSmem, tid); #elif _TPB_ == 256 bitonic_sort_256(value, index, valSmem, idxSmem, tid); #elif _TPB_ == 512 bitonic_sort_512(value, index, valSmem, idxSmem, tid); #elif _TPB_ == 1024 bitonic_sort_1024(value, index, valSmem, idxSmem, tid); #endif switch (K){ case 2: bitonic_sort_global_2( finalValue, finalIndex, value, index, tid); break; case 4: bitonic_sort_global_4( finalValue, finalIndex, value, index, tid); break; case 8: bitonic_sort_global_8( finalValue, finalIndex, value, index, tid); break; case 16: bitonic_sort_global_16( finalValue, finalIndex, value, index, tid); break; case 32: bitonic_sort_global_32( finalValue, finalIndex, value, index, tid); break; case 64: bitonic_sort_global_64( finalValue, finalIndex, value, index, valSmem, idxSmem, tid); break; case 128: bitonic_sort_global_128( finalValue, finalIndex, value, index, valSmem, idxSmem, tid); break; case 256: bitonic_sort_global_256( finalValue, finalIndex, value, index, valSmem, idxSmem, tid); break; case 512: bitonic_sort_global_512( finalValue, finalIndex, value, index, valSmem, idxSmem, tid); break; case 1024: bitonic_sort_global_1024( finalValue, finalIndex, value, index, valSmem, idxSmem, tid); break; } } __device__ void load_buffer( const float* mat, pair buffer[_TN_], int i, int N ){ const ll_t iM = blockIdx.x; const int tid = threadIdx.x; #pragma unroll for (int j=0; j<_TN_; j++){ ll_t iN = i * _TPB_ * _TN_ + j * _TPB_ + tid; if (iN < N){ buffer[j].value = mat[iM * ll_t(N) + iN]; buffer[j].index = iN; } else { buffer[j].value = -INFINITY; buffer[j].index = -1; } } } __device__ void arr2arr( pair src[_TN_], pair tar[_TN_] ){ #pragma unroll for (int i=0; i<_TN_; i++){ tar[i] = src[i]; } } extern "C" __global__ void topk_select( 
const float* __restrict__ mat, float* __restrict__ gValue, ll_t* __restrict__ gIndex, int M, int N, int K ){ const int tid = threadIdx.x; const ll_t iM = blockIdx.x; // this is used to exchange values between threads when sorting __shared__ _VOLATILE_ float valSmem[_TPB_]; // this is used to exchange indices between threads when sorting __shared__ _VOLATILE_ int idxSmem[_TPB_]; /* this is used to signal that at least one threads has reached its maximum queue size, so that all threads will perform a bitonic sort. */ __shared__ _VOLATILE_ int signal[1]; signal[0] = 0; /* this is used to threshold the input values, values below this threashold will not be added to thread queue, or trigger a sort, this value is broadcasted from last thread to all threads at the end of each bitonic sort. */ __shared__ _VOLATILE_ float minSmem[1]; minSmem[0] = -INFINITY; __shared__ _VOLATILE_ pair queueSmem[_TPB_][_QCAP_]; init_queue(queueSmem); __syncthreads(); int queueFront = -1; int queueRear = -1; float minValue = -INFINITY; /* finalValue and finalIndex are the storage of final topk values and indices, they will be updated at each bitonic sort step, and stored to DRAM at the very end. */ float finalValue = -INFINITY; int finalIndex = -1; pair buffer[_TN_]; pair working[_TN_]; load_buffer(mat, buffer, 0, N); // The number of iterations of the main loop is ceil(N / (ThreadsPerBlock * TN)) const int nIter = (N + _TPB_ * _TN_ - 1) / (_TPB_ * _TN_); for (int i=0; i < nIter; i++){ // move prefetched data from buffer to working array arr2arr(buffer, working); // then start fetching next tiles of data to buffer array if (i < nIter - 1){ load_buffer(mat, buffer, i+1, N); } #pragma unroll for (int j=0; j < _TN_; j++){ pair newPair = working[j]; pair oldPair; oldPair.value = -INFINITY; oldPair.index = -1; /* if the queue is full, pop the front item, if the value of popped item is larger than previous minValue, trigger block-wise bitonic sort */ if (is_queue_full(queueFront, queueRear)){ pop_queue(queueSmem, oldPair, queueFront, queueRear); if (oldPair.value > minValue){ // atomicAdd(signal, 1); signal[0] = 1; } } /* if incoming value is greater then previous minValue, add the (newValue, newIndex) pair to queue */ if (newPair.value > minValue){ push_queue(queueSmem, newPair, queueFront, queueRear); } __syncthreads(); if (signal[0] > 0){ //if any thread has triggered blockwise sort, perform sort sort( finalValue, finalIndex, oldPair.value, oldPair.index, valSmem, idxSmem, K ); __syncthreads(); // reset the signal signal[0] = 0; // last thread sets minSmem to its finalValue if (tid == _TPB_ - 1){ minSmem[0] = finalValue; } __syncthreads(); // all threads read from minSmem to set new minValue minValue = minSmem[0]; } __syncthreads(); } } // pop all remaining items from queue for (int i=0; i<_QCAP_; i++){ pair oldPair; oldPair.value = -INFINITY; oldPair.index = -1; if (!is_queue_empty(queueFront, queueRear)){ pop_queue(queueSmem, oldPair, queueFront, queueRear); if (oldPair.value > minValue){ //atomicAdd(signal, 1); signal[0] = 1; } } __syncthreads(); if (signal[0] > 0){ sort( finalValue, finalIndex, oldPair.value, oldPair.index, valSmem, idxSmem, K ); __syncthreads(); signal[0] = 0; if (tid == _TPB_ - 1){ minSmem[0] = finalValue; } __syncthreads(); minValue = minSmem[0]; } __syncthreads(); } // last K threads write their finalValue and finalIndex to gValue and gIndex if (_TPB_ - K <= tid){ const int writeAddress = (iM * K) + tid - ( _TPB_ - K); gValue[writeAddress] = finalValue; gIndex[writeAddress] = ll_t(finalIndex); } }
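// Editor's sketch (not part of the original file): one plausible way to launch
// topk_select from the host, assuming this sits in the same .cu translation
// unit as the kernel above. The kernel relies on the compile-time macros
// _TPB_ (threads per block), _TN_ (elements prefetched per thread and tile),
// _QCAP_ (per-thread queue capacity) and _VOLATILE_, e.g.
//   nvcc -D_TPB_=256 -D_TN_=4 -D_QCAP_=4 -D_VOLATILE_=volatile ...
// Those values and the helper below are illustrative assumptions, not taken
// from the original build. One block handles one of the M rows
// (blockIdx.x == iM), and K must be one of the powers of two handled by the
// switch in sort(), with K <= _TPB_.
//
// d_mat : M x N row-major matrix on the device.
// d_val / d_idx : per-row top-K outputs, each of size M x K.
static cudaError_t launch_topk_select(const float *d_mat, float *d_val,
                                      ll_t *d_idx, int M, int N, int K) {
  dim3 grid(M);        // one block per row
  dim3 block(_TPB_);   // must match the -D_TPB_ value the kernel was built with
  topk_select<<<grid, block>>>(d_mat, d_val, d_idx, M, N, K);
  return cudaGetLastError();
}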
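// Editor's sketch (not part of the original file): the routines below lower
// convolution to matrix multiplication by expanding image patches into a
// matrix (kExpand, an im2col-style transform performed in batches of modules)
// and calling cublasSgemm on the expanded matrix. The CPU sketch below shows
// the patch-expansion idea for a single image and a single channel; it is a
// simplification and does not reproduce the exact num_images-interleaved,
// module-batched layout that kExpand writes. Names are illustrative only.
#include <vector>

// Returns a (num_modules_y*num_modules_x) x (kernel_size_y*kernel_size_x)
// matrix whose rows are the flattened input patches of each output module.
static std::vector<float> im2col_single_channel(
    const std::vector<float> &image, int image_size_y, int image_size_x,
    int kernel_size_y, int kernel_size_x, int stride_y, int stride_x,
    int padding_y, int padding_x, int num_modules_y, int num_modules_x) {
  std::vector<float> expanded((size_t)num_modules_y * num_modules_x *
                              kernel_size_y * kernel_size_x, 0.0f);
  for (int my = 0; my < num_modules_y; ++my) {
    for (int mx = 0; mx < num_modules_x; ++mx) {
      // same start-index convention as kExpand: padding enters additively,
      // so a left/top pad of p corresponds to passing padding_* = -p
      int startY = my * stride_y + padding_y;
      int startX = mx * stride_x + padding_x;
      size_t row = (size_t)(my * num_modules_x + mx) * kernel_size_y * kernel_size_x;
      for (int ky = 0; ky < kernel_size_y; ++ky) {
        for (int kx = 0; kx < kernel_size_x; ++kx) {
          int Y = startY + ky, X = startX + kx;
          float v = (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y)
                        ? 0.0f
                        : image[(size_t)Y * image_size_x + X];
          expanded[row + ky * kernel_size_x + kx] = v;
        }
      }
    }
  }
  return expanded;
}
// A convolution then becomes: output(module, out_channel) =
//   sum over patch elements of expanded(module, elem) * filter(out_channel, elem),
// i.e. one GEMM between the expanded matrix and the filter matrix.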
size_t free_space_ = 0; void EstimateFreeSpace() { /* size_t total; cudaMemGetInfo(&free_space_, &total); //free_space_ >>= 1; if (free_space_ > 1>>20) { free_space_ -= 1 << 20; // Just remove 1 MB. This seems to work! } */ // How to get free contiguous space ? free_space_ = MAX_MEMORY_BYTES; } inline bool check_cublas_error() { cublasStatus status = cublasGetError(); return status != CUBLAS_STATUS_SUCCESS; } __device__ inline float square(float a) { return a * a; } inline void __getLastCudaError(const char *errorMessage, const char *file, const int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void _Scale(cudamat* mat, float scale) { if (scale == 0) { cudaMemset(mat->data_device, 0, sizeof(float) * mat->size[0] * mat->size[1]); } else if (scale != 1) { cublasSscal(mat->size[0] * mat->size[1], scale, mat->data_device, 1); } } class AvgPooler { public: __device__ inline float operator()(const float a, const float b) const { return a + b; } __device__ inline float getBaseValue() const { return 0; } __device__ inline float output(const float a, const int regionSize) const { return a / regionSize; } }; class MaxPooler { public: __device__ inline float operator()(const float a, const float b) const { return fmaxf(a, b); } __device__ inline float getBaseValue() const { return -2e38; } __device__ inline float output(const float a, const int regionSize) const { return a; } }; __global__ void kExpand(float *images, float* targets, int num_images, int num_input_channels, int image_size_y, int image_size_x, int num_modules_y, int num_modules_x, int kernel_size_y, int kernel_size_x, int padding_y, int padding_x, int stride_y, int stride_x, int num_modules_batch, int module_id_offset) { int color = blockIdx.y; int src_module_id = module_id_offset + blockIdx.x; int dst_module_id = blockIdx.x; int module_id_x = src_module_id % num_modules_x; int module_id_y = src_module_id / num_modules_x; int startX = module_id_x * stride_x + padding_x; int startY = module_id_y * stride_y + padding_y; int Y, X; long target_id, source_id; images += num_images * image_size_x * image_size_y * color; targets += num_images * (dst_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color)); for (int y = 0; y < kernel_size_y; y++) { Y = startY + y; for (int x = 0; x < kernel_size_x; x++) { X = startX + x; target_id = num_images * num_modules_batch * (x + kernel_size_x * y); source_id = num_images * (X + image_size_x * Y); if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) { for (int im = threadIdx.x; im < num_images; im += blockDim.x) { targets[target_id + im] = 0; } } else { for (int im = threadIdx.x; im < num_images; im += blockDim.x) { targets[target_id + im] = images[source_id + im]; } } __syncthreads(); } } } template <class Pooler> __global__ void kPool(float* images, float* targets, int num_images, int num_input_channels, int image_size_y, int image_size_x, int image_size_t, int num_modules_y, int num_modules_x, int num_modules_t, int kernel_size_y, int kernel_size_x, int kernel_size_t, int padding_y, int padding_x, int padding_t, int stride_y, int stride_x, int stride_t, float scaleOutput, Pooler pooler) { const int color = blockIdx.y; const int num_colors = gridDim.y; const int num_modules = num_modules_y * num_modules_x * num_modules_t; long source_id, target_id; images += num_images * image_size_x * 
image_size_y * color; targets += num_images * num_modules_x * num_modules_y * color; for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) { int module_id_x = module_id % num_modules_x; int module_id_y = (module_id / num_modules_x) % num_modules_y; int module_id_t = (module_id / num_modules_x) / num_modules_y; int startX = module_id_x * stride_x + padding_x; int startY = module_id_y * stride_y + padding_y; int startT = module_id_t * stride_t + padding_t; int endX = startX + kernel_size_x; int endY = startY + kernel_size_y; int endT = startT + kernel_size_t; target_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t)); startX = MAX(startX, 0); startY = MAX(startY, 0); startT = MAX(startT, 0); endY = MIN(endY , image_size_y); endX = MIN(endX , image_size_x); endT = MIN(endT , image_size_t); int regionSize = (endX - startX) * (endY - startY) * (endT - startT); for (int im = threadIdx.x; im < num_images; im += blockDim.x) { float val = pooler.getBaseValue(); for (int T = startT; T < endT; T++) { for (int Y = startY; Y < endY; Y++) { for (int X = startX; X < endX; X++) { source_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T)); val = pooler(val, images[source_id + im]); } } } targets[target_id + im] = scaleOutput * pooler.output(val, regionSize); } } __syncthreads(); } __global__ void kAvgPoolUndo(float *derivs, float* targets, int num_images, int num_input_channels, int image_size_y, int image_size_x, int image_size_t, int num_modules_y, int num_modules_x, int num_modules_t, int kernel_size_y, int kernel_size_x, int kernel_size_t, int padding_y, int padding_x, int padding_t, int stride_y, int stride_x, int stride_t, float scaleOutput) { const int color = blockIdx.y; const int num_colors = gridDim.y; const int num_modules = num_modules_y * num_modules_x * num_modules_t; long source_id, target_id; derivs += num_images * num_modules_x * num_modules_y * color; targets += num_images * image_size_x * image_size_y * color; for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) { int module_id_x = module_id % num_modules_x; int module_id_y = (module_id / num_modules_x) % num_modules_y; int module_id_t = (module_id / num_modules_x) / num_modules_y; int startX = module_id_x * stride_x + padding_x; int startY = module_id_y * stride_y + padding_y; int startT = module_id_t * stride_t + padding_t; int endX = startX + kernel_size_x; int endY = startY + kernel_size_y; int endT = startT + kernel_size_t; source_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t)); startX = MAX(startX, 0); startY = MAX(startY, 0); startT = MAX(startT, 0); endY = MIN(endY , image_size_y); endX = MIN(endX , image_size_x); endT = MIN(endT , image_size_t); int regionSize = (endX - startX) * (endY - startY) * (endT - startT); for (int im = threadIdx.x; im < num_images; im += blockDim.x) { float val = scaleOutput * derivs[source_id + im] / regionSize; for (int T = startT; T < endT; T++) { for (int Y = startY; Y < endY; Y++) { for (int X = startX; X < endX; X++) { target_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T)) + im; atomicAdd(&targets[target_id], val); __syncthreads(); } } } } } } __global__ void kMaxPoolUndo(float * images, float *derivs, float* maxes, float* targets, int num_images, int num_input_channels, int image_size_y, int image_size_x, int image_size_t, int num_modules_y, int num_modules_x, int num_modules_t, 
int kernel_size_y, int kernel_size_x, int kernel_size_t, int padding_y, int padding_x, int padding_t, int stride_y, int stride_x, int stride_t, float scaleOutput) { const int color = blockIdx.y; const int num_colors = gridDim.y; const int num_modules = num_modules_y * num_modules_x * num_modules_t; long source_id, target_id; derivs += num_images * num_modules_x * num_modules_y * color; maxes += num_images * num_modules_x * num_modules_y * color; targets += num_images * image_size_x * image_size_y * color; images += num_images * image_size_x * image_size_y * color; for (int module_id = blockIdx.x; module_id < num_modules; module_id += gridDim.x) { int module_id_x = module_id % num_modules_x; int module_id_y = (module_id / num_modules_x) % num_modules_y; int module_id_t = (module_id / num_modules_x) / num_modules_y; int startX = module_id_x * stride_x + padding_x; int startY = module_id_y * stride_y + padding_y; int startT = module_id_t * stride_t + padding_t; int endX = startX + kernel_size_x; int endY = startY + kernel_size_y; int endT = startT + kernel_size_t; source_id = num_images * (module_id_x + num_modules_x * (module_id_y + num_modules_y * num_colors * module_id_t)); startX = MAX(startX, 0); startY = MAX(startY, 0); startT = MAX(startT, 0); endY = MIN(endY , image_size_y); endX = MIN(endX , image_size_x); endT = MIN(endT , image_size_t); for (int im = threadIdx.x; im < num_images; im += blockDim.x) { float val = scaleOutput * derivs[source_id + im]; for (int T = startT; T < endT; T++) { for (int Y = startY; Y < endY; Y++) { for (int X = startX; X < endX; X++) { target_id = num_images * (X + image_size_x * (Y + image_size_y * num_colors * T)) + im; if (images[target_id] == maxes[source_id + im]) { atomicAdd(&targets[target_id], val); } __syncthreads(); } } } } } } __global__ void kContract(float *expanded_data, float* targets, int num_images, int num_input_channels, int image_size_y, int image_size_x, int num_modules_y, int num_modules_x, int kernel_size_y, int kernel_size_x, int padding_y, int padding_x, int stride_y, int stride_x, int num_modules_batch, int module_id_offset) { int color = blockIdx.y; int dst_module_id = module_id_offset + blockIdx.x; int src_module_id = blockIdx.x; int module_id_x = dst_module_id % num_modules_x; int module_id_y = dst_module_id / num_modules_x; int startX = module_id_x * stride_x + padding_x; int startY = module_id_y * stride_y + padding_y; int Y, X; long target_id, source_id; targets += num_images * image_size_x * image_size_y * color; expanded_data += num_images * (src_module_id + num_modules_batch * (kernel_size_y * kernel_size_x * color)); for (int y = 0; y < kernel_size_y; y++) { Y = startY + y; for (int x = 0; x < kernel_size_x; x++) { X = startX + x; source_id = num_images * num_modules_batch * (x + kernel_size_x * y); target_id = num_images * (X + image_size_x * Y); if (X < 0 || X >= image_size_x || Y < 0 || Y >= image_size_y) { // do nothing. 
} else { for (int im = threadIdx.x; im < num_images; im += blockDim.x) { atomicAdd(&targets[target_id + im], expanded_data[source_id + im]); __syncthreads(); } } } } } __global__ void kWriteRows(float* data, float* target, int num_images, int num_modules, int num_modules_batch, int module_id_offset, float beta) { int c = blockIdx.y; int src_module_id = blockIdx.x; int dst_module_id = module_id_offset + blockIdx.x; data += num_images * (src_module_id + c * num_modules_batch); target += num_images * (dst_module_id + c * num_modules); for (int im = threadIdx.x; im < num_images; im += blockDim.x) { target[im] = beta * data[im]; } } __global__ void kReadRows(float* data, float* target, int num_images, int num_modules, int num_modules_batch, int module_id_offset) { int c = blockIdx.y; int src_module_id = module_id_offset + blockIdx.x; int dst_module_id = blockIdx.x; data += num_images * (src_module_id + c * num_modules); target += num_images * (dst_module_id + c * num_modules_batch); for (int im = threadIdx.x; im < num_images; im += blockDim.x) { target[im] = data[im]; } } __global__ void kWriteRowsMult(float* data, float* target, int num_images, int num_modules, int num_modules_batch, int module_id_offset, float alpha, float beta) { int c = blockIdx.y; int src_module_id = blockIdx.x; int dst_module_id = module_id_offset + blockIdx.x; data += num_images * (src_module_id + c * num_modules_batch); target += num_images * (dst_module_id + c * num_modules); for (int im = threadIdx.x; im < num_images; im += blockDim.x) { target[im] = alpha * target[im] + beta * data[im]; } } __global__ void kCrossMapDenoms(float* data, float* denoms, int num_locs, int batch_locs, int batch_offset, float addScale, float powScale, int num_filters, int k, bool blocked) { long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); if (batch_offset + loc_id < num_locs) { data += batch_offset + loc_id; denoms += loc_id; int prev_start = 0, prev_end = 0, start, end; float sum = 0; for (int j = 0; j < num_filters; j++) { start = blocked ? (j / k) * k : -k/2 + j; end = MIN(num_filters, start + k); start = MAX(0, start); for (int i = prev_start; i < start; i++) { sum -= square(data[i * num_locs]); } for (int i = prev_end; i < end; i++) { sum += square(data[i * num_locs]); } denoms[j * batch_locs] = __powf(1 + addScale * sum, -powScale - 1); prev_start = start; prev_end = end; } } } __global__ void kCrossMapRNorm(float* data, float* target, int num_locs, float addScale, float powScale, int num_filters, int k, bool blocked) { long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); if (loc_id < num_locs) { data += loc_id; target += loc_id; float sum = 0; int prev_start = 0, prev_end = 0, start, end; for (int j = 0; j < num_filters; j++) { start = blocked ? 
(j / k) * k : -k/2 + j; end = MIN(num_filters, start + k); start = MAX(0, start); for (int i = prev_start; i < start; i++) { sum -= square(data[i * num_locs]); } for (int i = prev_end; i < end; i++) { sum += square(data[i * num_locs]); } target[j * num_locs] = data[j * num_locs] * __powf(1 + addScale * sum, -powScale); prev_start = start; prev_end = end; } } } __global__ void kCrossMapRNormUndo(float* data, float* deriv, float* denoms, float* target, int num_locs, int batch_locs, int batch_offset, float addScale, float powScale, int num_filters, int k, bool blocked) { long loc_id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); if (batch_offset + loc_id < num_locs) { data += batch_offset + loc_id; target += batch_offset + loc_id; deriv += batch_offset + loc_id; denoms += loc_id; float sum = 0; int prev_start = 0, prev_end = 0, start, end; for (int j = 0; j < num_filters; j++) { start = blocked ? (j / k) * k : -k + k/2 + j + 1; end = MIN(num_filters, start + k); start = MAX(0, start); for (int i = prev_start; i < start; i++) { sum -= deriv[i * num_locs] * data[i * num_locs] * denoms[i * batch_locs]; } for (int i = prev_end; i < end; i++) { sum += deriv[i * num_locs] * data[i * num_locs] * denoms[i * batch_locs]; } target[j * num_locs] = deriv[j * num_locs] * __powf(denoms[j * batch_locs], powScale / (powScale + 1)) - 2 * addScale * powScale * data[j * num_locs] * sum; prev_start = start; prev_end = end; } } } void _convUpGemm(cudamat* images, cudamat* filters, cudamat* targets, Shape4D images_shape, Shape4D filters_shape, Shape4D targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput, bool conv) { int num_input_channels = conv_desc.num_input_channels; int num_output_channels = conv_desc.num_output_channels; int kernel_size_y = conv_desc.kernel_size_y; int kernel_size_x = conv_desc.kernel_size_x; int stride_y = conv_desc.stride_y; int stride_x = conv_desc.stride_x; int padding_y = conv_desc.padding_y; int padding_x = conv_desc.padding_x; int input_channel_begin = conv_desc.input_channel_begin; int input_channel_end = conv_desc.input_channel_end; int output_channel_begin = conv_desc.output_channel_begin; int output_channel_end = conv_desc.output_channel_end; int num_groups = conv_desc.num_groups; if (output_channel_end == 0) output_channel_end = num_output_channels; if (input_channel_end == 0) input_channel_end = num_input_channels; int num_output_channels2 = targets_shape.shape[3]; int num_modules_y = targets_shape.shape[2]; int num_modules_x = targets_shape.shape[1]; int num_images = targets_shape.shape[0]; int num_input_channels2 = images_shape.shape[3]; int image_size_y = images_shape.shape[2]; int image_size_x = images_shape.shape[1]; int num_images2 = images_shape.shape[0]; int num_input_channels3 = filters_shape.shape[3]; int kernel_size_y2 = filters_shape.shape[2]; int kernel_size_x2 = filters_shape.shape[1]; int num_output_channels3 = filters_shape.shape[0]; int num_modules = num_modules_y * num_modules_x; int filterModuleMult = conv ? 1 : num_modules; // Consistency checks. 
assert (num_images == num_images2); assert (num_output_channels == num_output_channels2); assert (output_channel_end - output_channel_begin == num_output_channels3); assert (num_input_channels == num_input_channels2); assert (input_channel_end - input_channel_begin == num_input_channels3 / filterModuleMult); assert (num_images == images->size[0]); assert (num_images == targets->size[0]); assert (num_output_channels3 == filters->size[0]); assert (image_size_y * image_size_x * num_input_channels == images->size[1]); assert (num_modules_y * num_modules_x * num_output_channels == targets->size[1]); assert (kernel_size_y * kernel_size_x * num_input_channels3 * filterModuleMult == filters->size[1]); assert (kernel_size_y == kernel_size_y2); assert (kernel_size_x == kernel_size_x2); assert (num_input_channels % num_groups == 0); assert (num_groups == 1); assert (input_channel_begin >= 0); assert (output_channel_begin >= 0); assert (input_channel_end <= num_input_channels); assert (output_channel_end <= num_output_channels); assert (input_channel_begin <= input_channel_end); assert (output_channel_begin <= output_channel_end); num_input_channels = input_channel_end - input_channel_begin; num_output_channels = output_channel_end - output_channel_begin; assert(num_input_channels > 0); assert(num_output_channels > 0); float* w = filters->data_device; float* images_data = images->data_device + input_channel_begin * image_size_y * image_size_x * num_images; float* targets_data = targets->data_device + output_channel_begin * num_modules * num_images; int input_size = kernel_size_y * kernel_size_x * num_input_channels; int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK); float *expanded_images = NULL, *expanded_target = NULL; int num_modules_batch; int input_memory_size = num_images * input_size * sizeof(float); int output_memory_size = num_images * num_output_channels * sizeof(float); if (free_space_ == 0) EstimateFreeSpace(); int max_batch_size = free_space_ / (input_memory_size + output_memory_size); max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult); max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X); //max_batch_size = MAX(max_batch_size, 1); //printf("Free space %ld max batch size %d\n", free_space, max_batch_size); cudaError_t err1, err2; err1 = cudaMalloc((void**)&expanded_images, max_batch_size * input_memory_size); err2 = cudaMalloc((void**)&expanded_target, max_batch_size * output_memory_size); if (cudaSuccess != err1 || cudaSuccess != err2) { printf("Could not allocate memory.\n"); num_modules_batch = 1; } else { num_modules_batch = max_batch_size; } int num_iter = DIVUP(num_modules, num_modules_batch); int module_id_start = 0; for (int i = 0; i < num_iter; i++) { int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start); //printf("Step %d num_modules %d\n", i, this_num_modules_batch); dim3 threads(num_threads_x); dim3 blocks = dim3(this_num_modules_batch, input_channel_end - input_channel_begin); kExpand<<<blocks, threads>>>(images_data, expanded_images, num_images, num_input_channels, image_size_y, image_size_x, num_modules_y, num_modules_x, kernel_size_y, kernel_size_x, padding_y, padding_x, stride_y, stride_x, this_num_modules_batch, module_id_start); if (!conv) w += num_output_channels * input_size; cublasSgemm('n', 't', num_images * this_num_modules_batch, num_output_channels, input_size, 1, expanded_images, num_images * this_num_modules_batch, w, num_output_channels, 0, expanded_target, num_images * this_num_modules_batch); dim3 blocks2 
= dim3(this_num_modules_batch, num_output_channels); if (scaleTargets == 0) { kWriteRows<<<blocks2, threads>>>(expanded_target, targets_data, num_images, num_modules, this_num_modules_batch, module_id_start, scaleOutput); } else { kWriteRowsMult<<<blocks2, threads>>>(expanded_target, targets_data, num_images, num_modules, this_num_modules_batch, module_id_start, scaleTargets, scaleOutput); } module_id_start += this_num_modules_batch; } cudaFree(expanded_images); cudaFree(expanded_target); getLastCudaError("convUpGemm: kernel execution failed"); } void _convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets, Shape4D derivs_shape, Shape4D filters_shape, Shape4D targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput, bool conv) { int num_input_channels = conv_desc.num_input_channels; int num_output_channels = conv_desc.num_output_channels; int kernel_size_y = conv_desc.kernel_size_y; int kernel_size_x = conv_desc.kernel_size_x; int stride_y = conv_desc.stride_y; int stride_x = conv_desc.stride_x; int padding_y = conv_desc.padding_y; int padding_x = conv_desc.padding_x; int input_channel_begin = conv_desc.input_channel_begin; int input_channel_end = conv_desc.input_channel_end; int output_channel_begin = conv_desc.output_channel_begin; int output_channel_end = conv_desc.output_channel_end; int num_groups = conv_desc.num_groups; if (output_channel_end == 0) output_channel_end = num_output_channels; if (input_channel_end == 0) input_channel_end = num_input_channels; int num_output_channels2 = derivs_shape.shape[3]; int num_modules_y = derivs_shape.shape[2]; int num_modules_x = derivs_shape.shape[1]; int num_images = derivs_shape.shape[0]; int num_input_channels2 = targets_shape.shape[3]; int image_size_y = targets_shape.shape[2]; int image_size_x = targets_shape.shape[1]; int num_images2 = targets_shape.shape[0]; int num_input_channels3 = filters_shape.shape[3]; int kernel_size_y2 = filters_shape.shape[2]; int kernel_size_x2 = filters_shape.shape[1]; int num_output_channels3 = filters_shape.shape[0]; int num_modules = num_modules_y * num_modules_x; int filterModuleMult = conv ? 1 : num_modules; // Consistency checks. 
assert (num_images == num_images2); assert (num_output_channels == num_output_channels2); assert (output_channel_end - output_channel_begin == num_output_channels3); assert (num_input_channels == num_input_channels2); assert (input_channel_end - input_channel_begin == num_input_channels3 / filterModuleMult); assert (num_images2 == targets->size[0]); assert (num_images == derivs->size[0]); assert (num_output_channels3 == filters->size[0]); assert (image_size_y * image_size_x * num_input_channels2 == targets->size[1]); assert (num_modules_y * num_modules_x * num_output_channels2 == derivs->size[1]); assert (kernel_size_y * kernel_size_x * num_input_channels3 * filterModuleMult == filters->size[1]); assert (kernel_size_y == kernel_size_y2); assert (kernel_size_x == kernel_size_x2); assert (num_input_channels % num_groups == 0); assert (num_groups == 1); assert (input_channel_begin >= 0); assert (output_channel_begin >= 0); assert (input_channel_end <= num_input_channels); assert (output_channel_end <= num_output_channels); assert (input_channel_begin <= input_channel_end); assert (output_channel_begin <= output_channel_end); num_input_channels = input_channel_end - input_channel_begin; num_output_channels = output_channel_end - output_channel_begin; assert(num_input_channels > 0); assert(num_output_channels > 0); float* w = filters->data_device; float* derivs_data = derivs->data_device + output_channel_begin * num_modules * num_images; float* targets_data = targets->data_device + input_channel_begin * image_size_y * image_size_x * num_images; int input_size = kernel_size_y * kernel_size_x * num_input_channels; int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK); float *expanded_target = NULL, *expanded_derivs = NULL; int num_modules_batch; int input_memory_size = num_images * input_size * sizeof(float); int output_memory_size = num_images * num_output_channels * sizeof(float); if (free_space_ == 0) EstimateFreeSpace(); int max_batch_size = free_space_ / (input_memory_size + output_memory_size); max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult); max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X); max_batch_size = MAX(max_batch_size, 1); cudaError_t err1, err2; err1 = cudaMalloc((void**)&expanded_target, max_batch_size * input_memory_size); err2 = cudaMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size); if (cudaSuccess != err1 || cudaSuccess != err2) { printf("Out of memory\n"); /* if (cudaSuccess == err1) cudaFree(expanded_target); if (cudaSuccess == err2) cudaFree(expanded_derivs); err1 = cudaMalloc((void**)&expanded_target, input_memory_size); err2 = cudaMalloc((void**)&expanded_derivs, output_memory_size); if (cudaSuccess != err1 || cudaSuccess != err2) { printf("Out of memory on GPU! %s \n", cudaGetErrorString(err1)); printf("Out of memory on GPU! 
%s \n", cudaGetErrorString(err2)); } */ num_modules_batch = 1; } else { num_modules_batch = max_batch_size; } int num_iter = DIVUP(num_modules, num_modules_batch); _Scale(targets, scaleTargets); int module_id_start = 0; for (int i = 0; i < num_iter; i++) { int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start); //printf("Step %d num_modules %d\n", i, this_num_modules_batch); dim3 blocks = dim3(this_num_modules_batch, num_output_channels); dim3 threads(num_threads_x); kReadRows<<<blocks, threads>>>(derivs_data, expanded_derivs, num_images, num_modules, this_num_modules_batch, module_id_start); if (!conv) w += num_output_channels * input_size; cublasSgemm('n', 'n', num_images * this_num_modules_batch, kernel_size_x * kernel_size_y * num_input_channels, num_output_channels, scaleOutput, expanded_derivs, num_images * this_num_modules_batch, w, num_output_channels, 0, expanded_target, num_images * this_num_modules_batch); if (check_cublas_error()) { printf("Error in dot or before it.\n"); } dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels); kContract<<<blocks2, threads>>>(expanded_target, targets_data, num_images, num_input_channels, image_size_y, image_size_x, num_modules_y, num_modules_x, kernel_size_y, kernel_size_x, padding_y, padding_x, stride_y, stride_x, this_num_modules_batch, module_id_start); module_id_start += this_num_modules_batch; } cudaFree(expanded_derivs); cudaFree(expanded_target); getLastCudaError("convDownGemm: kernel execution failed"); } void _convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets, Shape4D images_shape, Shape4D derivs_shape, Shape4D targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput, bool conv) { int num_input_channels = conv_desc.num_input_channels; int num_output_channels = conv_desc.num_output_channels; int kernel_size_y = conv_desc.kernel_size_y; int kernel_size_x = conv_desc.kernel_size_x; int stride_y = conv_desc.stride_y; int stride_x = conv_desc.stride_x; int padding_y = conv_desc.padding_y; int padding_x = conv_desc.padding_x; int input_channel_begin = conv_desc.input_channel_begin; int input_channel_end = conv_desc.input_channel_end; int output_channel_begin = conv_desc.output_channel_begin; int output_channel_end = conv_desc.output_channel_end; int num_groups = conv_desc.num_groups; if (output_channel_end == 0) output_channel_end = num_output_channels; if (input_channel_end == 0) input_channel_end = num_input_channels; int num_output_channels2 = derivs_shape.shape[3]; int num_modules_y = derivs_shape.shape[2]; int num_modules_x = derivs_shape.shape[1]; int num_images = derivs_shape.shape[0]; int num_input_channels2 = images_shape.shape[3]; int image_size_y = images_shape.shape[2]; int image_size_x = images_shape.shape[1]; int num_images2 = images_shape.shape[0]; int num_input_channels3Mult = targets_shape.shape[3]; int kernel_size_y2 = targets_shape.shape[2]; int kernel_size_x2 = targets_shape.shape[1]; int num_output_channels3 = targets_shape.shape[0]; int num_modules = num_modules_y * num_modules_x; int filterModuleMult = conv ? 1 : num_modules; // Consistency checks. 
assert (num_images == num_images2); assert (num_output_channels == num_output_channels2); assert (output_channel_end - output_channel_begin == num_output_channels3); assert (num_input_channels == num_input_channels2); assert (input_channel_end - input_channel_begin == num_input_channels3Mult / filterModuleMult); assert (num_images2 == images->size[0]); assert (num_images == derivs->size[0]); assert (num_output_channels3 == targets->size[0]); assert (image_size_y * image_size_x * num_input_channels2 == images->size[1]); assert (num_modules_y * num_modules_x * num_output_channels2 == derivs->size[1]); assert (kernel_size_y2 * kernel_size_x2 * num_input_channels3Mult == targets->size[1]); assert (kernel_size_y == kernel_size_y2); assert (kernel_size_x == kernel_size_x2); assert (num_input_channels % num_groups == 0); assert (num_groups == 1); assert (input_channel_begin >= 0); assert (output_channel_begin >= 0); assert (input_channel_end <= num_input_channels); assert (output_channel_end <= num_output_channels); assert (input_channel_begin <= input_channel_end); assert (output_channel_begin <= output_channel_end); if (output_channel_end == 0) output_channel_end = num_output_channels; if (input_channel_end == 0) input_channel_end = num_input_channels; num_input_channels = input_channel_end - input_channel_begin; num_output_channels = output_channel_end - output_channel_begin; assert(num_input_channels > 0); assert(num_output_channels > 0); float* dw = targets->data_device; float* images_data = images->data_device + input_channel_begin * image_size_y * image_size_x * num_images; float* derivs_data = derivs->data_device + output_channel_begin * num_modules * num_images; int input_size = kernel_size_y * kernel_size_x * num_input_channels; int num_threads_x = MIN(num_images, NUM_THREADS_PER_BLOCK); float *expanded_images = NULL, *expanded_derivs = NULL; int num_modules_batch; int input_memory_size = num_images * input_size * sizeof(float); int output_memory_size = num_images * num_output_channels * sizeof(float); if (free_space_ == 0) EstimateFreeSpace(); int max_batch_size = free_space_ / (input_memory_size + output_memory_size); max_batch_size = MIN(max_batch_size, num_modules / filterModuleMult); max_batch_size = MIN(max_batch_size, MAX_BLOCKS_X); max_batch_size = MAX(max_batch_size, 1); cudaError_t err1, err2; err1 = cudaMalloc((void**)&expanded_images, max_batch_size * input_memory_size); err2 = cudaMalloc((void**)&expanded_derivs, max_batch_size * output_memory_size); if (cudaSuccess != err1 || cudaSuccess != err2) { printf("Out of memory.\n"); num_modules_batch = 1; } else { num_modules_batch = max_batch_size; } int num_iter = DIVUP(num_modules, num_modules_batch); _Scale(targets, scaleTargets); int module_id_start = 0; dim3 threads(num_threads_x); for (int i = 0; i < num_iter; i++) { int this_num_modules_batch = MIN(num_modules_batch, num_modules - module_id_start); //printf("Step %d num_modules %d\n", i, this_num_modules_batch); dim3 blocks = dim3(this_num_modules_batch, num_output_channels); kReadRows<<<blocks, threads>>>(derivs_data, expanded_derivs, num_images, num_modules, this_num_modules_batch, module_id_start); dim3 blocks2 = dim3(this_num_modules_batch, num_input_channels); kExpand<<<blocks2, threads>>>(images_data, expanded_images, num_images, num_input_channels, image_size_y, image_size_x, num_modules_y, num_modules_x, kernel_size_y, kernel_size_x, padding_y, padding_x, stride_y, stride_x, this_num_modules_batch, module_id_start); if (!conv) dw += num_output_channels * 
input_size; cublasSgemm('t', 'n', num_output_channels, input_size, num_images * this_num_modules_batch, scaleOutput, expanded_derivs, num_images * this_num_modules_batch, expanded_images, num_images * this_num_modules_batch, 1, dw, num_output_channels); if (check_cublas_error()) { printf("Error in dot or before it.\n"); } module_id_start += this_num_modules_batch; } cudaFree(expanded_derivs); cudaFree(expanded_images); getLastCudaError("convOutpGemm: kernel execution failed"); } template <class Pooler> void _convPoolGemm(cudamat* images, cudamat* targets, Shape4D images_shape, Shape4D targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput, Pooler pooler) { int num_input_channels = conv_desc.num_input_channels; int num_output_channels = conv_desc.num_output_channels; int kernel_size_y = conv_desc.kernel_size_y; int kernel_size_x = conv_desc.kernel_size_x; int kernel_size_t = conv_desc.kernel_size_t; int stride_y = conv_desc.stride_y; int stride_x = conv_desc.stride_x; int stride_t = conv_desc.stride_t; int padding_y = conv_desc.padding_y; int padding_x = conv_desc.padding_x; int padding_t = conv_desc.padding_t; int num_output_channels2 = targets_shape.shape[3]; int num_modules_y = targets_shape.shape[2]; int num_modules_x = targets_shape.shape[1]; int num_images = targets_shape.shape[0]; int num_input_channels2 = images_shape.shape[3]; int image_size_y = images_shape.shape[2]; int image_size_x = images_shape.shape[1]; int num_images2 = images_shape.shape[0]; int image_size_t = num_input_channels2 / num_input_channels; int num_modules_t = num_output_channels2 / num_output_channels; int num_modules = num_modules_y * num_modules_x * num_modules_t; // Consistency checks. assert (num_images == num_images2); assert (num_input_channels2 % image_size_t == 0); assert (num_input_channels == num_input_channels2 / image_size_t); assert (num_output_channels2 % num_modules_t == 0); assert (num_output_channels == num_output_channels2 / num_modules_t); assert (num_images == images->size[0]); assert (num_images == targets->size[0]); assert (image_size_t * image_size_y * image_size_x * num_input_channels == images->size[1]); assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == targets->size[1]); _Scale(targets, scaleTargets); dim3 threads(NUM_THREADS_PER_BLOCK); int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules); dim3 blocks = dim3(num_blocks_x, num_input_channels); kPool<<<blocks, threads>>>(images->data_device, targets->data_device, num_images, num_input_channels, image_size_y, image_size_x, image_size_t, num_modules_y, num_modules_x, num_modules_t, kernel_size_y, kernel_size_x, kernel_size_t, padding_y, padding_x, padding_t, stride_y, stride_x, stride_t, scaleOutput, pooler); getLastCudaError("convLocalPool: kernel execution failed"); } void _avgPoolUndoGemm(cudamat* derivs, cudamat* targets, Shape4D derivs_shape, Shape4D targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput) { int num_input_channels = conv_desc.num_input_channels; int num_output_channels = conv_desc.num_output_channels; int kernel_size_y = conv_desc.kernel_size_y; int kernel_size_x = conv_desc.kernel_size_x; int kernel_size_t = conv_desc.kernel_size_t; int stride_y = conv_desc.stride_y; int stride_x = conv_desc.stride_x; int stride_t = conv_desc.stride_t; int padding_y = conv_desc.padding_y; int padding_x = conv_desc.padding_x; int padding_t = conv_desc.padding_t; int num_output_channels2 = derivs_shape.shape[3]; int num_modules_y = derivs_shape.shape[2]; int num_modules_x = 
derivs_shape.shape[1]; int num_images = derivs_shape.shape[0]; int num_input_channels2 = targets_shape.shape[3]; int image_size_y = targets_shape.shape[2]; int image_size_x = targets_shape.shape[1]; int num_images2 = targets_shape.shape[0]; int image_size_t = num_input_channels2 / num_input_channels; int num_modules_t = num_output_channels2 / num_output_channels; int num_modules = num_modules_y * num_modules_x * num_modules_t; // Consistency checks. assert (num_images == num_images2); assert (num_input_channels2 % image_size_t == 0); assert (num_input_channels == num_input_channels2 / image_size_t); assert (num_output_channels2 % num_modules_t == 0); assert (num_output_channels == num_output_channels2 / num_modules_t); assert (num_images == targets->size[0]); assert (num_images == derivs->size[0]); assert (image_size_t * image_size_y * image_size_x * num_input_channels == targets->size[1]); assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == derivs->size[1]); _Scale(targets, scaleTargets); dim3 threads(NUM_THREADS_PER_BLOCK); int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules); dim3 blocks = dim3(num_blocks_x, num_input_channels); kAvgPoolUndo<<<blocks, threads>>>(derivs->data_device, targets->data_device, num_images, num_input_channels, image_size_y, image_size_x, image_size_t, num_modules_y, num_modules_x, num_modules_t, kernel_size_y, kernel_size_x, kernel_size_t, padding_y, padding_x, padding_t, stride_y, stride_x, stride_t, scaleOutput); getLastCudaError("avgPoolUndo: kernel execution failed"); } void _maxPoolUndoGemm(cudamat* images, cudamat* derivs, cudamat* maxes, cudamat* targets, Shape4D targets_shape, Shape4D derivs_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput) { int num_input_channels = conv_desc.num_input_channels; int num_output_channels = conv_desc.num_output_channels; int kernel_size_y = conv_desc.kernel_size_y; int kernel_size_x = conv_desc.kernel_size_x; int kernel_size_t = conv_desc.kernel_size_t; int stride_y = conv_desc.stride_y; int stride_x = conv_desc.stride_x; int stride_t = conv_desc.stride_t; int padding_y = conv_desc.padding_y; int padding_x = conv_desc.padding_x; int padding_t = conv_desc.padding_t; int num_output_channels2 = derivs_shape.shape[3]; int num_modules_y = derivs_shape.shape[2]; int num_modules_x = derivs_shape.shape[1]; int num_images = derivs_shape.shape[0]; int num_input_channels2 = targets_shape.shape[3]; int image_size_y = targets_shape.shape[2]; int image_size_x = targets_shape.shape[1]; int num_images2 = targets_shape.shape[0]; int image_size_t = num_input_channels2 / num_input_channels; int num_modules_t = num_output_channels2 / num_output_channels; int num_modules = num_modules_y * num_modules_x * num_modules_t; // Consistency checks. 
assert (num_images == num_images2); assert (num_input_channels2 % image_size_t == 0); assert (num_input_channels == num_input_channels2 / image_size_t); assert (num_output_channels2 % num_modules_t == 0); assert (num_output_channels == num_output_channels2 / num_modules_t); assert (num_images == targets->size[0]); assert (num_images == derivs->size[0]); assert (image_size_t * image_size_y * image_size_x * num_input_channels == targets->size[1]); assert (num_modules_t * num_modules_y * num_modules_x * num_output_channels == derivs->size[1]); _Scale(targets, scaleTargets); dim3 threads(NUM_THREADS_PER_BLOCK); int num_blocks_x = MIN(MAX_BLOCKS_X, num_modules); dim3 blocks = dim3(num_blocks_x, num_input_channels); kMaxPoolUndo<<<blocks, threads>>>(images->data_device, derivs->data_device, maxes->data_device, targets->data_device, num_images, num_input_channels, image_size_y, image_size_x, image_size_t, num_modules_y, num_modules_x, num_modules_t, kernel_size_y, kernel_size_x, kernel_size_t, padding_y, padding_x, padding_t, stride_y, stride_x, stride_t, scaleOutput); getLastCudaError("maxPoolUndo: kernel execution failed"); } void _CrossMapRNorm(cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) { int num_locs = (images->size[0] * images->size[1]) / num_filters; int num_blocks = DIVUP(num_locs, NUM_THREADS_PER_BLOCK); kCrossMapRNorm<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(images->data_device, targets->data_device, num_locs, addScale, powScale, num_filters, sizeF, blocked); getLastCudaError("_CrossMapRNorm: kernel execution failed"); } void _CrossMapRNormUndo(cudamat* outGrads, cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) { int num_locs = (images->size[0] * images->size[1]) / num_filters; int batch_offset = 0; float *denoms; if (free_space_ == 0) EstimateFreeSpace(); int max_batch_size = free_space_ / (sizeof(float) * num_filters); max_batch_size = MIN(num_locs, max_batch_size); max_batch_size = MIN(num_locs, MAX_BLOCKS_X); cudaError_t err; err = cudaMalloc((void**)&denoms, max_batch_size * num_filters * sizeof(float)); if (cudaSuccess != err) { printf("Out of memory on GPU!\n"); } int num_batches = DIVUP(num_locs, max_batch_size); for (int i = 0; i < num_batches; i++) { int batch_size = MIN(max_batch_size, num_locs - batch_offset); int num_blocks = DIVUP(batch_size, NUM_THREADS_PER_BLOCK); kCrossMapDenoms<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(images->data_device, denoms, num_locs, batch_size, batch_offset, addScale, powScale, num_filters, sizeF, blocked); kCrossMapRNormUndo<<<num_blocks, NUM_THREADS_PER_BLOCK>>>(images->data_device, outGrads->data_device, denoms, targets->data_device, num_locs, batch_size, batch_offset, addScale, powScale, num_filters, sizeF, blocked); batch_offset += batch_size; } cudaFree(denoms); getLastCudaError("_CrossMapRNormUndo: kernel execution failed"); } #ifdef __cplusplus extern "C" { #endif void convUpGemm(cudamat* images, cudamat* filters, cudamat* targets, Shape4D* images_shape, Shape4D* filters_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) { /* printf("image shape %d %d %d %d\n", images_shape->shape[0], images_shape->shape[1], images_shape->shape[2], images_shape->shape[3]); printf("filters shape %d %d %d %d\n", filters_shape->shape[0], filters_shape->shape[1], filters_shape->shape[2], filters_shape->shape[3]); printf("targets shape %d %d %d %d\n", targets_shape->shape[0], targets_shape->shape[1], 
targets_shape->shape[2], targets_shape->shape[3]); printf("Convolution : kernel_size_y %d kernel_size_x %d stride_y %d stride_x %d padding_y %d padding_x %d num_input_channels %d num_output_channels %d num_groups %d\n", conv_desc.kernel_size_y, conv_desc.kernel_size_x, conv_desc.stride_x, conv_desc.stride_y, conv_desc.padding_y, conv_desc.padding_x, conv_desc.num_input_channels, conv_desc.num_output_channels, conv_desc.num_groups); */ _convUpGemm(images, filters, targets, *images_shape, *filters_shape, *targets_shape, conv_desc, scaleTargets, 1.0, true); } void convDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets, Shape4D* derivs_shape, Shape4D* filters_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) { _convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape, *targets_shape, conv_desc, scaleTargets, 1.0, true); } void convOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets, Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput) { _convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape, *targets_shape, conv_desc, scaleTargets, scaleOutput, true); } void localUpGemm(cudamat* images, cudamat* filters, cudamat* targets, Shape4D* images_shape, Shape4D* filters_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) { _convUpGemm(images, filters, targets, *images_shape, *filters_shape, *targets_shape, conv_desc, scaleTargets, 1.0, false); } void localDownGemm(cudamat* derivs, cudamat* filters, cudamat* targets, Shape4D* derivs_shape, Shape4D* filters_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) { _convDownGemm(derivs, filters, targets, *derivs_shape, *filters_shape, *targets_shape, conv_desc, scaleTargets, 1.0, false); } void localOutpGemm(cudamat* images, cudamat* derivs, cudamat* targets, Shape4D* images_shape, Shape4D* derivs_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput) { _convOutpGemm(images, derivs, targets, *images_shape, *derivs_shape, *targets_shape, conv_desc, scaleTargets, scaleOutput, false); } void MaxPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){ MaxPooler pooler; _convPoolGemm<MaxPooler>(images, targets, *images_shape, *targets_shape, conv_desc, scaleTargets, scaleOutput, pooler); } void AvgPoolGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets, float scaleOutput){ AvgPooler pooler; _convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape, conv_desc, scaleTargets, scaleOutput, pooler); } void MaxPoolUndoGemm(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* targets, Shape4D* images_shape, Shape4D* maxGrads_shape, ConvDesc conv_desc, float scaleTargets) { _maxPoolUndoGemm(images, maxGrads, maxActs, targets, *images_shape, *maxGrads_shape, conv_desc, scaleTargets, 1); } void AvgPoolUndoGemm(cudamat* avgGrads, cudamat* targets, Shape4D* avgGrads_shape, Shape4D* targets_shape, ConvDesc conv_desc, float scaleTargets) { _avgPoolUndoGemm(avgGrads, targets, *avgGrads_shape, *targets_shape, conv_desc, scaleTargets, 1); } void UpSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, int factor, float scaleTargets) { ConvDesc conv_desc; conv_desc.kernel_size_y = factor; conv_desc.kernel_size_x = factor; conv_desc.stride_y = 
factor; conv_desc.stride_x = factor; conv_desc.padding_y = 0; conv_desc.padding_x = 0; conv_desc.num_input_channels = images_shape->shape[3]; conv_desc.num_output_channels = targets_shape->shape[3]; conv_desc.output_channel_end = targets_shape->shape[3]; conv_desc.input_channel_end = images_shape->shape[3]; conv_desc.input_channel_begin = 0; conv_desc.output_channel_begin = 0; conv_desc.num_groups = 1; _avgPoolUndoGemm(images, targets, *images_shape, *targets_shape, conv_desc, scaleTargets, factor * factor); } void DownSampleGemm(cudamat* images, cudamat* targets, Shape4D* images_shape, Shape4D* targets_shape, int factor) { AvgPooler pooler = AvgPooler(); ConvDesc conv_desc; conv_desc.kernel_size_y = factor; conv_desc.kernel_size_x = factor; conv_desc.stride_y = factor; conv_desc.stride_x = factor; conv_desc.padding_y = 0; conv_desc.padding_x = 0; conv_desc.num_input_channels = images_shape->shape[3]; conv_desc.num_output_channels = targets_shape->shape[3]; conv_desc.output_channel_end = targets_shape->shape[3]; conv_desc.input_channel_end = images_shape->shape[3]; conv_desc.input_channel_begin = 0; conv_desc.output_channel_begin = 0; conv_desc.num_groups = 1; _convPoolGemm<AvgPooler>(images, targets, *images_shape, *targets_shape, conv_desc, 0, 1, pooler); } void ResponseNormCrossMapGemm( cudamat* images, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) { _CrossMapRNorm(images, targets, num_filters, sizeF, addScale, powScale, blocked); } void ResponseNormCrossMapUndoGemm( cudamat* outGrads, cudamat* inputs, cudamat* targets, int num_filters, int sizeF, float addScale, float powScale, bool blocked) { _CrossMapRNormUndo(outGrads, inputs, targets, num_filters, sizeF, addScale, powScale, blocked); } void Scale(cudamat* mat, float scale) { _Scale(mat, scale); } #ifdef __cplusplus } #endif
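A minimal host-side sketch of the shape arithmetic behind the wrappers above: DownSampleGemm builds a ConvDesc with kernel_size == stride == factor and zero padding, so each spatial axis shrinks by exactly factor. The helper below is an assumption illustrating that standard pooling formula; it is not code from this file.

// Hedged sketch: per-axis output grid size for the factor-based ConvDesc
// that DownSampleGemm constructs (kernel == stride == factor, padding == 0).
// The formula is standard pooling arithmetic, assumed here for illustration.
#include <cassert>
#include <cstdio>

static int pooled_axis_size(int image_size, int factor) {
  assert(image_size % factor == 0);           // exact fit assumed
  return (image_size - factor) / factor + 1;  // equals image_size / factor
}

int main() {
  // Example: a 32x32 image downsampled by factor 2 gives a 16x16 module grid.
  printf("%d\n", pooled_axis_size(32, 2));  // prints 16
  return 0;
}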
#include <stdio.h> #include "GCRSMatrix.h" #include "galois.h" #include "jerasure.h" #include "utils.h" int gcrs_check_k_m_w(int k, int m, int w){ if (k < MIN_K || k > MAX_K) { return -1; } if (k < m) { return -1; } if (w < MIN_W) { w = MIN_W; } while (pow(2, w) < (k+m)) { ++w; } return w; } int *gcrs_create_bitmatrix(int k, int m, int w){ int i, j; int *matrix, *bitmatrix; if (gcrs_check_k_m_w(k, m, w) < 0) { return NULL; } matrix = talloc(int, k*m); if (matrix == NULL) { return NULL; } for (i = 0; i < m; i++) { for (j = 0; j < k; j++) { matrix[i*k+j] = galois_single_divide(1, i ^ (m + j), w); } } bitmatrix = jerasure_matrix_to_bitmatrix(k, m, w, matrix); free(matrix); return bitmatrix; } unsigned int *gcrs_create_column_coding_bitmatrix(int k, int m, int w, int *bitmatrix){ int columnIdx, rowIdx; int freeBitmatrixMark = 0, bitmatrixIdx; int intbitmatrixIdx = 0; int bitIdx = 0; unsigned int bitOne = 0x01; unsigned int *column_encoded_bitmatrix; if (gcrs_check_k_m_w(k, m, w) < 0) { return NULL; } if (bitmatrix == NULL) { bitmatrix = gcrs_create_bitmatrix(k, m, w); freeBitmatrixMark = 1; if (bitmatrix == NULL) { return NULL; } } // int uIntSize = sizeof(unsigned int); // int wUnitsPerUInt = uIntSize/w; // int mwReqSize = m/wUnitsPerUInt; // // if (m%wUnitsPerUInt != 1) { // mwReqSize = mwReqSize + 1; // } column_encoded_bitmatrix = talloc(unsigned int, k * w * 2); if (column_encoded_bitmatrix == NULL) { free(bitmatrix); return NULL; } memset(column_encoded_bitmatrix, 0, k*w*2*sizeof(unsigned int)); for (columnIdx = 0; columnIdx < (k*w); ++columnIdx) { //map a whole column to 4 bytes (mw <= 32) or 8 bytes (mw >= 32) for (rowIdx = 0; rowIdx < (m*w); ++rowIdx) { bitmatrixIdx = rowIdx * k * w + columnIdx; if (rowIdx % w == 0) { //if cannot put a whole w bits to the last several bits of the integer, put w bits to the start w-bits next integer if ((bitIdx + w) > sizeof(int) * 8) { bitIdx = 0; intbitmatrixIdx = intbitmatrixIdx + 1; } } if (bitIdx >= sizeof(int) * 8) { bitIdx = 0; intbitmatrixIdx = intbitmatrixIdx + 1; } if (bitmatrix[bitmatrixIdx] == 1) { //copy a one to the bitIdx-bit of intbitmatrix[intbitmatrixIdx] column_encoded_bitmatrix[intbitmatrixIdx] = column_encoded_bitmatrix[intbitmatrixIdx] + (bitOne << bitIdx); }else{ //copy a zero to the bitIdx-bit of intbitmatrix[intbitmatrixIdx] } ++bitIdx; } bitIdx = 0; intbitmatrixIdx = intbitmatrixIdx + 1; } if (freeBitmatrixMark == 1) { free(bitmatrix); } return column_encoded_bitmatrix; } int gcrs_invert_bitmatrix(int *mat, int *inv, int rows) { int cols, i, j, k; int tmp; cols = rows; k = 0; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { inv[k] = (i == j) ? 1 : 0; k++; } } /* First -- convert into upper triangular */ for (i = 0; i < cols; i++) { /* Swap rows if we have a zero i,i element. If we can't swap, then the matrix was not invertible */ if ((mat[i*cols+i]) == 0) { for (j = i+1; j < rows && (mat[j*cols+i]) == 0; j++) ; if (j == rows) return -1; for (k = 0; k < cols; k++) { tmp = mat[i*cols+k]; mat[i*cols+k] = mat[j*cols+k]; mat[j*cols+k] = tmp; tmp = inv[i*cols+k]; inv[i*cols+k] = inv[j*cols+k]; inv[j*cols+k] = tmp; } } /* Now for each j>i, add A_ji*Ai to Aj */ for (j = i+1; j != rows; j++) { if (mat[j*cols+i] != 0) { for (k = 0; k < cols; k++) { mat[j*cols+k] ^= mat[i*cols+k]; inv[j*cols+k] ^= inv[i*cols+k]; } } } } /* Now the matrix is upper triangular. 
Start at the top and multiply down */ for (i = rows-1; i >= 0; i--) { for (j = 0; j < i; j++) { if (mat[j*cols+i]) { for (k = 0; k < cols; k++) { mat[j*cols+k] ^= mat[i*cols+k]; inv[j*cols+k] ^= inv[i*cols+k]; } } } } return 0; } int gcrs_create_decoding_data_bitmatrix(int k, int m, int w, int *matrix, int *decoding_data_matrix, int *erased, int *dm_ids){ int i, j, *tmpmat; int index, mindex; if (gcrs_check_k_m_w(k, m, w) < 0) { return -1; } if (matrix == NULL) { return -1; } j = 0; for (i = 0; j < k; i++) { if (erased[i] == 0) { dm_ids[j] = i; j++; } } tmpmat = (int*)malloc(sizeof(int)*k*k*w*w); if (tmpmat == NULL) { return -1; } for (i = 0; i < k; i++) { if (dm_ids[i] < k) { index = i*k*w*w; for (j = 0; j < k*w*w; j++) tmpmat[index+j] = 0; index = i*k*w*w+dm_ids[i]*w; for (j = 0; j < w; j++) { tmpmat[index] = 1; index += (k*w+1); } } else { index = i*k*w*w; mindex = (dm_ids[i]-k)*k*w*w; for (j = 0; j < k*w*w; j++) { tmpmat[index+j] = matrix[mindex+j]; } } } i = gcrs_invert_bitmatrix(tmpmat, decoding_data_matrix, k*w); free(tmpmat); return 0; } int *gcrs_create_decoding_bitmatrix(int k, int m, int w, int *matrix, int *mat_idx, int *erasures, int *dm_ids){ int kIdx, mIdx, matIdx; int *erases; int *decoding_matrix, *decoding_data_matrix; int dFailedNum = 0; decoding_matrix = talloc(int, (k * w * w * m)); if (decoding_matrix == NULL) { return NULL; } if ((erases = gcrs_erasures_to_erased(k, m, erasures)) == NULL) { return NULL; } if ((decoding_data_matrix = talloc(int, k*k*w*w)) == NULL ) { return NULL; } for (kIdx = 0; kIdx < k; ++kIdx) { if (erases[kIdx] == 1) { ++dFailedNum; } } if (dFailedNum > 0) { if(gcrs_create_decoding_data_bitmatrix(k, m, w, matrix, decoding_data_matrix, erases, dm_ids) < 0){ free(erases); free(decoding_data_matrix); return NULL; } } matIdx = 0; for (kIdx = 0; kIdx < k; ++kIdx) { if (erases[kIdx] == 1) { *(mat_idx + matIdx) = kIdx; memcpy((decoding_matrix + matIdx * k * w * w), decoding_data_matrix + kIdx * k * w * w , sizeof(int) * k * w * w); matIdx = matIdx + 1; } } for (mIdx = 0; mIdx < m; ++mIdx) { if (erases[mIdx + kIdx] == 1) { *(mat_idx + matIdx) = mIdx + k; //Generate the vector for restoring memset((decoding_matrix + matIdx * k * w * w), 0, sizeof(int) * k * w * w); gcrs_generate_coding_vector(k,m,w,mIdx, matrix, decoding_data_matrix, (decoding_matrix + matIdx * k * w * w), erases); matIdx = matIdx + 1; } } free(erases); free(decoding_data_matrix); return decoding_matrix; } int *gcrs_erasures_to_erased(int k, int m, int *erasures) { int td; int t_non_erased; int *erased; int i; td = k+m; erased = talloc(int, td); if (erased == NULL) return NULL; t_non_erased = td; for (i = 0; i < td; i++) erased[i] = 0; for (i = 0; erasures[i] != -1; i++) { if (erased[erasures[i]] == 0) { erased[erasures[i]] = 1; t_non_erased--; if (t_non_erased < k) { free(erased); return NULL; } } } return erased; } int gcrs_generate_coding_vector(int k, int m, int w, int mIdx, int *matrix, int *invert_matrix, int *vector, int *erases){ int kIdx, wIdx, mapIdx, mapMissingIdx, rowIdx; int mappingMatrixRowIdx; //records the position mapped to vector for each k int *kMap = talloc(int, k); int *kMissingMap = talloc(int, (m+1)); // at most m data block missing; int *mappingMatrix = talloc(int, k*w*w); if (kMap == NULL || kMissingMap == NULL || mappingMatrix == NULL) { free(kMap); free(kMissingMap); free(mappingMatrix); return -1; } for (kIdx = 0; kIdx < k; ++kIdx) { *(kMap + kIdx) = -1; if (kIdx <= m) { *(kMissingMap + kIdx) = -1; } } mapIdx = 0; mapMissingIdx = 0; //Reorder vector elements 
for (kIdx = 0; kIdx < k; ++kIdx) { if (erases[kIdx] != 1) { *(kMap + mapIdx) = kIdx; for (rowIdx = 0; rowIdx < w; ++rowIdx) { memcpy((vector + mapIdx * w + rowIdx * k * w), (matrix + mIdx * k * w * w + rowIdx * k * w + kIdx * w), sizeof(int) * w); } ++mapIdx; }else{ *(kMissingMap + mapMissingIdx) = kIdx; ++mapMissingIdx; // Can't continue if more than m data blocks missing if (mapMissingIdx > m) { return -1; } } } for (rowIdx = 0; rowIdx < w; ++rowIdx) { for (mapMissingIdx = 0; *(kMissingMap + mapMissingIdx) != -1; ++mapMissingIdx) { memset(mappingMatrix, 0, sizeof(int) * k * w * w); mappingMatrixRowIdx = 0; for (wIdx = 0; wIdx < w; ++wIdx) { if (*(matrix + mIdx * k * w * w + rowIdx * k * w + *(kMissingMap + mapMissingIdx) * w + wIdx) == 1) { //We must put the vector for generating the block here memcpy((mappingMatrix + mappingMatrixRowIdx * k * w), (invert_matrix + (*(kMissingMap + mapMissingIdx)) * k * w * w + wIdx * k * w), sizeof(int) * k * w); ++mappingMatrixRowIdx; } } for (mappingMatrixRowIdx = 0; mappingMatrixRowIdx < w; ++mappingMatrixRowIdx) { int columnIdx = 0; for (; columnIdx < k*w; ++columnIdx) { *(vector + rowIdx * k * w + columnIdx) = (*(vector + rowIdx * k * w + columnIdx) + *(mappingMatrix + mappingMatrixRowIdx * k * w + columnIdx)) % 2; } } } } //Free what you allocated free(kMap); free(kMissingMap); free(mappingMatrix); return 0; } void printMatrix(int *mat, int coloum, int row){ int rIdx, cIdx; for (rIdx = 0; rIdx < row; ++rIdx) { for (cIdx = 0; cIdx < coloum; ++cIdx) { printf("%d ", *(mat + rIdx * coloum + cIdx)); } printf("\n"); } } void gcrs_print_column_encoded_bitmatrix(unsigned int *column_encoded_bitmatrix, int k, int m, int w){ int intIdx, intMatrixIdx = 0; int intBitIdx, intSize = sizeof(unsigned int)*8; unsigned int intBit = 0x01; for (intIdx = 0; intIdx < k*w; ++intIdx) { // printf("\n%u\n",int_encoded_bitmatrix[intMatrixIdx]); if (m*w > sizeof(int)*8) { for (intBitIdx = 0; intBitIdx < intSize; ++intBitIdx) { if ( (column_encoded_bitmatrix[intMatrixIdx] & (intBit << intBitIdx)) != 0) { printf("1"); }else{ printf("0"); } } ++intMatrixIdx; } for (intBitIdx = 0; intBitIdx < intSize; ++intBitIdx) { if ( (column_encoded_bitmatrix[intMatrixIdx] & (intBit << intBitIdx)) != 0) { printf("1"); }else{ printf("0"); } } ++intMatrixIdx; printf("\n"); } }
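A hedged usage sketch for the GCRS matrix helpers above. The call sequence is my assumption; the functions and their signatures all appear in this file, and the k/m/w values are illustrative only.

/* Hedged sketch (assumed typical call sequence): validate (k, m, w), build
 * the Cauchy bitmatrix, pack it column-wise, and print both forms.
 * k data devices, m coding devices; w is bumped until 2^w >= k + m. */
#include <stdio.h>
#include <stdlib.h>
#include "GCRSMatrix.h"

int main(void) {
    int k = 4, m = 2;
    int w = gcrs_check_k_m_w(k, m, 3);   /* returns adjusted w, or -1 on bad k/m */
    if (w < 0) return 1;

    int *bitmatrix = gcrs_create_bitmatrix(k, m, w);              /* (m*w) x (k*w) bits */
    unsigned int *packed =
        gcrs_create_column_coding_bitmatrix(k, m, w, bitmatrix);  /* column packed */
    if (bitmatrix == NULL || packed == NULL) return 1;

    printMatrix(bitmatrix, k * w, m * w);
    gcrs_print_column_encoded_bitmatrix(packed, k, m, w);

    free(bitmatrix);
    free(packed);
    return 0;
}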
#define MAX(x, y) ((x>y) ? x : y) // Define some error checking macros. #define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } double cal_tflops(int m, int n, int k, double msec) { double flops = 2. * m * n * k; double tflops = (1E-12*flops) / (1E-3*msec); return tflops; } __global__ void assignFloatValue (float *out, int n, float value) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = value; } } __global__ void assignHalfValue (half *out, int n, float value) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = __float2half(value); //if(idx == 0)printf("Assign half precision value to out====%f\n", __half2float(out[idx])); } } __global__ void assignHalftoFloatValue (half *in, int n, float *out) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = __half2float(in[idx]); //if(idx==0)printf("Cast half to float ====%f\n", out[idx]); } } void correctnessCheck(int m, int n, int k, float *host, float value){ for (int i = 0; i < m * n; i++) { float val = host[i]; if ( val != k * value * value) { std::cout << "ERROR value = " << val<< ", correct value="<< k * value * value << std::endl; } } } __global__ void halfCorrectnessCheck(half *in, int n, int k, float value){ int idx = blockDim.x * blockIdx.x + threadIdx.x; half valueh = __float2half(value); float kf = k; half kh = __float2half(kf); half v = __hmul(kh,valueh); v = __hmul(v,valueh); if (idx < n) { if( __heq(in[idx], v)) printf("ERROR value = %f, correct value = %f", __half2float(in[idx]),__half2float(v) ); } } void printTime(float cublasTime, int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k ){ float tflops = cal_tflops(m, n, k, cublasTime); if (tflops > s_max_tflops){ s_max_tflops = tflops; s_max_m_n = m; s_max_k = k; } std::cout << std::setw(7) << m << ","; std::cout << std::setw(7) << n << ","; std::cout << std::setw(7) << k << ","; std::cout << std::setw(15) << std::setprecision(4) << cublasTime << ","; std::cout << std::setw(15) << std::setprecision(4) << tflops << "," << std::endl; } void calFP16Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){ half *a_fp16; half *b_fp16; half *c_cublas; float *c_cublas_float; float *c_host_cublas; const float value = 0.1f; cublasHandle_t cublasHandle; cudaEvent_t startcublas; cudaEvent_t stopcublas; cudaErrCheck(cudaEventCreate(&startcublas)); cudaErrCheck(cudaEventCreate(&stopcublas)); cublasErrCheck(cublasCreate(&cublasHandle)); // Use tensor cores cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH)); cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half))); cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half))); cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(half))); cudaErrCheck(cudaMalloc((void**)&c_cublas_float, m * n * 
sizeof(float))); c_host_cublas = (float*)malloc(m * n * sizeof(float)); // curand doesn't currently support fp16 so we generate in fp32 and convert to fp16. assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value); assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value); assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f); // alpha and beta MUST be the same type as compute type half alpha = __float2half(1.0f); half beta = __float2half(0.0f); // Now using cuBLAS cudaErrCheck(cudaEventRecord(startcublas)); for (int iteration = 0; iteration < numRepeats; ++iteration) { cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, a_fp16, CUDA_R_16F, m, b_fp16, CUDA_R_16F, n, &beta, c_cublas, CUDA_R_16F, m, CUDA_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP); } cudaErrCheck(cudaEventRecord(stopcublas)); cudaErrCheck(cudaEventSynchronize(stopcublas)); // Correctness check method1 // it will bring loss in half2float or float2half. For example, 0.1 will become 0.099976 after this change back and forth /* assignHalftoFloatValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, c_cublas_float); cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas_float, m * n * sizeof(float), cudaMemcpyDeviceToHost)); correctnessCheck(m, n, k, c_host_cublas, value); */ // Correctness check method 2 halfCorrectnessCheck <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, k, value); // Check time float cublasTime; cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas)); cublasTime /= numRepeats; printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k); cudaErrCheck(cudaEventDestroy(startcublas)); cudaErrCheck(cudaEventDestroy(stopcublas)); cudaErrCheck(cudaFree(a_fp16)); cudaErrCheck(cudaFree(b_fp16)); cudaErrCheck(cudaFree(c_cublas)); free(c_host_cublas); } void calFP16Accu32Tensor(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){ half *a_fp16; half *b_fp16; float *c_cublas; float *c_host_cublas; const float value = 1.0f; cublasHandle_t cublasHandle; cudaEvent_t startcublas; cudaEvent_t stopcublas; cudaErrCheck(cudaEventCreate(&startcublas)); cudaErrCheck(cudaEventCreate(&stopcublas)); cublasErrCheck(cublasCreate(&cublasHandle)); // Use tensor cores cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH)); cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half))); cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half))); cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(float))); c_host_cublas = (float*)malloc(m * n * sizeof(float)); // curand doesn't currently support fp16 so we generate in fp32 and convert to fp16. 
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value); assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value); assignFloatValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f); float alpha = 1.0f; float beta = 0.0f; // Warp up not really needed // Now using cuBLAS cudaErrCheck(cudaEventRecord(startcublas)); for (int iteration = 0; iteration < numRepeats; ++iteration) { cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, a_fp16, CUDA_R_16F, m, b_fp16, CUDA_R_16F, n, &beta, c_cublas, CUDA_R_32F, m, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); } cudaErrCheck(cudaEventRecord(stopcublas)); cudaErrCheck(cudaEventSynchronize(stopcublas)); // Correctness check cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost)); correctnessCheck(m, n, k, c_host_cublas, value); // Check time float cublasTime; cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas)); cublasTime /= numRepeats; printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k); cudaErrCheck(cudaEventDestroy(startcublas)); cudaErrCheck(cudaEventDestroy(stopcublas)); cudaErrCheck(cudaFree(a_fp16)); cudaErrCheck(cudaFree(b_fp16)); cudaErrCheck(cudaFree(c_cublas)); free(c_host_cublas); } void calFP32CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){ float *a_fp32; float *b_fp32; float *c_cublas; float *c_host_cublas; const float value = 1.0f; cublasHandle_t cublasHandle; cudaEvent_t startcublas; cudaEvent_t stopcublas; cudaErrCheck(cudaEventCreate(&startcublas)); cudaErrCheck(cudaEventCreate(&stopcublas)); cublasErrCheck(cublasCreate(&cublasHandle)); // No tensor cores cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH)); cudaErrCheck(cudaMalloc((void**)&a_fp32, m * k * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&b_fp32, k * n * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(float))); c_host_cublas = (float*)malloc(m * n * sizeof(float)); // curand doesn't currently support fp16 so we generate in fp32 and convert to fp16. 
assignFloatValue <<< (m * k + 255) / 256, 256 >>> (a_fp32, m*k, value); assignFloatValue <<< (k * n + 255) / 256, 256 >>> (b_fp32, k*n, value); assignFloatValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f); float alpha = 1.0f; float beta = 0.0f; cudaErrCheck(cudaEventRecord(startcublas)); for (int iteration = 0; iteration < numRepeats; ++iteration) { cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, a_fp32, m, b_fp32, n, &beta, c_cublas, m); } cudaErrCheck(cudaEventRecord(stopcublas)); cudaErrCheck(cudaEventSynchronize(stopcublas)); // Correctness check cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost)); correctnessCheck(m, n, k, c_host_cublas, value); // Check time float cublasTime = 0.0f; cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas)); cublasTime /= numRepeats; printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k); cudaErrCheck(cudaEventDestroy(startcublas)); cudaErrCheck(cudaEventDestroy(stopcublas)); cudaErrCheck(cudaFree(a_fp32)); cudaErrCheck(cudaFree(b_fp32)); cudaErrCheck(cudaFree(c_cublas)); free(c_host_cublas); } void calFP16CUDA(int m, int n, int k, float &s_max_tflops, int &s_max_m_n, int &s_max_k, int numRepeats){ half *a_fp16; half *b_fp16; half *c_cublas; float *c_host_cublas; const float value = 1.0f; cublasHandle_t cublasHandle; cudaEvent_t startcublas; cudaEvent_t stopcublas; cudaErrCheck(cudaEventCreate(&startcublas)); cudaErrCheck(cudaEventCreate(&stopcublas)); cublasErrCheck(cublasCreate(&cublasHandle)); // No tensor cores cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_DEFAULT_MATH)); cudaErrCheck(cudaMalloc((void**)&a_fp16, m * k * sizeof(half))); cudaErrCheck(cudaMalloc((void**)&b_fp16, k * n * sizeof(half))); cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(half))); c_host_cublas = (float*)malloc(m * n * sizeof(float)); // curand doesn't currently support fp16 so we generate in fp32 and convert to fp16. 
assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value); assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value); assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f); half alpha = __float2half(1.0f); half beta = __float2half(0.0f); // Now using cuBLAS cudaErrCheck(cudaEventRecord(startcublas)); for (int iteration = 0; iteration < numRepeats; ++iteration) { cublasHgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, a_fp16, m, b_fp16, n, &beta, c_cublas, m); } cudaErrCheck(cudaEventRecord(stopcublas)); cudaErrCheck(cudaEventSynchronize(stopcublas)); // TODO: Correctness check //cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost)); //correctnessCheck(m, n, k, c_host_cublas, value); // Check time float cublasTime; cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas)); cublasTime /= numRepeats; printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k); cudaErrCheck(cudaEventDestroy(startcublas)); cudaErrCheck(cudaEventDestroy(stopcublas)); cudaErrCheck(cudaFree(a_fp16)); cudaErrCheck(cudaFree(b_fp16)); cudaErrCheck(cudaFree(c_cublas)); free(c_host_cublas); } int main(int argc, char* argv[]) { int m,n,k; std::string precision="NULL"; bool perf = true; if (argc < 3) { return EXIT_FAILURE; } // precision = INT8_TENSOR // precision = FP16_TENSOR // precision = FP16_32_TENSOR // precision = FP32_CUDA // precision = FP16_CUDA if (argc == 3) { precision = argv[1]; std::string tmp = argv[2]; if (tmp == "performance") perf= true; else if (tmp == "pressure") perf = false; else { std::cout << "Invalid parameters!"<<std::endl; return EXIT_FAILURE; } } float s_max_tflops = 0; int s_max_m_n = 0; int s_max_k = 0; int numRepeats; /* // deprecated this INT8 test as it will achieve the best perf. Please refer to cublasLt if (precision == "INT8_TENSOR" || precision == "NULL") { std::cout << "[TensorCore INT8(INT32 accumulation) Time and TOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TOPS"; std::cout << std::endl; // for tensorcore test TODO: to verify the int8 with int8 accumulation for(m=1024, n = 1024; m <= 25600; m+=1024, n+=1024) { for(k=1024; k <= 20480; k+=1024) { int8_t *a_; int8_t *b_; int *c_cublas; int *c_host_cublas; //const int value = 1; cublasHandle_t cublasHandle; cudaEvent_t startcublas; cudaEvent_t stopcublas; cudaErrCheck(cudaEventCreate(&startcublas)); cudaErrCheck(cudaEventCreate(&stopcublas)); cublasErrCheck(cublasCreate(&cublasHandle)); // Use tensor cores cublasErrCheck(cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH)); cudaErrCheck(cudaMalloc((void**)&a_, m * k * sizeof(int8_t))); cudaErrCheck(cudaMalloc((void**)&b_, k * m * sizeof(int8_t))); cudaErrCheck(cudaMalloc((void**)&c_cublas, m * n * sizeof(int))); c_host_cublas = (int*)malloc(m * n * sizeof(int)); //TODO curand doesn't currently support fp16 so we generate in fp32 and convert to fp16. 
//assignHalfValue <<< (m * k + 255) / 256, 256 >>> (a_fp16, m*k, value); //assignHalfValue <<< (k * n + 255) / 256, 256 >>> (b_fp16, k*n, value); //assignHalfValue <<< (k * n + 255) / 256, 256 >>> (c_cublas, m*n, 0.0f); int alpha = 1; int beta = 0; int numRepeats = 1; // Warp up not really needed here as many params will be tested // Now using cuBLAS cudaErrCheck(cudaEventRecord(startcublas)); for (int iteration = 0; iteration < numRepeats; ++iteration) { cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha, a_, CUDA_R_8I, m, b_, CUDA_R_8I, n, &beta, c_cublas, CUDA_R_32I, m, CUDA_R_32I, CUBLAS_GEMM_DFALT_TENSOR_OP)); } cudaErrCheck(cudaEventRecord(stopcublas)); cudaErrCheck(cudaEventSynchronize(stopcublas)); // TODO: Correctness check //cudaErrCheck(cudaMemcpy(c_host_cublas, c_cublas, m * n * sizeof(float), cudaMemcpyDeviceToHost)); //correctnessCheck(m, n, k, c_host_cublas, value); // Check time float cublasTime; cudaErrCheck(cudaEventElapsedTime(&cublasTime, startcublas, stopcublas)); cublasTime /= numRepeats; printTime(cublasTime, m, n, k, s_max_tflops, s_max_m_n, s_max_k); cudaErrCheck(cudaEventDestroy(startcublas)); cudaErrCheck(cudaEventDestroy(stopcublas)); cudaErrCheck(cudaFree(a_)); cudaErrCheck(cudaFree(b_)); cudaErrCheck(cudaFree(c_cublas)); free(c_host_cublas); }} std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl; cudaErrCheck(cudaDeviceReset()); } */ //======= for tensorcore test // for perf test if (precision == "FP16_TENSOR" && perf == true) { std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; s_max_m_n = 0; s_max_k = 0; numRepeats = 10; for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) { for(k=1024; k <= 20480; k+=4096) { // m=n=k=1024; calFP16Tensor( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats); }} std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl; cudaErrCheck(cudaDeviceReset()); } // for pressure test if (precision == "FP16_TENSOR" && perf == false) { std::cout << "[TensorCore FP16(FP16 accumulation) Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; s_max_m_n = 0; s_max_k = 0; numRepeats = 2000; std::vector<int> mnk={512, 1024, 5120, 10240}; for(int i=0; i<mnk.size(); i++) calFP16Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats); cudaErrCheck(cudaDeviceReset()); } // for perf test if (precision == "FP16_32_TENSOR" && perf == true) { std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; numRepeats = 10; for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) { for(k=1024; k <= 20480; k+=4096) { // m=n=k=1024; calFP16Accu32Tensor( m, n, k, s_max_tflops, s_max_m_n, s_max_k, numRepeats); }} std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl; cudaErrCheck(cudaDeviceReset()); } // for pressure test if (precision == 
"FP16_32_TENSOR" && perf == false) { std::cout << "[TensorCore FP16(FP32 accumulation) Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; numRepeats = 2000; std::vector<int> mnk={512, 1024, 5120, 10240}; for(int i=0; i<mnk.size(); i++) calFP16Accu32Tensor( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats); cudaErrCheck(cudaDeviceReset()); } //======= for cudacore test if (precision == "FP32_CUDA" && perf == true) { std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; numRepeats = 10; for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) { for(k=1024; k <= 20480; k+=4096) { calFP32CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats); }} std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl; cudaErrCheck(cudaDeviceReset()); } // for pressure test if (precision == "FP32_CUDA" && perf == false) { std::cout << "[CUDA core FP32 Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; numRepeats = 2000; std::vector<int> mnk={512, 1024, 5120, 10240}; for(int i=0; i<mnk.size(); i++) calFP32CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats); cudaErrCheck(cudaDeviceReset()); } // for perf test if (precision == "FP16_CUDA" && perf == true) { std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; numRepeats = 10; for(m=1024, n = 1024; m <= 21504; m+=4096, n+=4096) { for(k=1024; k <= 20480; k+=4096) { calFP16CUDA( m, n, k,s_max_tflops, s_max_m_n, s_max_k, numRepeats); }} std::cout << "[Peak TFLOPS]=" << s_max_tflops << ", m=n="<< s_max_m_n << ", k="<<s_max_k<< std::endl; cudaErrCheck(cudaDeviceReset()); } // for pressure test if (precision == "FP16_CUDA" && perf == false) { std::cout << "[CUDA core FP16 Time and TFLOPS Result]" << std::endl; std::cout << std::setw(7) << "m" << std::setw(7) << "n" << std::setw(7) << "k"; std::cout << std::setw(15) << "Time (msec)" << std::setw(15) << "TFLOPS"; std::cout << std::endl; s_max_tflops = 0; numRepeats = 2000; std::vector<int> mnk={512, 1024, 5120, 10240}; for(int i=0; i<mnk.size(); i++) calFP16CUDA( mnk[i], mnk[i], mnk[i], s_max_tflops, s_max_m_n, s_max_k, numRepeats); cudaErrCheck(cudaDeviceReset()); } return 0; }
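For reference, the throughput figure printed by printTime in the benchmark above comes from cal_tflops: 2*m*n*k floating-point operations divided by the elapsed time. A small standalone worked example of that formula (values chosen only for illustration):

// Worked example of the cal_tflops formula used above:
// flops = 2*m*n*k, tflops = (1e-12 * flops) / (1e-3 * msec).
#include <cstdio>

static double cal_tflops_example(int m, int n, int k, double msec) {
  double flops = 2. * m * n * k;            // one multiply + one add per term
  return (1E-12 * flops) / (1E-3 * msec);   // TFLOP divided by seconds
}

int main() {
  // m = n = k = 1024 finishing in 1 ms: 2 * 1024^3 ~= 2.15e9 FLOP -> ~2.15 TFLOPS.
  printf("%.2f TFLOPS\n", cal_tflops_example(1024, 1024, 1024, 1.0));
  return 0;
}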
#define ERR (1.e-24) #ifdef __NVCC__ #define DECL __host__ __device__ #else #define DECL #endif template <typename RealType> struct Surreal { typedef RealType value_type; RealType real, imag; DECL Surreal<RealType>(){}; // uninitialized for efficiency DECL Surreal<RealType>(const RealType &v, const RealType &d) : real(v), imag(d) {} DECL Surreal<RealType>(const RealType &v) : real(v), imag(0) {} // copy constructor DECL Surreal<RealType>(const Surreal<RealType> &z) : real(z.real), imag(z.imag) {} DECL Surreal &operator=(const Surreal &s) { real = s.real; imag = s.imag; return *this; }; DECL Surreal &operator+=(const Surreal &); DECL Surreal &operator+=(const RealType &); DECL Surreal operator-() const; DECL Surreal &operator-=(const Surreal &); DECL Surreal &operator-=(const RealType &); DECL Surreal &operator*=(const Surreal &); DECL Surreal &operator*=(const RealType &); DECL Surreal &operator/=(const Surreal &); DECL Surreal &operator/=(const RealType &); template <typename OtherType> DECL Surreal &operator/=(const OtherType &); DECL Surreal sin(const Surreal &); DECL Surreal cos(const Surreal &); DECL Surreal acos(const Surreal &); DECL Surreal atan2(const Surreal &, const Surreal &); DECL Surreal sqrt(const Surreal &); DECL Surreal exp(const Surreal &); }; template <typename RealType> DECL Surreal<RealType> abs(const Surreal<RealType> &v) { return v.real < 0.0 ? -v : v; } template <typename RealType> DECL Surreal<RealType> fabs(const Surreal<RealType> &v) { return v.real < 0.0 ? -v : v; } template <typename RealType> DECL Surreal<RealType> max(const double &a, const Surreal<RealType> &v) { return v.real > a ? v : Surreal<RealType>(a); } #ifdef __NVCC__ template <typename RealType> DECL Surreal<RealType> __shfl_sync(unsigned mask, Surreal<RealType> &var, int srcLane, int width = warpSize) { var.real = __shfl_sync(mask, var.real, srcLane, width); var.imag = __shfl_sync(mask, var.imag, srcLane, width); return var; } template <typename RealType> __device__ inline void atomicAddOffset(Surreal<RealType> *base_ptr, const unsigned offset, const Surreal<RealType> &val) { RealType *real_ptr = reinterpret_cast<RealType *>(base_ptr) + offset * 2; RealType *imag_ptr = real_ptr + 1; atomicAdd(real_ptr, val.real); atomicAdd(imag_ptr, val.imag); } template <typename PtrType, typename SecondType> __device__ inline void atomicAddOffsetSplit(Surreal<PtrType> *base_ptr, const unsigned offset, const Surreal<SecondType> &val) { PtrType *real_ptr = reinterpret_cast<PtrType *>(base_ptr) + offset * 2; PtrType *imag_ptr = real_ptr + 1; atomicAdd(real_ptr, val.real); atomicAdd(imag_ptr, val.imag); } #endif template <typename RealType> DECL bool operator>(const Surreal<RealType> &l, const Surreal<RealType> &r) { return l.real > r.real; } template <typename RealType> DECL bool operator<(const Surreal<RealType> &l, const Surreal<RealType> &r) { return l.real < r.real; } template <typename RealType> DECL bool operator>(const Surreal<RealType> &l, const double &r) { return l.real > r; } template <typename RealType> DECL bool operator<(const Surreal<RealType> &l, const double &r) { return l.real < r; } template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator+=(const Surreal<RealType> &z) { real += z.real; imag += z.imag; return *this; } template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator+=(const RealType &r) { real += r; return *this; } template <typename RealType> DECL Surreal<RealType> operator+(const Surreal<RealType> &a, const Surreal<RealType> &b) { return 
Surreal<RealType>(a.real + b.real, a.imag + b.imag); }

template <typename RealType> DECL Surreal<RealType> operator+(const int &i, const Surreal<RealType> &z) { return Surreal<RealType>(i + z.real, z.imag); }
template <typename RealType> DECL Surreal<RealType> operator+(const float &i, const Surreal<RealType> &z) { return Surreal<RealType>(i + z.real, z.imag); }
template <typename RealType> DECL Surreal<RealType> operator+(const double &i, const Surreal<RealType> &z) { return Surreal<RealType>(i + z.real, z.imag); }
template <typename RealType> DECL Surreal<RealType> operator+(const Surreal<RealType> &z, const int &i) { return Surreal<RealType>(i + z.real, z.imag); }
template <typename RealType> DECL Surreal<RealType> operator+(const Surreal<RealType> &z, const float &i) { return Surreal<RealType>(i + z.real, z.imag); }
template <typename RealType> DECL Surreal<RealType> operator+(const Surreal<RealType> &z, const double &i) { return Surreal<RealType>(i + z.real, z.imag); }

template <typename RealType> DECL Surreal<RealType> Surreal<RealType>::operator-() const { return Surreal<RealType>(-real, -imag); }
template <typename RealType> DECL Surreal<RealType> operator-(const Surreal<RealType> &a, const Surreal<RealType> &b) { return Surreal<RealType>(a.real - b.real, a.imag - b.imag); }
template <typename RealType> DECL Surreal<RealType> operator-(const Surreal<RealType> &a, const float &b) { return Surreal<RealType>(a.real - b, a.imag); }
template <typename RealType> DECL Surreal<RealType> operator-(const Surreal<RealType> &a, const double &b) { return Surreal<RealType>(a.real - b, a.imag); }
template <typename RealType> DECL Surreal<RealType> operator-(const Surreal<RealType> &a, const int &b) { return Surreal<RealType>(a.real - b, a.imag); }
template <typename RealType> DECL Surreal<RealType> operator-(const float &b, const Surreal<RealType> &a) { return Surreal<RealType>(b - a.real, -a.imag); }
template <typename RealType> DECL Surreal<RealType> operator-(const double &b, const Surreal<RealType> &a) { return Surreal<RealType>(b - a.real, -a.imag); }
template <typename RealType> DECL Surreal<RealType> operator-(const int &b, const Surreal<RealType> &a) { return Surreal<RealType>(b - a.real, -a.imag); }

template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator-=(const Surreal<RealType> &z) { real -= z.real; imag -= z.imag; return *this; }
template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator-=(const RealType &r) { real -= r; return *this; }

template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator*=(const Surreal<RealType> &z) { imag = real * z.imag + z.real * imag; real *= z.real; return *this; }
template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator*=(const RealType &r) { real *= r; imag *= r; return *this; }

template <typename RealType> DECL Surreal<RealType> operator*(const Surreal<RealType> &a, const Surreal<RealType> &b) {
  return Surreal<RealType>(b.real * a.real, // the a.imag * b.imag term is dropped: the dual unit squares to zero in first-order autodiff
                           b.real * a.imag + a.real * b.imag);
}
template <typename RealType> DECL Surreal<RealType> operator*(const float &i, const Surreal<RealType> &z) { return Surreal<RealType>(i * z.real, i * z.imag); }
template <typename RealType> DECL Surreal<RealType> operator*(const double &i, const Surreal<RealType> &z) { return Surreal<RealType>(i * z.real, i * z.imag); }
template <typename RealType> DECL Surreal<RealType> operator*(const int &i, const Surreal<RealType> &z) { return Surreal<RealType>(i * z.real, i * z.imag); }
template <typename RealType> DECL Surreal<RealType> operator*(const Surreal<RealType> &z, const float &i) { return Surreal<RealType>(i * z.real, i * z.imag); }
template <typename RealType> DECL Surreal<RealType> operator*(const Surreal<RealType> &z, const double &i) { return Surreal<RealType>(i * z.real, i * z.imag); }
template <typename RealType> DECL Surreal<RealType> operator*(const Surreal<RealType> &z, const int &i) { return Surreal<RealType>(i * z.real, i * z.imag); }

template <typename RealType> DECL Surreal<RealType> operator/(const Surreal<RealType> &a, const Surreal<RealType> &b) { return Surreal<RealType>(a.real / b.real, (b.real * a.imag - a.real * b.imag) / (b.real * b.real)); }
// template <typename RealType> DECL Surreal<RealType> operator/(const Surreal<RealType> &z, const int &r) { return Surreal<RealType>(z.real / r, z.imag / r); }
template <typename RealType> DECL Surreal<RealType> operator/(const Surreal<RealType> &z, const float &r) { return Surreal<RealType>(z.real / r, z.imag / r); }
template <typename RealType> DECL Surreal<RealType> operator/(const Surreal<RealType> &z, const double &r) { return Surreal<RealType>(z.real / r, z.imag / r); }

// dividing a real by a dual number is different from dividing a dual number by a real
template <typename RealType> DECL Surreal<RealType> operator/(const int &r, const Surreal<RealType> &z) { return Surreal<RealType>(r / z.real, -r * z.imag / (z.real * z.real)); }
template <typename RealType> DECL Surreal<RealType> operator/(const float &r, const Surreal<RealType> &z) { return Surreal<RealType>(r / z.real, -r * z.imag / (z.real * z.real)); }
template <typename RealType> DECL Surreal<RealType> operator/(const double &r, const Surreal<RealType> &z) { return Surreal<RealType>(r / z.real, -r * z.imag / (z.real * z.real)); }

template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator/=(const Surreal<RealType> &z) { imag = (z.real * imag - real * z.imag) / (z.real * z.real); real /= z.real; return *this; }
template <typename RealType> DECL Surreal<RealType> &Surreal<RealType>::operator/=(const RealType &r) { real /= r; imag /= r; return *this; }
template <typename RealType> template <typename OtherType> DECL Surreal<RealType> &Surreal<RealType>::operator/=(const OtherType &i) { real /= RealType(i); imag /= RealType(i); return *this; } // convert the scalar to RealType, then scale both components

template <typename RealType> DECL Surreal<RealType> sin(const Surreal<RealType> &z) { return Surreal<RealType>(sin(z.real), z.imag * cos(z.real)); }
template <typename RealType> DECL Surreal<RealType> cos(const Surreal<RealType> &z) { return Surreal<RealType>(cos(z.real), -z.imag * sin(z.real)); }
template <typename RealType> DECL Surreal<RealType> acos(const Surreal<RealType> &z) { return Surreal<RealType>(acos(z.real), -z.imag / sqrt(1.0 - z.real * z.real + ERR)); }
template <typename RealType> DECL Surreal<RealType> tanh(const Surreal<RealType> &z) { double coshv = cosh(z.real); return Surreal<RealType>(tanh(z.real), z.imag / (coshv * coshv)); }
template <typename RealType> DECL Surreal<RealType> cosh(const Surreal<RealType> &z) { double coshv = cosh(z.real); double sinhv = sinh(z.real); return Surreal<RealType>(coshv, z.imag * sinhv); }
template <typename RealType> DECL Surreal<RealType> atan2(const Surreal<RealType> &z1, const Surreal<RealType> &z2) { return Surreal<RealType>(atan2(z1.real, z2.real), (z2.real * z1.imag - z1.real * z2.imag) / (z1.real * z1.real + z2.real * z2.real)); }
template <typename RealType> DECL Surreal<RealType> log(const Surreal<RealType> &z) { return Surreal<RealType>(log(z.real), z.imag / z.real); }
template <typename RealType> DECL Surreal<RealType> sqrt(const Surreal<RealType> &z) { RealType sqrtv = sqrt(z.real); return Surreal<RealType>(sqrtv, 0.5 * z.imag / sqrtv); }
template <typename RealType> DECL Surreal<RealType> rsqrt(const Surreal<RealType> &z) { RealType rsqrta = 1 / sqrt(z.real); return Surreal<RealType>(rsqrta, -(z.imag * rsqrta) / (2 * z.real)); }
template <typename RealType> DECL Surreal<RealType> exp(const Surreal<RealType> &z) { RealType expv = exp(z.real); return Surreal<RealType>(expv, z.imag * expv); }
template <typename RealType> DECL Surreal<RealType> floor(const Surreal<RealType> &z) { return Surreal<RealType>(floor(z.real), 0.); }
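// Minimal usage sketch of the dual-number ("Surreal") autodiff defined above.
// It assumes only what the operators already rely on: a Surreal(real, imag)
// constructor and public real/imag members. Seeding the derivative slot with 1
// makes every overload propagate f'(x) alongside f(x); example_df_dx below is
// a hypothetical helper, not part of the original header.
template <typename RealType>
DECL RealType example_df_dx(const RealType x) {
  Surreal<RealType> xd(x, RealType(1));          // seed dx/dx = 1
  Surreal<RealType> y = xd * sin(xd) + exp(xd);  // f(x) = x*sin(x) + exp(x)
  return y.imag;                                 // f'(x) = sin(x) + x*cos(x) + exp(x)
}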
#include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/cuda/vol2col.cuh> namespace at { namespace native { namespace { static inline void slow_conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { AT_ERROR( "Given input size per channel: (", input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). 
Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void slow_conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& finput, Tensor& fgrad_input) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "slow_conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg}); slow_conv_transpose3d_shape_check( input_, Tensor(), weight_, bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); TORCH_CHECK( !bias.defined() || bias.is_contiguous(), "bias tensor has to be contiguous"); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + 
output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets // increased, and always contains ones. if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), m, static_cast<scalar_t>(0), columns.data_ptr<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::cuda::getCurrentCUDAStream(), columns.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.data_ptr<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.data_ptr<scalar_t>(), k_, bias.data_ptr<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.data_ptr<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size 
", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); Tensor grad_columns = finput; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_columns_arg{grad_columns, "grad_columns", 4}, grad_input_arg{grad_input, "grad_input", 5}; checkAllSameGPU( "slow_conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_columns_arg, grad_input_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Resize temporary columns grad_columns.resize_( {n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); if (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0) { // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.data_ptr<scalar_t>(), 
n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = grad_columns.size(1); int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0) ? grad_columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 'n', 'n', n, m, k, static_cast<scalar_t>(1), gemm_in_ptr, n, weight.data_ptr<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.data_ptr<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void slow_conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "slow_conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg, columns_arg, ones_arg}); slow_conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, 
padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Define a buffer of ones, for bias accumulation if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... 
ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); if (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0) { // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.data_ptr<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.data_ptr<scalar_t>()); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = columns.size(0); // n_output_plane * kt * kh * kw int64_t m = input_n.size(0); // n_input_plane int64_t k = columns.size(1); // input_height * input_width // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) auto gemm_in_ptr = (kernel_depth != 1 || kernel_height != 1 || kernel_width != 1 || stride_depth != 1 || stride_height != 1 || stride_width != 1 || dilation_depth != 1 || dilation_height != 1 || dilation_width != 1 || padding_depth != 0 || padding_height != 0 || padding_width != 0) ? 
columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>(); at::cuda::blas::gemm<scalar_t>( 't', 'n', n, m, k, scale, gemm_in_ptr, k, input_n.data_ptr<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.data_ptr<scalar_t>(), n); } // Do Bias: if (grad_bias.defined()) { // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t k_ = output_depth * output_height * output_width; // Do GEMV (note: this is a bit confusing because gemv assumes // column-major matrices) at::cuda::blas::gemv<scalar_t>( 't', k_, m_, scale, grad_output_n.data_ptr<scalar_t>(), k_, ones.data_ptr<scalar_t>(), 1, static_cast<scalar_t>(1), grad_bias.data_ptr<scalar_t>(), 1); } } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& slow_conv_transpose3d_out_cuda(const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& output) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; Tensor finput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor fgrad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } Tensor slow_conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const c10::optional<Tensor>& bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { // See [Note: hacky wrapper removal for optional tensor] c10::MaybeOwned<Tensor> bias_maybe_owned = at::borrow_from_optional_tensor(bias_opt); const Tensor& bias = *bias_maybe_owned; Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor finput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); Tensor fgrad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); slow_conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose3d_backward_out_cuda(const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad, Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, 
Tensor> slow_conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { slow_conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { slow_conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
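// The output-size relation used repeatedly above for transposed convolution,
// written out as a small hypothetical helper for one spatial dimension (it
// mirrors the output_depth/height/width expressions in this file and is not
// part of the original source):
//   out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding
#include <cstdint>
static inline int64_t conv_transpose_out_size(int64_t in, int64_t kernel, int64_t stride,
                                              int64_t padding, int64_t dilation,
                                              int64_t output_padding) {
  return (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding;
}
// Example: in = 8, kernel = 3, stride = 2, padding = 1, dilation = 1, output_padding = 1
// gives (8 - 1) * 2 - 2 * 1 + (1 * 2 + 1) + 1 = 16.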
#include <claraparabricks/genomeworks/utils/cudautils.hpp> #include <claraparabricks/genomeworks/utils/mathutils.hpp> #include <claraparabricks/genomeworks/utils/signed_integer_utils.hpp> #include <claraparabricks/genomeworks/utils/threadsafe_containers.hpp> #include <claraparabricks/genomeworks/cudaaligner/aligner.hpp> #include <claraparabricks/genomeworks/cudaaligner/alignment.hpp> #include <claraparabricks/genomeworks/cudamapper/index.hpp> #include <claraparabricks/genomeworks/cudamapper/matcher.hpp> #include <claraparabricks/genomeworks/cudamapper/overlapper.hpp> #include <claraparabricks/genomeworks/cudamapper/utils.hpp> #include "application_parameters.hpp" #include "cudamapper_utils.hpp" #include "index_batcher.cuh" namespace claraparabricks { namespace genomeworks { namespace cudamapper { namespace { void run_alignment_batch(DefaultDeviceAllocator allocator, std::mutex& overlap_idx_mtx, std::vector<Overlap>& overlaps, const io::FastaParser& query_parser, const io::FastaParser& target_parser, int32_t& overlap_idx, const int32_t max_query_size, const int32_t max_target_size, std::vector<std::string>& cigars, const int32_t batch_size) { int32_t device_id; GW_CU_CHECK_ERR(cudaGetDevice(&device_id)); CudaStream stream = make_cuda_stream(); std::unique_ptr<cudaaligner::Aligner> batch = cudaaligner::create_aligner( max_query_size, max_target_size, batch_size, cudaaligner::AlignmentType::global_alignment, allocator, stream.get(), device_id); while (true) { int32_t idx_start = 0, idx_end = 0; // Get the range of overlaps for this batch { std::lock_guard<std::mutex> lck(overlap_idx_mtx); if (overlap_idx == get_size<int32_t>(overlaps)) { break; } else { idx_start = overlap_idx; idx_end = std::min(idx_start + batch_size, get_size<int32_t>(overlaps)); overlap_idx = idx_end; } } for (int32_t idx = idx_start; idx < idx_end; idx++) { const Overlap& overlap = overlaps[idx]; const io::FastaSequence query = query_parser.get_sequence_by_id(overlap.query_read_id_); const io::FastaSequence target = target_parser.get_sequence_by_id(overlap.target_read_id_); const char* query_start = &query.seq[overlap.query_start_position_in_read_]; const int32_t query_length = overlap.query_end_position_in_read_ - overlap.query_start_position_in_read_; const char* target_start = &target.seq[overlap.target_start_position_in_read_]; const int32_t target_length = overlap.target_end_position_in_read_ - overlap.target_start_position_in_read_; cudaaligner::StatusType status = batch->add_alignment(query_start, query_length, target_start, target_length, false, overlap.relative_strand == RelativeStrand::Reverse); if (status != cudaaligner::success) { throw std::runtime_error("Experienced error type " + std::to_string(status)); } } // Launch alignment on the GPU. align_all is an async call. batch->align_all(); // Synchronize all alignments. batch->sync_alignments(); const std::vector<std::shared_ptr<cudaaligner::Alignment>>& alignments = batch->get_alignments(); { GW_NVTX_RANGE(profiler, "copy_alignments"); for (int32_t i = 0; i < get_size<int32_t>(alignments); i++) { cigars[idx_start + i] = alignments[i]->convert_to_cigar(); } } // Reset batch to reuse memory for new alignments. 
batch->reset(); } } /// \brief performs global alignment between overlapped regions of reads /// \param overlaps List of overlaps to align /// \param query_parser Parser for query reads /// \param target_parser Parser for target reads /// \param num_alignment_engines Number of parallel alignment engines to use for alignment /// \param cigars Output vector to store CIGAR strings for alignments /// \param allocator The allocator to allocate memory on the device void align_overlaps(DefaultDeviceAllocator allocator, std::vector<Overlap>& overlaps, const io::FastaParser& query_parser, const io::FastaParser& target_parser, int32_t num_alignment_engines, std::vector<std::string>& cigars) { // Calculate max target/query size in overlaps int32_t max_query_size = 0; int32_t max_target_size = 0; for (const auto& overlap : overlaps) { int32_t query_overlap_size = overlap.query_end_position_in_read_ - overlap.query_start_position_in_read_; int32_t target_overlap_size = overlap.target_end_position_in_read_ - overlap.target_start_position_in_read_; if (query_overlap_size > max_query_size) max_query_size = query_overlap_size; if (target_overlap_size > max_target_size) max_target_size = target_overlap_size; } // Heuristically calculate max alignments possible with available memory based on // empirical measurements of memory needed for alignment per base. const float memory_per_base = 0.03f; // Estimation of space per base in bytes for alignment float memory_per_alignment = memory_per_base * max_query_size * max_target_size; size_t free, total; GW_CU_CHECK_ERR(cudaMemGetInfo(&free, &total)); const size_t max_alignments = (static_cast<float>(free) * 85 / 100) / memory_per_alignment; // Using 85% of available memory int32_t batch_size = std::min(get_size<int32_t>(overlaps), static_cast<int32_t>(max_alignments)) / num_alignment_engines; std::cerr << "Aligning " << overlaps.size() << " overlaps (" << max_query_size << "x" << max_target_size << ") with batch size " << batch_size << std::endl; int32_t overlap_idx = 0; std::mutex overlap_idx_mtx; // Launch multiple alignment engines in separate threads to overlap D2H and H2D copies // with compute from concurrent engines. 
std::vector<std::future<void>> align_futures; for (int32_t t = 0; t < num_alignment_engines; t++) { align_futures.push_back(std::async(std::launch::async, &run_alignment_batch, allocator, std::ref(overlap_idx_mtx), std::ref(overlaps), std::ref(query_parser), std::ref(target_parser), std::ref(overlap_idx), max_query_size, max_target_size, std::ref(cigars), batch_size)); } for (auto& f : align_futures) { f.get(); } } /// OverlapsAndCigars - packs overlaps and cigars together so they can be passed to writer thread more easily struct OverlapsAndCigars { std::vector<Overlap> overlaps; std::vector<std::string> cigars; }; /// \brief does overlapping and matching for pairs of query and target indices from device_batch /// \param device_batch list of query and target indices to be processed /// \param query_index_cache indices should be loaded into cache beforehand /// \param target_index_cache indices should be loaded into cache beforehand /// \param application_parameters /// \param device_allocator /// \param overlaps_and_cigars_to_process overlaps and cigars are output here and then consumed by another thread /// \param number_of_skipped_pairs_of_indices number of pairs of indices skipped due to OOM error, variable shared between all threads, each call increases the number by the number of skipped pairs /// \param cuda_stream cuda stream on which the computation should be done void process_one_device_batch(const IndexBatch& device_batch, std::shared_ptr<DeviceIndexCache> query_index_cache, std::shared_ptr<DeviceIndexCache> target_index_cache, const ApplicationParameters& application_parameters, DefaultDeviceAllocator device_allocator, ThreadsafeProducerConsumer<OverlapsAndCigars>& overlaps_and_cigars_to_process, std::atomic<int32_t>& number_of_skipped_pairs_of_indices, cudaStream_t cuda_stream) { GW_NVTX_RANGE(profiler, "main::process_one_device_batch"); const std::vector<IndexDescriptor>& query_index_descriptors = device_batch.query_indices; const std::vector<IndexDescriptor>& target_index_descriptors = device_batch.target_indices; assert(!query_index_descriptors.empty() && !target_index_descriptors.empty()); // process pairs of query and target indices for (const IndexDescriptor& query_index_descriptor : query_index_descriptors) { for (const IndexDescriptor& target_index_descriptor : target_index_descriptors) { // if doing all-to-all skip pairs in which target batch has smaller id than query batch as it will be covered by symmetry if (!application_parameters.all_to_all || target_index_descriptor.first_read() >= query_index_descriptor.first_read()) { std::shared_ptr<const Index> query_index = query_index_cache->get_index(query_index_descriptor); std::shared_ptr<const Index> target_index = target_index_cache->get_index(target_index_descriptor); try { // find anchors and overlaps auto matcher = Matcher::create_matcher(device_allocator, *query_index, *target_index, cuda_stream); std::vector<Overlap> overlaps; auto overlapper = Overlapper::create_overlapper(device_allocator, cuda_stream); overlapper->get_overlaps(overlaps, matcher->anchors(), application_parameters.all_to_all, application_parameters.min_residues, application_parameters.min_overlap_len, application_parameters.min_bases_per_residue, application_parameters.min_overlap_fraction); // free up memory taken by matcher matcher.reset(nullptr); // Align overlaps std::vector<std::string> cigars; if (application_parameters.alignment_engines > 0) { cigars.resize(overlaps.size()); GW_NVTX_RANGE(profiler, "align_overlaps"); 
align_overlaps(device_allocator, overlaps, *application_parameters.query_parser, *application_parameters.target_parser, application_parameters.alignment_engines, cigars); } // pass overlaps and cigars to writer thread overlaps_and_cigars_to_process.add_new_element({std::move(overlaps), std::move(cigars)}); } catch (device_memory_allocation_exception& oom_exception) { // if the application ran out of memory skip this pair of indices ++(number_of_skipped_pairs_of_indices); } } } } } /// \brief loads one batch into host memory and then processes its device batches one by one /// \param batch list of all query and target indices that belong to this batch, as well as subgroups of those indices that belong to device batches /// \param application_parameters /// \param host_index_cache data will be loaded into cache within the function /// \param overlaps_and_cigars_to_process overlaps and cigars are output to this structure and the then consumed by another thread /// \param number_of_skipped_pairs_of_indices number of pairs of indices skipped due to OOM error, variable shared between all threads, each call increases the number by the number of skipped pairs /// \param cuda_stream void process_one_batch(const BatchOfIndices& batch, const ApplicationParameters& application_parameters, DefaultDeviceAllocator device_allocator, HostIndexCache& host_index_cache, ThreadsafeProducerConsumer<OverlapsAndCigars>& overlaps_and_cigars_to_process, std::atomic<int32_t>& number_of_skipped_pairs_of_indices, cudaStream_t cuda_stream) { GW_NVTX_RANGE(profiler, "main::process_one_batch"); const IndexBatch& host_batch = batch.host_batch; const std::vector<IndexBatch>& device_batches = batch.device_batches; // if there is only one device batch and it is the same as host bach (which should be the case then) there is no need to copy indices to host // as they will be queried only once const bool skip_copy_to_host = 1 == device_batches.size(); assert(!skip_copy_to_host || (host_batch.query_indices == device_batches.front().query_indices && host_batch.target_indices == device_batches.front().target_indices)); // load indices into host memory { GW_NVTX_RANGE(profiler, "main::process_one_batch::generte_host_indices"); assert(!host_batch.query_indices.empty() && !host_batch.target_indices.empty() && !device_batches.empty()); host_index_cache.generate_content(CacheType::query_cache, host_batch.query_indices, device_batches.front().query_indices, skip_copy_to_host); host_index_cache.generate_content(CacheType::target_cache, host_batch.target_indices, device_batches.front().target_indices, skip_copy_to_host); } // process device batches one by one // Processing one device batch is overlapped with fetching indices from host to device for the next device batch. // Loop uses copy_device_batch_index as its index, compute_device_batch_index is equal to copy_device_batch_index - 1 // (compute_device_batch_index does not actually exist). 
// copy_device_batch_index loops over one more element because for such element compute_device_batch_index would have been equal to the last batch std::shared_ptr<DeviceIndexCache> query_cache_being_copied_into; std::shared_ptr<DeviceIndexCache> query_cache_with_ready_data; std::shared_ptr<DeviceIndexCache> target_cache_being_copied_into; std::shared_ptr<DeviceIndexCache> target_cache_with_ready_data; for (int32_t copy_device_batch_index = 0; copy_device_batch_index < get_size<int32_t>(batch.device_batches) + 1; ++copy_device_batch_index) { if (copy_device_batch_index > 0) { // if not the first batch wait for previous batch to finish copying GW_NVTX_RANGE(profiler, "main::process_one_batch::finish_generating_device_indices"); query_cache_being_copied_into->wait_for_data_to_be_ready(); target_cache_being_copied_into->wait_for_data_to_be_ready(); query_cache_with_ready_data = query_cache_being_copied_into; target_cache_with_ready_data = target_cache_being_copied_into; } if (copy_device_batch_index < get_size<int32_t>(batch.device_batches)) { // if not pass-the-last batch start copying the batch GW_NVTX_RANGE(profiler, "main::process_one_batch::start_generating_device_indices"); query_cache_being_copied_into = host_index_cache.start_copying_indices_to_device(CacheType::query_cache, batch.device_batches[copy_device_batch_index].query_indices); target_cache_being_copied_into = host_index_cache.start_copying_indices_to_device(CacheType::target_cache, batch.device_batches[copy_device_batch_index].target_indices); } if (copy_device_batch_index > 0) { // when copy_device_batch_index == 0 then compute_device_batch_index == -1, so there is no batch to process GW_NVTX_RANGE(profiler, "main::process_one_batch::process_device_batch"); process_one_device_batch(batch.device_batches[copy_device_batch_index - 1], query_cache_with_ready_data, target_cache_with_ready_data, application_parameters, device_allocator, overlaps_and_cigars_to_process, number_of_skipped_pairs_of_indices, cuda_stream); } GW_CU_CHECK_ERR(cudaStreamSynchronize(cuda_stream)); } } /// \brief does post-processing and writes data to output /// \param device_id /// \param application_parameters /// \param overlaps_and_cigars_to_process new data is added to this structure as it gets available, also signals when there is not going to be any new data /// \param output_mutex controls access to output to prevent race conditions void postprocess_and_write_thread_function(const int32_t device_id, const ApplicationParameters& application_parameters, ThreadsafeProducerConsumer<OverlapsAndCigars>& overlaps_and_cigars_to_process, std::mutex& output_mutex) { GW_NVTX_RANGE(profiler, ("main::postprocess_and_write_thread_for_device_" + std::to_string(device_id)).c_str()); // This function is expected to run in a separate thread so set current device in order to avoid problems GW_CU_CHECK_ERR(cudaSetDevice(device_id)); // keep processing data as it arrives gw_optional_t<OverlapsAndCigars> data_to_write; while (data_to_write = overlaps_and_cigars_to_process.get_next_element()) // if optional is empty that means that there will be no more overlaps to process and the thread can finish { { GW_NVTX_RANGE(profiler, "main::postprocess_and_write_thread::one_set"); std::vector<Overlap>& overlaps = data_to_write->overlaps; const std::vector<std::string>& cigars = data_to_write->cigars; { GW_NVTX_RANGE(profiler, "main::postprocess_and_write_thread::postprocessing"); // Overlap post processing - add overlaps which can be combined into longer ones. 
Overlapper::post_process_overlaps(data_to_write->overlaps, application_parameters.drop_fused_overlaps); } if (application_parameters.perform_overlap_end_rescue) { GW_NVTX_RANGE(profiler, "main::postprocess_and_write_thread::rescue_overlap_end"); // Perform overlap-end rescue Overlapper::rescue_overlap_ends(data_to_write->overlaps, *application_parameters.query_parser, *application_parameters.target_parser, 50, 0.5); } // write to output PAF, SAM, or BAM format { GW_NVTX_RANGE(profiler, "main::postprocess_and_write_thread::print_function"); #ifdef GW_BUILD_HTSLIB if (application_parameters.format == OutputFormat::PAF) { #endif print_paf(overlaps, cigars, *application_parameters.query_parser, *application_parameters.target_parser, application_parameters.kmer_size, output_mutex); #ifdef GW_BUILD_HTSLIB } // SAM or BAM, depends on type of format else { print_sam(overlaps, cigars, *application_parameters.query_parser, *application_parameters.target_parser, application_parameters.format, output_mutex); } #endif } } } } /// \brief controls one GPU /// /// Each thread is resposible for one GPU. It takes one batch, processes it and passes it to postprocess_and_write_thread. /// It keeps doing this as long as there are available batches. It also controls the postprocess_and_write_thread. /// /// \param device_id /// \param batches_of_indices /// \param application_parameters /// \param output_mutex /// \param cuda_stream_computation /// \param cuda_stream_copy /// \param number_of_total_batches /// \param number_of_skipped_pairs_of_indices /// \param number_of_processed_batches void worker_thread_function(const int32_t device_id, ThreadsafeDataProvider<BatchOfIndices>& batches_of_indices, const ApplicationParameters& application_parameters, std::mutex& output_mutex, cudaStream_t cuda_stream_computation, cudaStream_t cuda_stream_copy, const int64_t number_of_total_batches, std::atomic<int32_t>& number_of_skipped_pairs_of_indices, std::atomic<int64_t>& number_of_processed_batches) { GW_NVTX_RANGE(profiler, "main::worker_thread"); // This function is expected to run in a separate thread so set current device in order to avoid problems GW_CU_CHECK_ERR(cudaSetDevice(device_id)); // Whenever device_allocator is used directly (e.g. in Thrust library) it will be associated with cuda_stream_computation DefaultDeviceAllocator device_allocator = create_default_device_allocator(application_parameters.max_cached_memory_bytes, cuda_stream_computation); // create index_cache, indices are not created at this point but later as each batch gets processed HostIndexCache host_index_cache(application_parameters.all_to_all, device_allocator, application_parameters.query_parser, application_parameters.target_parser, application_parameters.kmer_size, application_parameters.windows_size, true, // hash_representations application_parameters.filtering_parameter, cuda_stream_computation, cuda_stream_copy); // data structure used to exchange data with postprocess_and_write_thread ThreadsafeProducerConsumer<OverlapsAndCigars> overlaps_and_cigars_to_process; // There should be at least one postprocess_and_write_thread per worker_thread. 
If more threads are available one thread should be reserved for // worker_thread and all other threads should be postprocess_and_write_threads const int32_t threads_per_device = ceiling_divide(static_cast<int32_t>(std::thread::hardware_concurrency()), application_parameters.num_devices); const int32_t postprocess_and_write_threads_per_device = std::max(threads_per_device - 1, 1); // postprocess_and_write_threads run in the background and post-process and write overlaps and cigars to output as they become available in overlaps_and_cigars_to_process std::vector<std::thread> postprocess_and_write_threads; for (int32_t i = 0; i < postprocess_and_write_threads_per_device; ++i) { postprocess_and_write_threads.emplace_back(postprocess_and_write_thread_function, device_id, std::ref(application_parameters), std::ref(overlaps_and_cigars_to_process), std::ref(output_mutex)); } // keep processing batches of indices until there are none left gw_optional_t<BatchOfIndices> batch_of_indices; while (batch_of_indices = batches_of_indices.get_next_element()) // if optional is empty that means that there are no more batches to process and the thread can finish { const int64_t batch_number = number_of_processed_batches.fetch_add(1); // as this is not called atomically with get_next_element() the value does not have to be completely accurate, but this is ok as the value is only use for displaying progress const std::string progress_message = "Device " + std::to_string(device_id) + " took batch " + std::to_string(batch_number + 1) + " out of " + std::to_string(number_of_total_batches) + " batches in total\n"; std::cerr << progress_message; // TODO: possible race condition, switch to logging library process_one_batch(batch_of_indices.value(), application_parameters, device_allocator, host_index_cache, overlaps_and_cigars_to_process, number_of_skipped_pairs_of_indices, cuda_stream_computation); } // tell writer thread that there will be no more overlaps and it can finish once it has written all overlaps overlaps_and_cigars_to_process.signal_pushed_last_element(); for (std::thread& postprocess_and_write_thread : postprocess_and_write_threads) { postprocess_and_write_thread.join(); } // by this point all GPU work should anyway be done as postprocess_and_write_thread also finished and all GPU work had to be done before last values could be written GW_CU_CHECK_ERR(cudaStreamSynchronize(cuda_stream_computation)); } } // namespace int main(int argc, char* argv[]) { logging::initialize_logger(logging::LogLevel::error); const ApplicationParameters parameters(argc, argv); std::mutex output_mutex; // Program should process all combinations of query and target (if query and target are the same half of those can be skipped // due to symmetry). The matrix of query-target combinations is split into tiles called batches. Worker threads (one per GPU) // take batches one by one and process them. // Every batch is small enough for its indices to fit in host memory. Batches are further divided into sub-batches which are // small enough that all their indices fit in device memory. // After a worker thread has taken a batch it generates all necessary indices and saves them in host memory using IndexCacheHost. // It then processes sub-batches one by one but first loading indices into IndexCacheDevice from IndexCacheHost and then finding // the overlaps. // Output formatting and writing is done by a separate thread. 
// Split work into batches std::vector<BatchOfIndices> batches_of_indices_vect = generate_batches_of_indices(parameters.query_indices_in_host_memory, parameters.query_indices_in_device_memory, parameters.target_indices_in_host_memory, parameters.target_indices_in_device_memory, parameters.query_parser, parameters.target_parser, parameters.index_size * 1'000'000, // value was in MB parameters.target_index_size * 1'000'000, // value was in MB parameters.all_to_all); const int64_t number_of_total_batches = get_size<int64_t>(batches_of_indices_vect); std::atomic<int64_t> number_of_processed_batches(0); ThreadsafeDataProvider<BatchOfIndices> batches_of_indices(std::move(batches_of_indices_vect)); // pairs of indices might be skipped if they cause out of memory errors std::atomic<int32_t> number_of_skipped_pairs_of_indices{0}; // explicitly assign one stream for computations and one for D2H and H2D copies of indices to each GPU std::vector<CudaStream> cuda_streams_computation; std::vector<CudaStream> cuda_streams_copy; // create worker threads (one thread per device) // these thread process batches_of_indices one by one std::vector<std::thread> worker_threads; for (int32_t device_id = 0; device_id < parameters.num_devices; ++device_id) { GW_CU_CHECK_ERR(cudaSetDevice(device_id)); cuda_streams_computation.emplace_back(make_cuda_stream()); cuda_streams_copy.emplace_back(make_cuda_stream()); worker_threads.emplace_back(worker_thread_function, device_id, std::ref(batches_of_indices), std::ref(parameters), std::ref(output_mutex), cuda_streams_computation.back().get(), cuda_streams_copy.back().get(), number_of_total_batches, std::ref(number_of_skipped_pairs_of_indices), std::ref(number_of_processed_batches)); } // wait for all work to be done for (auto& t : worker_threads) { // no need to sync, it should be done at the end of worker_threads t.join(); } if (number_of_skipped_pairs_of_indices != 0) { std::cerr << "NOTE: Skipped " << number_of_skipped_pairs_of_indices << " pairs of indices due to device out of memory error" << std::endl; } return 0; } } // namespace cudamapper } // namespace genomeworks } // namespace claraparabricks /// \brief main function /// main function cannot be in a namespace so using this function to call actual main function int main(int argc, char* argv[]) { return claraparabricks::genomeworks::cudamapper::main(argc, argv); }
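// A generic, std-only sketch of the producer/consumer handoff that the code
// above implements with ThreadsafeProducerConsumer: worker threads push
// finished OverlapsAndCigars, writer threads drain them, and an empty optional
// after the "last element" signal tells consumers to finish. All names below
// are hypothetical illustrations, not the GenomeWorks API.
#include <condition_variable>
#include <mutex>
#include <optional>
#include <queue>

template <typename T>
class SimpleProducerConsumer {
public:
    // Producer side: enqueue one element and wake a waiting consumer.
    void add_new_element(T value) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            queue_.push(std::move(value));
        }
        cv_.notify_one();
    }
    // Producer side: promise that no further elements will be pushed.
    void signal_pushed_last_element() {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            closed_ = true;
        }
        cv_.notify_all();
    }
    // Consumer side: block until an element is available or the queue is
    // closed and drained; an empty optional means the consumer can finish.
    std::optional<T> get_next_element() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return closed_ || !queue_.empty(); });
        if (queue_.empty()) {
            return std::nullopt;
        }
        T value = std::move(queue_.front());
        queue_.pop();
        return value;
    }
private:
    std::mutex mutex_;
    std::condition_variable cv_;
    std::queue<T> queue_;
    bool closed_ = false;
};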
#include <cstring> #include <cmath> #include <iostream> #include <vector> #include <memory> // std::align #include <chrono> #include <hip/hip_runtime.h> #define REPEAT 100 #define NPTS (2048*8) #define NDIM 128 #define M1W 128 #define M2W 16 #define M2H 16 #define M5W 16 #define M5H 16 #define M5R 4 #define M7W 32 #define M7H 32 #define M7R 4 // serial execution on a host void MatchC1(float *h_pts1, float *h_pts2, float *h_score, int *h_index) { std::memset(h_score, 0, sizeof(float)*NPTS); for (int p1=0;p1<NPTS;p1++) { for (int p2=0;p2<NPTS;p2++) { float score = 0.0f; for (int d=0;d<NDIM;d++) score += h_pts1[p1*NDIM + d]*h_pts2[p2*NDIM + d]; if (score>h_score[p1]) { h_score[p1] = score; h_index[p1] = p2; } } } } // verify host and device results void CheckMatches(int *h_index, int *h_index2, float *h_score, float *h_score2) { int ndiff = 0; for (int i=0;i<NPTS;i++) { ndiff += (h_index[i] != h_index2[i]); #ifdef DEBUG if (h_index[i] != h_index2[i]) std::cout << " " << i << " " << h_index[i] << " " << h_index2[i] << " " << h_score[i] << " " << h_score2[i] << std::endl; #endif } std::cout << "Number of incorrect matches: " << ndiff << std::endl; } __global__ void Match1(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { int p1 = threadIdx.x + M1W*blockIdx.x; float max_score = 0.0f; int index = -1; for (int p2=0;p2<NPTS;p2++) { float score = 0.0f; for (int d=0;d<NDIM;d++) score += d_pts1[p1*NDIM + d]*d_pts2[p2*NDIM + d]; if (score>max_score) { max_score = score; index = p2; } } d_score[p1] = max_score; d_index[p1] = index; } __global__ void Match2(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { __shared__ float buffer1[M2W*NDIM]; __shared__ float buffer2[M2H*NDIM]; __shared__ float scores[M2W*M2H]; int tx = threadIdx.x; int ty = threadIdx.y; int idx = tx + M2W*ty; int bp1 = M2W*blockIdx.x; if (ty<M2W) for (int d=tx;d<NDIM;d+=M2W) for (int j=ty;j<M2W;j+=M2H) buffer1[j*NDIM + d] = d_pts1[(bp1 + j)*NDIM + d]; __syncthreads(); float max_score = 0.0f; int index = -1; for (int bp2=0;bp2<NPTS;bp2+=M2H) { for (int d=tx;d<NDIM;d+=M2W) buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d]; __syncthreads(); float score = 0.0f; for (int d=0;d<NDIM;d++) score += buffer1[tx*NDIM + d]*buffer2[ty*NDIM + d]; scores[idx] = score; __syncthreads(); if (ty==0) { for (int i=0;i<M2H;i++) { if (scores[i*M2W + tx]>max_score) { max_score = scores[i*M2W + tx]; index = bp2 + i; } } } __syncthreads(); } if (ty==0) { d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match3(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { __shared__ float buffer1[M2W*(NDIM + 1)]; __shared__ float buffer2[M2H*NDIM]; __shared__ float scores[M2W*M2H]; int tx = threadIdx.x; int ty = threadIdx.y; int idx = tx + M2W*ty; int bp1 = M2W*blockIdx.x; if (ty<M2W) for (int d=tx;d<NDIM;d+=M2W) for (int j=ty;j<M2W;j+=M2H) buffer1[j*(NDIM + 1) + d] = d_pts1[(bp1 + j)*NDIM + d]; __syncthreads(); float max_score = 0.0f; int index = -1; for (int bp2=0;bp2<NPTS;bp2+=M2H) { for (int d=tx;d<NDIM;d+=M2W) buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d]; __syncthreads(); float score = 0.0f; for (int d=0;d<NDIM;d++) score += buffer1[tx*(NDIM + 1) + d]*buffer2[ty*NDIM + d]; scores[idx] = score; __syncthreads(); if (ty==0) { for (int i=0;i<M2H;i++) { if (scores[i*M2W + tx]>max_score) { max_score = scores[i*M2W + tx]; index = bp2 + i; } } 
} __syncthreads(); } if (ty==0) { d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match4(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { __shared__ float4 buffer1[M2W*(NDIM/4 + 1)]; __shared__ float4 buffer2[M2H*NDIM/4]; __shared__ float scores[M2W*M2H]; int tx = threadIdx.x; int ty = threadIdx.y; int idx = tx + M2W*ty; int bp1 = M2W*blockIdx.x; if (ty<M2W) for (int d=tx;d<NDIM/4;d+=M2W) for (int j=ty;j<M2W;j+=M2H) buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; __syncthreads(); float max_score = 0.0f; int index = -1; for (int bp2=0;bp2<NPTS;bp2+=M2H) { for (int d=tx;d<NDIM/4;d+=M2W) buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d]; __syncthreads(); float score = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1 = buffer1[tx*(NDIM/4 + 1) + d]; float4 v2 = buffer2[ty*(NDIM/4) + d]; score += v1.x*v2.x; score += v1.y*v2.y; score += v1.z*v2.z; score += v1.w*v2.w; } scores[idx] = score; __syncthreads(); if (ty==0) { for (int i=0;i<M2H;i++) { if (scores[i*M2W + tx]>max_score) { max_score = scores[i*M2W + tx]; index = bp2 + i; } } } __syncthreads(); } if (ty==0) { d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match5(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { __shared__ float4 buffer1[M5W*(NDIM/4 + 1)]; __shared__ float4 buffer2[M5H*NDIM/4]; __shared__ float scores[M5W*M5H]; int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M5W*blockIdx.x; if (ty<M5W) for (int d=tx;d<NDIM/4;d+=M5W) for (int j=ty;j<M5W;j+=M5H) buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; __syncthreads(); float max_score = 0.0f; int index = -1; for (int bp2=0;bp2<NPTS;bp2+=M5H) { for (int d=tx;d<NDIM/4;d+=M5W) buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d]; __syncthreads(); if (ty<M5H/M5R) { float score[M5R]; for (int dy=0;dy<M5R;dy++) score[dy] = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1 = buffer1[tx*(NDIM/4 + 1) + d]; for (int dy=0;dy<M5R;dy++) { float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d]; score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y; score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w; } } for (int dy=0;dy<M5R;dy++) scores[tx + M5W*(M5R*ty + dy)] = score[dy]; } __syncthreads(); if (ty==0) { for (int i=0;i<M5H;i++) { if (scores[i*M2W + tx]>max_score) { max_score = scores[i*M5W + tx]; index = bp2 + i; } } } __syncthreads(); } if (ty==0) { d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match6(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { __shared__ float4 buffer1[M5W*(NDIM/4 + 1)]; __shared__ float4 buffer2[M5H*NDIM/4]; int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M5W*blockIdx.x; if (ty<M5W) for (int d=tx;d<NDIM/4;d+=M5W) for (int j=ty;j<M5W;j+=M5H) buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; float max_score = 0.0f; int index = -1; for (int bp2=0;bp2<NPTS;bp2+=M5H) { for (int d=tx;d<NDIM/4;d+=M5W) buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d]; __syncthreads(); if (ty<M5H/M5R) { float score[M5R]; for (int dy=0;dy<M5R;dy++) score[dy] = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1 = buffer1[tx*(NDIM/4 + 1) + d]; for (int dy=0;dy<M5R;dy++) { float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d]; score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y; score[dy] += v1.z*v2.z; score[dy] += 
v1.w*v2.w; } } for (int dy=0;dy<M5R;dy++) { if (score[dy]>max_score) { max_score = score[dy]; index = bp2 + M5R*ty + dy; } } } __syncthreads(); } float *scores = (float*)buffer1; int *indices = (int*)&scores[M5W*M5H/M5R]; if (ty<M5H/M5R) { scores[ty*M5W + tx] = max_score; indices[ty*M5W + tx] = index; } __syncthreads(); if (ty==0) { max_score = scores[tx]; index = indices[tx]; for (int y=0;y<M5H/M5R;y++) if (scores[y*M5W + tx]>max_score) { max_score = scores[y*M5W + tx]; index = indices[y*M5W + tx]; } d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match7(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { __shared__ float4 buffer1[M7W*NDIM/4]; __shared__ float4 buffer2[M7H*NDIM/4]; int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M7W*blockIdx.x; for (int d=tx;d<NDIM/4;d+=M7W) for (int j=ty;j<M7W;j+=M7H/M7R) buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; float max_score = 0.0f; int index = -1; for (int bp2=0;bp2<NPTS;bp2+=M7H) { for (int d=tx;d<NDIM/4;d+=M7W) for (int j=ty;j<M7H;j+=M7H/M7R) buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d]; __syncthreads(); float score[M7R]; for (int dy=0;dy<M7R;dy++) score[dy] = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1 = buffer1[tx*NDIM/4 + (d + tx)%(NDIM/4)]; for (int dy=0;dy<M7R;dy++) { float4 v2 = buffer2[(M7R*ty + dy)*(NDIM/4) + d]; score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y; score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w; } } for (int dy=0;dy<M7R;dy++) { if (score[dy]>max_score) { max_score = score[dy]; index = bp2 + M7R*ty + dy; } } __syncthreads(); } float *scores = (float*)buffer1; int *indices = (int*)&scores[M7W*M7H/M7R]; scores[ty*M7W + tx] = max_score; indices[ty*M7W + tx] = index; __syncthreads(); if (ty==0) { max_score = scores[tx]; index = indices[tx]; for (int y=0;y<M7H/M7R;y++) if (scores[y*M7W + tx]>max_score) { max_score = scores[y*M7W + tx]; index = indices[y*M7W + tx]; } d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match8(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { __shared__ float4 buffer1[M7W*NDIM/4]; __shared__ float4 buffer2[M7H*NDIM/4]; int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M7W*blockIdx.x; for (int d=tx;d<NDIM/4;d+=M7W) for (int j=ty;j<M7W;j+=M7H/M7R) buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; #define NRX 2 float max_score[NRX]; int index[NRX]; for (int i=0;i<NRX;i++) { max_score[i] = 0.0f; index[i] = -1; } int idx = ty*M7W + tx; int ix = idx%(M7W/NRX); int iy = idx/(M7W/NRX); for (int bp2=0;bp2<NPTS;bp2+=M7H) { for (int d=tx;d<NDIM/4;d+=M7W) for (int j=ty;j<M7H;j+=M7H/M7R) buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d]; __syncthreads(); if (idx<M7W*M7H/M7R/NRX) { float score[M7R][NRX]; for (int dy=0;dy<M7R;dy++) for (int i=0;i<NRX;i++) score[dy][i] = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1[NRX]; for (int i=0;i<NRX;i++) v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)]; for (int dy=0;dy<M7R;dy++) { float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d]; for (int i=0;i<NRX;i++) { score[dy][i] += v1[i].x*v2.x; score[dy][i] += v1[i].y*v2.y; score[dy][i] += v1[i].z*v2.z; score[dy][i] += v1[i].w*v2.w; } } } for (int dy=0;dy<M7R;dy++) { for (int i=0;i<NRX;i++) { if (score[dy][i]>max_score[i]) { max_score[i] = score[dy][i]; index[i] = bp2 + M7R*iy + dy; } } } } __syncthreads(); 
} float *scores = (float*)buffer1; int *indices = (int*)&scores[M7W*M7H/M7R]; if (idx<M7W*M7H/M7R/NRX) { for (int i=0;i<NRX;i++) { scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i]; indices[iy*M7W + (M7W/NRX)*i + ix] = index[i]; } } __syncthreads(); if (ty==0) { float max_score = scores[tx]; int index = indices[tx]; for (int y=0;y<M7H/M7R;y++) if (scores[y*M7W + tx]>max_score) { max_score = scores[y*M7W + tx]; index = indices[y*M7W + tx]; } d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match9(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { #define NRX 2 __shared__ float4 buffer1[M7W*NDIM/4]; __shared__ float4 buffer2[M7H*NDIM/4]; int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M7W*blockIdx.x; for (int d=tx;d<NDIM/4;d+=M7W) for (int j=ty;j<M7W;j+=M7H/M7R/NRX) buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; float max_score[NRX]; int index[NRX]; for (int i=0;i<NRX;i++) { max_score[i] = 0.0f; index[i] = -1; } int idx = ty*M7W + tx; int ix = idx%(M7W/NRX); int iy = idx/(M7W/NRX); for (int bp2=0;bp2<NPTS;bp2+=M7H) { for (int d=tx;d<NDIM/4;d+=M7W) for (int j=ty;j<M7H;j+=M7H/M7R/NRX) buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d]; __syncthreads(); float score[M7R][NRX]; for (int dy=0;dy<M7R;dy++) for (int i=0;i<NRX;i++) score[dy][i] = 0.0f; for (int d=0;d<NDIM/4;d++) { float4 v1[NRX]; for (int i=0;i<NRX;i++) v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)]; for (int dy=0;dy<M7R;dy++) { float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d]; for (int i=0;i<NRX;i++) { score[dy][i] += v1[i].x*v2.x; score[dy][i] += v1[i].y*v2.y; score[dy][i] += v1[i].z*v2.z; score[dy][i] += v1[i].w*v2.w; } } } for (int dy=0;dy<M7R;dy++) { for (int i=0;i<NRX;i++) { if (score[dy][i]>max_score[i]) { max_score[i] = score[dy][i]; index[i] = bp2 + M7R*iy + dy; } } } __syncthreads(); } float *scores = (float*)buffer1; int *indices = (int*)&scores[M7W*M7H/M7R]; if (idx<M7W*M7H/M7R/NRX) { for (int i=0;i<NRX;i++) { scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i]; indices[iy*M7W + (M7W/NRX)*i + ix] = index[i]; } } __syncthreads(); if (ty==0) { float max_score = scores[tx]; int index = indices[tx]; for (int y=0;y<M7H/M7R;y++) if (scores[y*M7W + tx]>max_score) { max_score = scores[y*M7W + tx]; index = indices[y*M7W + tx]; } d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } __global__ void Match10(const float *__restrict d_pts1, const float *__restrict d_pts2, float *__restrict d_score, int *__restrict d_index) { #define NRX 2 #define NUM (NRX*M7R) // 32*8 threads __shared__ float4 buffer1[M7W*NDIM/4]; // 32*32 __shared__ float4 buffer2[M7H*NUM]; // 32*8 int tx = threadIdx.x; int ty = threadIdx.y; int bp1 = M7W*blockIdx.x; for (int d=tx;d<NDIM/4;d+=M7W) for (int j=ty;j<M7W;j+=M7H/M7R) buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; float max_score[NRX]; int index[NRX]; for (int i=0;i<NRX;i++) { max_score[i] = 0.0f; index[i] = -1; } int idx = ty*M7W + tx; int ix = idx%(M7W/NRX); int iy = idx/(M7W/NRX); for (int bp2=0;bp2<NPTS;bp2+=M7H) { float score[M7R][NRX]; for (int dy=0;dy<M7R;dy++) for (int i=0;i<NRX;i++) score[dy][i] = 0.0f; int d = (idx%NUM); int j = (idx/NUM); buffer2[j*NUM + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d]; __syncthreads(); for (int dp=0;dp<NDIM/4;dp+=NUM) { float4 temp; if (dp<(NDIM/4-NUM)) temp = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + dp + d + NUM]; if (idx<M7W*M7H/M7R/NRX) { for 
(int d=0;d<NUM;d++) { float4 v1[NRX]; #pragma unroll for (int i=0;i<NRX;i++) v1[i] = buffer1[(((M7W/NRX)*i + ix)<<5) + ((dp + d + (M7W/NRX)*i + ix)&31)]; //v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (dp + d + (M7W/NRX)*i + ix)%(NDIM/4)]; #pragma unroll for (int dy=0;dy<M7R;dy++) { float4 v2 = buffer2[(M7R*iy + dy)*NUM + d]; #pragma unroll for (int i=0;i<NRX;i++) { score[dy][i] += v1[i].x*v2.x; score[dy][i] += v1[i].y*v2.y; score[dy][i] += v1[i].z*v2.z; score[dy][i] += v1[i].w*v2.w; } } } } __syncthreads(); if (dp<(NDIM/4-NUM)) { buffer2[j*NUM + d] = temp; __syncthreads(); } } for (int dy=0;dy<M7R;dy++) { for (int i=0;i<NRX;i++) { if (score[dy][i]>max_score[i]) { max_score[i] = score[dy][i]; index[i] = bp2 + M7R*iy + dy; } } } __syncthreads(); } float *scores = (float*)buffer1; int *indices = (int*)&scores[M7W*M7H/M7R]; if (idx<M7W*M7H/M7R/NRX) { for (int i=0;i<NRX;i++) { scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i]; indices[iy*M7W + (M7W/NRX)*i + ix] = index[i]; } } __syncthreads(); if (ty==0) { float max_score = scores[tx]; int index = indices[tx]; for (int y=0;y<M7H/M7R;y++) if (scores[y*M7W + tx]>max_score) { max_score = scores[y*M7W + tx]; index = indices[y*M7W + tx]; } d_score[bp1 + tx] = max_score; d_index[bp1 + tx] = index; } } int main(int argc, char *argv[]) { size_t space = sizeof(float)*NPTS*NDIM*2 + 8; std::vector<float> data(NPTS*NDIM*2 + 8); void *ptr = (void*)&data[0]; float *h_pts1 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space); ptr = (void*)&data[NPTS*NDIM]; float *h_pts2 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space); std::vector<int> h_index(NPTS); std::vector<float> h_score(NPTS); std::vector<int> h_index2(NPTS); std::vector<float> h_score2(NPTS); float *d_pts1, *d_pts2, *d_score; int *d_index; std::cout << std::endl; int psize = sizeof(float)*NPTS; std::cout << "Data size: " << 2.0*psize*NDIM/1024/1024 << " MB" << std::endl; hipMalloc((void **)&d_pts1, psize*NDIM); hipMalloc((void **)&d_pts2, psize*NDIM); hipMalloc((void **)&d_index, psize); hipMalloc((void **)&d_score, psize); for (int i=0;i<NPTS;i++) { float sum1 = 0.0f, sum2 = 0.0f; for (int d=0;d<NDIM;d++) { sum1 += h_pts1[i*NDIM + d] = (float)rand()/RAND_MAX; sum2 += h_pts2[i*NDIM + d] = (float)rand()/RAND_MAX; } sum1 = sqrt(NDIM)/sum1; sum2 = sqrt(NDIM)/sum2; for (int d=0;d<NDIM;d++) { h_pts1[i*NDIM + d] *= sum1; h_pts2[i*NDIM + d] *= sum2; } } auto start = std::chrono::high_resolution_clock::now(); MatchC1(h_pts1, h_pts2, h_score.data(), h_index.data()); auto end = std::chrono::high_resolution_clock::now(); auto elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); auto delay = elapsed_seconds.count() * 1000; std::cout << "MatchCPU1: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(d_pts1, h_pts1, psize*NDIM, hipMemcpyHostToDevice); hipMemcpy(d_pts2, h_pts2, psize*NDIM, hipMemcpyHostToDevice); dim3 blocks, threads; blocks = dim3(NPTS/M1W); threads = dim3(M1W); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match1, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU1: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, 
hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M2W); threads = dim3(M2W, M2H); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match2, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU2: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M2W); threads = dim3(M2W, M2H); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match3, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU3: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M2W); threads = dim3(M2W, M2H); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match4, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU4: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M5W); threads = dim3(M5W, M5H); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match5, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU5: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M5W); threads = dim3(M5W, M5H); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match6, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = 
std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU6: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M7W); threads = dim3(M7W, M7H/M7R); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match7, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU7: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M7W); threads = dim3(M7W, M7H/M7R); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match8, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU8: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M7W); threads = dim3(M7W, M7H/M7R/2); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match9, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU9: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); blocks = dim3(NPTS/M7W); threads = dim3(M7W, M7H/M7R); start = std::chrono::high_resolution_clock::now(); for (int i = 0; i < REPEAT; i++) hipLaunchKernelGGL(Match10, blocks, threads, 0, 0, d_pts1, d_pts2, d_score, d_index); hipDeviceSynchronize(); end = std::chrono::high_resolution_clock::now(); elapsed_seconds = std::chrono::duration_cast<std::chrono::duration<double>>(end - start); delay = elapsed_seconds.count() * 1000 / REPEAT; std::cout << "MatchGPU10: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl; hipMemcpy(h_index2.data(), d_index, psize, hipMemcpyDeviceToHost); hipMemcpy(h_score2.data(), d_score, psize, hipMemcpyDeviceToHost); CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data()); hipFree(d_pts1); hipFree(d_pts2); hipFree(d_index); 
hipFree(d_score); return 0; }
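/*
  Illustrative sketch (not part of the original benchmark): main() above repeats
  the same time/launch/sync/copy/verify sequence for every Match kernel, and the
  pattern could be factored into one helper.  The name time_and_check and the
  lambda-based interface are assumptions introduced here for illustration; the
  HIP and <chrono> calls are the same ones main() already uses.
*/
template <typename LaunchFn>
static void time_and_check(const char *name, LaunchFn launch_once,
                           float *d_score, int *d_index,
                           float *h_score, int *h_index,     // CPU reference results
                           float *h_score2, int *h_index2)   // buffers for GPU results
{
  const int psize = sizeof(float)*NPTS;
  auto start = std::chrono::high_resolution_clock::now();
  for (int i = 0; i < REPEAT; i++)
    launch_once();                      // caller performs one kernel launch
  hipDeviceSynchronize();
  auto end = std::chrono::high_resolution_clock::now();
  double delay = std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count()
                 * 1000 / REPEAT;
  std::cout << name << ": " << delay << " ms "
            << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
  hipMemcpy(h_index2, d_index, psize, hipMemcpyDeviceToHost);
  hipMemcpy(h_score2, d_score, psize, hipMemcpyDeviceToHost);
  CheckMatches(h_index, h_index2, h_score, h_score2);
}
/* Example use, equivalent to the MatchGPU7 block in main():
   time_and_check("MatchGPU7",
                  [&]() { hipLaunchKernelGGL(Match7, dim3(NPTS/M7W), dim3(M7W, M7H/M7R),
                                             0, 0, d_pts1, d_pts2, d_score, d_index); },
                  d_score, d_index, h_score.data(), h_index.data(),
                  h_score2.data(), h_index2.data());
*/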
#define CUDA_NUM_THREADS 512 // Computer the number of threads needed in GPU inline int get_n_threads(int n) { const int pow_2 = std::log(static_cast<float>(n)) / std::log(2.0); return max(min(1 << pow_2, CUDA_NUM_THREADS), 1); } __device__ int compute_index( int offset_x, int offset_y, int offset_z, int len_y, int len_z) { return offset_x * len_y * len_z + offset_y * len_z + offset_z; } __device__ float compute_weight(float x, float x0) { return 1 - abs(x - x0); } __global__ void gridding_dist_kernel(int n_grid_vertices, int n_pts, float min_x, float min_y, float min_z, int len_y, int len_z, const float *__restrict__ ptcloud, float *__restrict__ grid_weights, float *__restrict__ grid_pt_weights, int *__restrict__ grid_pt_indexes) { int batch_index = blockIdx.x; int index = threadIdx.x; int stride = blockDim.x; ptcloud += batch_index * n_pts * 3; grid_weights += batch_index * n_grid_vertices * 8; grid_pt_weights += batch_index * n_pts * 24; grid_pt_indexes += batch_index * n_pts * 8; for (int j = index; j < n_pts; j += stride) { float pt_x = ptcloud[j * 3 + 0]; float pt_y = ptcloud[j * 3 + 1]; float pt_z = ptcloud[j * 3 + 2]; int lower_x = std::floor(pt_x); int upper_x = std::ceil(pt_x); if (lower_x == upper_x) { upper_x += 1; } int lower_y = std::floor(pt_y); int upper_y = std::ceil(pt_y); if (lower_y == upper_y) { upper_y += 1; } int lower_z = std::floor(pt_z); int upper_z = std::ceil(pt_z); if (lower_z == upper_z) { upper_z += 1; } int lx_offset = lower_x - min_x, ux_offset = upper_x - min_x; int ly_offset = lower_y - min_y, uy_offset = upper_y - min_y; int lz_offset = lower_z - min_z, uz_offset = upper_z - min_z; // Compute weights and corresponding positions, a loop for 8 points // LLL -> Lower X, Lower Y, Lower Z grid_pt_indexes[j * 8 + 0] = compute_index(lx_offset, ly_offset, lz_offset, len_y, len_z) * 8 + 0; grid_pt_weights[j * 24 + 0] = compute_weight(pt_x, lower_x); grid_pt_weights[j * 24 + 1] = compute_weight(pt_y, lower_y); grid_pt_weights[j * 24 + 2] = compute_weight(pt_z, lower_z); // LLU -> Lower X, Lower Y, Upper Z grid_pt_indexes[j * 8 + 1] = compute_index(lx_offset, ly_offset, uz_offset, len_y, len_z) * 8 + 1; grid_pt_weights[j * 24 + 3] = compute_weight(pt_x, lower_x); grid_pt_weights[j * 24 + 4] = compute_weight(pt_y, lower_y); grid_pt_weights[j * 24 + 5] = compute_weight(pt_z, upper_z); // LUL -> Lower X, Upper Y, Lower Z grid_pt_indexes[j * 8 + 2] = compute_index(lx_offset, uy_offset, lz_offset, len_y, len_z) * 8 + 2; grid_pt_weights[j * 24 + 6] = compute_weight(pt_x, lower_x); grid_pt_weights[j * 24 + 7] = compute_weight(pt_y, upper_y); grid_pt_weights[j * 24 + 8] = compute_weight(pt_z, lower_z); // LUU -> Lower X, Upper Y, Upper Z grid_pt_indexes[j * 8 + 3] = compute_index(lx_offset, uy_offset, uz_offset, len_y, len_z) * 8 + 3; grid_pt_weights[j * 24 + 9] = compute_weight(pt_x, lower_x); grid_pt_weights[j * 24 + 10] = compute_weight(pt_y, upper_y); grid_pt_weights[j * 24 + 11] = compute_weight(pt_z, upper_z); // ULL -> Upper X, Lower Y, Lower Z grid_pt_indexes[j * 8 + 4] = compute_index(ux_offset, ly_offset, lz_offset, len_y, len_z) * 8 + 4; grid_pt_weights[j * 24 + 12] = compute_weight(pt_x, upper_x); grid_pt_weights[j * 24 + 13] = compute_weight(pt_y, lower_y); grid_pt_weights[j * 24 + 14] = compute_weight(pt_z, lower_z); // ULU -> Upper X, Lower Y, Upper Z grid_pt_indexes[j * 8 + 5] = compute_index(ux_offset, ly_offset, uz_offset, len_y, len_z) * 8 + 5; grid_pt_weights[j * 24 + 15] = compute_weight(pt_x, upper_x); grid_pt_weights[j * 24 + 16] = 
compute_weight(pt_y, lower_y); grid_pt_weights[j * 24 + 17] = compute_weight(pt_z, upper_z); // UUL -> Upper X, Upper Y, Lower Z grid_pt_indexes[j * 8 + 6] = compute_index(ux_offset, uy_offset, lz_offset, len_y, len_z) * 8 + 6; grid_pt_weights[j * 24 + 18] = compute_weight(pt_x, upper_x); grid_pt_weights[j * 24 + 19] = compute_weight(pt_y, upper_y); grid_pt_weights[j * 24 + 20] = compute_weight(pt_z, lower_z); // UUU -> Upper X, Upper Y, Upper Z grid_pt_indexes[j * 8 + 7] = compute_index(ux_offset, uy_offset, uz_offset, len_y, len_z) * 8 + 7; grid_pt_weights[j * 24 + 21] = compute_weight(pt_x, upper_x); grid_pt_weights[j * 24 + 22] = compute_weight(pt_y, upper_y); grid_pt_weights[j * 24 + 23] = compute_weight(pt_z, upper_z); } __syncthreads(); int gvtx_idx = 0; for (int j = index; j < n_pts; j += stride) { // LLL -> Lower X, Lower Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 0]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 0] * grid_pt_weights[j * 24 + 1] * grid_pt_weights[j * 24 + 2]); // LLU -> Lower X, Lower Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 1]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 3] * grid_pt_weights[j * 24 + 4] * grid_pt_weights[j * 24 + 5]); // LUL -> Lower X, Upper Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 2]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 6] * grid_pt_weights[j * 24 + 7] * grid_pt_weights[j * 24 + 8]); // LUU -> Lower X, Upper Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 3]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 9] * grid_pt_weights[j * 24 + 10] * grid_pt_weights[j * 24 + 11]); // ULL -> Upper X, Lower Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 4]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 12] * grid_pt_weights[j * 24 + 13] * grid_pt_weights[j * 24 + 14]); // ULU -> Upper X, Lower Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 5]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 15] * grid_pt_weights[j * 24 + 16] * grid_pt_weights[j * 24 + 17]); // UUL -> Upper X, Upper Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 6]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 18] * grid_pt_weights[j * 24 + 19] * grid_pt_weights[j * 24 + 20]); // UUU -> Upper X, Upper Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 7]; atomicAdd(&(grid_weights[gvtx_idx]), grid_pt_weights[j * 24 + 21] * grid_pt_weights[j * 24 + 22] * grid_pt_weights[j * 24 + 23]); } } std::vector<torch::Tensor> gridding_distance_cuda_forward(float min_x, float max_x, float min_y, float max_y, float min_z, float max_z, torch::Tensor ptcloud, cudaStream_t stream) { int batch_size = ptcloud.size(0); int n_pts = ptcloud.size(1); int len_x = max_x - min_x + 1; int len_y = max_y - min_y + 1; int len_z = max_z - min_z + 1; int n_grid_vertices = len_x * len_y * len_z; torch::Tensor grid_weights = torch::zeros({batch_size, n_grid_vertices, 8}, torch::CUDA(torch::kFloat)); torch::Tensor grid_pt_weights = torch::zeros({batch_size, n_pts, 8, 3}, torch::CUDA(torch::kFloat)); torch::Tensor grid_pt_indexes = torch::zeros({batch_size, n_pts, 8}, torch::CUDA(torch::kInt)); gridding_dist_kernel<<<batch_size, get_n_threads(n_pts), 0, stream>>>( n_grid_vertices, n_pts, min_x, min_y, min_z, len_y, len_z, ptcloud.data_ptr<float>(), grid_weights.data_ptr<float>(), grid_pt_weights.data_ptr<float>(), grid_pt_indexes.data_ptr<int>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in gridding_distance_cuda_forward: %s\n", cudaGetErrorString(err)); } return 
{grid_weights, grid_pt_weights, grid_pt_indexes}; } __global__ void gridding_dist_grad_kernel( int n_grid_vertices, int n_pts, const float *__restrict__ grid_pt_weights, const int *__restrict__ grid_pt_indexes, const float *__restrict__ grad_grid, float *__restrict__ grad_ptcloud) { int batch_index = blockIdx.x; int index = threadIdx.x; int stride = blockDim.x; grid_pt_weights += batch_index * n_pts * 24; grid_pt_indexes += batch_index * n_pts * 8; grad_grid += batch_index * n_grid_vertices * 8; grad_ptcloud += batch_index * n_pts * 3; int gvtx_idx = 0; float grad_vtx = 0, x_weights = 0, y_weights = 0, z_weights = 0; for (int j = index; j < n_pts; j += stride) { // Compute gradient for the corresponding positions, a loop for 8 points // LLL -> Lower X, Lower Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 0]; grad_vtx = grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 0]; y_weights = grid_pt_weights[j * 24 + 1]; z_weights = grid_pt_weights[j * 24 + 2]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), -grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), -grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), -grad_vtx * x_weights * y_weights); // LLU -> Lower X, Lower Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 1]; grad_vtx = grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 3]; y_weights = grid_pt_weights[j * 24 + 4]; z_weights = grid_pt_weights[j * 24 + 5]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), -grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), -grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), grad_vtx * x_weights * y_weights); // LUL -> Lower X, Upper Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 2]; grad_vtx = grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 6]; y_weights = grid_pt_weights[j * 24 + 7]; z_weights = grid_pt_weights[j * 24 + 8]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), -grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), -grad_vtx * x_weights * y_weights); // LUU -> Lower X, Upper Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 3]; grad_vtx = grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 9]; y_weights = grid_pt_weights[j * 24 + 10]; z_weights = grid_pt_weights[j * 24 + 11]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), -grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), grad_vtx * x_weights * y_weights); // ULL -> Upper X, Lower Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 4]; grad_vtx = grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 12]; y_weights = grid_pt_weights[j * 24 + 13]; z_weights = grid_pt_weights[j * 24 + 14]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), -grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), -grad_vtx * x_weights * y_weights); // ULU -> Upper X, Lower Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 5]; grad_vtx = grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 15]; y_weights = grid_pt_weights[j * 24 + 16]; z_weights = grid_pt_weights[j * 24 + 17]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), -grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), grad_vtx * x_weights * y_weights); // UUL -> Upper X, Upper Y, Lower Z gvtx_idx = grid_pt_indexes[j * 8 + 6]; grad_vtx = 
grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 18]; y_weights = grid_pt_weights[j * 24 + 19]; z_weights = grid_pt_weights[j * 24 + 20]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), -grad_vtx * x_weights * y_weights); // UUU -> Upper X, Upper Y, Upper Z gvtx_idx = grid_pt_indexes[j * 8 + 7]; grad_vtx = grad_grid[gvtx_idx]; x_weights = grid_pt_weights[j * 24 + 21]; y_weights = grid_pt_weights[j * 24 + 22]; z_weights = grid_pt_weights[j * 24 + 23]; atomicAdd(&(grad_ptcloud[j * 3 + 0]), grad_vtx * y_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 1]), grad_vtx * x_weights * z_weights); atomicAdd(&(grad_ptcloud[j * 3 + 2]), grad_vtx * x_weights * y_weights); } } torch::Tensor gridding_distance_cuda_backward(torch::Tensor grid_pt_weights, torch::Tensor grid_pt_indexes, torch::Tensor grad_grid, cudaStream_t stream) { int batch_size = grad_grid.size(0); int n_grid_vertices = grad_grid.size(1); int n_pts = grid_pt_indexes.size(1); torch::Tensor grad_ptcloud = torch::zeros({batch_size, n_pts, 3}, torch::CUDA(torch::kFloat)); gridding_dist_grad_kernel<<<batch_size, get_n_threads(n_pts), 0, stream>>>( n_grid_vertices, n_pts, grid_pt_weights.data_ptr<float>(), grid_pt_indexes.data_ptr<int>(), grad_grid.data_ptr<float>(), grad_ptcloud.data_ptr<float>()); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error in gridding_distance_cuda_backward: %s\n", cudaGetErrorString(err)); } return grad_ptcloud; }
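/*
  Hedged host-side reference (not part of the extension above) illustrating the
  trilinear weighting that gridding_dist_kernel applies per point: each of the 8
  surrounding grid vertices receives w_x*w_y*w_z with w = 1 - |p - vertex|, and
  the 8 products sum to 1.  The backward kernel's +/- signs follow from
  d(1 - |p - v|)/dp being -1 at the lower vertex and +1 at the upper one.
  The function names and the test point below are illustrative assumptions only.
*/
#include <cmath>
#include <cstdio>

static float trilinear_weight(float x, float x0) { return 1.0f - std::fabs(x - x0); }

static void trilinear_weight_check() {
  const float pt[3] = {0.3f, 1.7f, 2.5f};      // arbitrary example point
  float lower[3], upper[3];
  for (int k = 0; k < 3; ++k) {
    lower[k] = std::floor(pt[k]);
    upper[k] = std::ceil(pt[k]);
    if (lower[k] == upper[k]) upper[k] += 1.0f;  // same tie-break as the kernel
  }
  float total = 0.0f;
  // Corner bits select lower/upper per axis in the kernel's LLL..UUU order.
  for (int corner = 0; corner < 8; ++corner) {
    const float vx = (corner & 4) ? upper[0] : lower[0];
    const float vy = (corner & 2) ? upper[1] : lower[1];
    const float vz = (corner & 1) ? upper[2] : lower[2];
    const float w = trilinear_weight(pt[0], vx) *
                    trilinear_weight(pt[1], vy) *
                    trilinear_weight(pt[2], vz);
    std::printf("corner %d -> weight %.4f\n", corner, w);
    total += w;
  }
  std::printf("sum of weights = %.4f (expected 1.0)\n", total);
}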
#include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } void caffe_gpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } void caffe_gpu_axpy(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } void caffe_gpu_scal(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } void caffe_gpu_axpby(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal(N, beta, Y); caffe_gpu_axpy(N, alpha, X, Y); } void caffe_gpu_dot(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } void caffe_gpu_asum(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } void caffe_gpu_scale(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } __global__ void set_kernel(const int n, const float alpha, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } void caffe_gpu_set(const int N, const float alpha, float* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(float) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } __global__ void add_scalar_kernel(const int n, const float alpha, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } __global__ void add_kernel(const int n, const float alpha, const float* a, const float beta, const float* b, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha * a[index] + beta * b[index]; } } void caffe_gpu_add(const int N, const float alpha, const float* a, const float beta, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>> (N, alpha, a, beta, b, y); } __global__ void sub_kernel(const int n, const float* a, const float* b, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } void caffe_gpu_sub(const int N, const float* a, const float* b, 
float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } __global__ void mul_kernel(const int n, const float* a, const float* b, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } void caffe_gpu_mul(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } __global__ void add_mul_kernel(const int n, const float* a, const float* b, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = y[index] + a[index] * b[index]; } } void caffe_gpu_add_mul(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_mul_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } __global__ void div_kernel(const int n, const float* a, const float* b, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } void caffe_gpu_div(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } __global__ void abs_kernel(const int n, const float* a, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } void caffe_gpu_abs(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } __global__ void exp_kernel(const int n, const float* a, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } void caffe_gpu_exp(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } __global__ void log_kernel(const int n, const float* a, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } void caffe_gpu_log(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } __global__ void powx_kernel(const int n, const float* a, const float alpha, float* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } void caffe_gpu_powx(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, a, alpha, y); } void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } void caffe_gpu_rng_uniform(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } __global__ void box_filter_x_kernel(const int num, const int channels, const int height, const int width,int radius, const float * id, float *od) { CUDA_KERNEL_LOOP(ind, height*channels*num) { float sum=0; for (int w = 0; w <= min(radius,width-1); w++) sum += id[ind*width+w]; od[ind*width+0] = sum; for (int w = 1; w < width-radius; w++) { sum += id[ind*width+w+radius]; if(w-radius > 0) sum -= id[ind*width+w-radius-1]; od[ind*width+w] = sum; } for (int w = max(width-radius,1); w < width; w++) { 
if(w-radius > 0) sum -= id[ind*width+w-radius-1]; od[ind*width+w] = sum; } } } __global__ void box_filter_y_kernel(const int num,const int channels, const int height,const int width, const int radius,const float area, const float * id, float *od) { CUDA_KERNEL_LOOP(ind, width*channels*num) { int c=ind / width; int w=ind % width; float sum=0; for (int h = 0; h <= min(radius,height-1); h++) sum += id[(c*height+h)*width+w]; od[(c*height+0)*width+w] = sum / area; for (int h = 1; h < height-radius; h++) { sum += id[(c*height+h+radius)*width+w]; if(h-radius > 0) sum -= id[(c*height+h-radius-1)*width+w]; od[(c*height+h)*width+w] = sum / area; } for (int h= max(height - radius,1); h < height; h++) { if(h-radius > 0) sum -= id[(c*height+h-radius-1)*width+w]; od[(c*height+h)*width+w] = sum / area; } } } void box_filter_gpu(const int num, const int channels, const int height,const int width, const int radius, const float *id, float *od,float * buffer) { box_filter_x_kernel<<<CAFFE_GET_BLOCKS(height*channels*num), CAFFE_CUDA_NUM_THREADS>>> (num,channels, height,width, radius, id, buffer); box_filter_y_kernel<<<CAFFE_GET_BLOCKS(width*channels*num), CAFFE_CUDA_NUM_THREADS>>> (num,channels, height,width, radius,min((2*radius+1),height) * min((2*radius+1),width), buffer, od); } __global__ void AdamUpdate(int N, float* g, float* m, float* v, float beta1, float beta2, float eps_hat, float corrected_local_rate) { CUDA_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = m[i] = m[i]*beta1 + gi*(1-beta1); float vi = v[i] = v[i]*beta2 + gi*gi*(1-beta2); g[i] = corrected_local_rate * mi / (sqrt(vi) + eps_hat); } } void adam_update_gpu(int N, float* g, float* m, float* v, float beta1, float beta2, float eps_hat, float corrected_local_rate) { AdamUpdate <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>> (N, g, m, v, beta1, beta2, eps_hat, corrected_local_rate); CUDA_POST_KERNEL_CHECK; } __global__ void sum_kernel(int N, const float* in, float* sum) { __shared__ float buffer[CAFFE_CUDA_NUM_THREADS]; buffer[threadIdx.x]=0; for (int i = threadIdx.x; i < N; i += blockDim.x) buffer[threadIdx.x] += in[i]; __syncthreads(); for (int s = blockDim.x/2; s > 0; s >>= 1) { if (threadIdx.x < s) buffer[threadIdx.x] += buffer[threadIdx.x+s]; __syncthreads(); } if (threadIdx.x == 0) sum[0] = buffer[0]; } float caffe_gpu_sum(const int N, const float *in) { int gpu_id_; CUDA_CHECK(cudaGetDevice(&gpu_id_)); int i; for (i=0;i<Caffe::GPUs.size();i++) if (Caffe::GPUs[i] == gpu_id_) break; gpu_id_ = i; float cpu_sum; sum_kernel<<<1, CAFFE_CUDA_NUM_THREADS>>> (N, in, Caffe::gpu_scalar()[gpu_id_]); CUDA_CHECK(cudaMemcpy(&cpu_sum, Caffe::gpu_scalar()[gpu_id_], sizeof(float), cudaMemcpyDeviceToHost)); return cpu_sum; } __global__ void square_sum_kernel(int N, const float* in, float* sum) { __shared__ float buffer[CAFFE_CUDA_NUM_THREADS]; buffer[threadIdx.x]=0; for (int i = threadIdx.x; i < N; i += blockDim.x) buffer[threadIdx.x] += in[i]*in[i]; __syncthreads(); for (int s = blockDim.x/2; s > 0; s >>= 1) { if (threadIdx.x < s) buffer[threadIdx.x] += buffer[threadIdx.x+s]; __syncthreads(); } if (threadIdx.x == 0) sum[0] = buffer[0]; } float caffe_gpu_square_sum(const int N, const float *in) { int gpu_id_; CUDA_CHECK(cudaGetDevice(&gpu_id_)); int i; for (i=0;i<Caffe::GPUs.size();i++) if (Caffe::GPUs[i] == gpu_id_) break; gpu_id_ = i; float cpu_sum; square_sum_kernel<<<1, CAFFE_CUDA_NUM_THREADS>>> (N, in, Caffe::gpu_scalar()[gpu_id_]); CUDA_CHECK(cudaMemcpy(&cpu_sum, Caffe::gpu_scalar()[gpu_id_], sizeof(float), cudaMemcpyDeviceToHost)); return 
cpu_sum; } } // namespace caffe
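/*
  Minimal host-side model (illustration only, not part of Caffe) of the shared
  memory tree reduction used by sum_kernel / square_sum_kernel above: each of
  blockDim.x "threads" first accumulates a strided partial sum, then the
  partials are folded in halves, mirroring the  s = blockDim.x/2; s >>= 1  loop.
  Assumes num_threads is a power of two, as CAFFE_CUDA_NUM_THREADS is.
*/
#include <cstddef>
#include <vector>

static float tree_reduce_model(const std::vector<float> &in, int num_threads) {
  std::vector<float> buffer(num_threads, 0.0f);
  // Strided accumulation: "thread" t handles elements t, t+num_threads, ...
  for (int t = 0; t < num_threads; ++t)
    for (std::size_t i = t; i < in.size(); i += num_threads)
      buffer[t] += in[i];
  // Halving reduction: after each step the first s entries hold partial sums.
  for (int s = num_threads / 2; s > 0; s >>= 1)
    for (int t = 0; t < s; ++t)
      buffer[t] += buffer[t + s];
  return buffer[0];   // the value thread 0 writes to sum[0] on the GPU
}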
#include "k2/csrc/array_ops.h" #include "k2/csrc/device_guard.h" #include "k2/csrc/fsa_algo.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/hash.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/thread_pool.h" namespace k2 { namespace intersect_pruned_internal { /* Information associated with a state active on a particular frame.. */ struct StateInfo { /* abs_state_id is the state-index in a_fsas_. Note: the ind0 in here won't necessarily match the ind0 within FrameInfo::state if a_fsas_stride_ == 0. */ int32_t a_fsas_state_idx01; /* Caution: this is ACTUALLY A FLOAT that has been bit-twiddled using FloatToOrderedInt/OrderedIntToFloat so we can use atomic max. It represents a Viterbi-style 'forward probability'. (Viterbi, meaning: we use max not log-sum). You can take the pruned lattice and rescore it if you want log-sum. */ int32_t forward_loglike; /* Note: this `backward_loglike` is the best score of any path from here to the end, minus the best path in the overall FSA, i.e. it's the backward score you get if, at the final-state, you set backward_loglike == -forward_loglike. So backward_loglike + OrderedIntToFloat(forward_loglike) <= 0, and you can treat it somewhat like a posterior (except they don't sum to one as we're using max, not log-add). */ float backward_loglike; }; struct ArcInfo { // for an arc that wasn't pruned away... int32_t a_fsas_arc_idx012; // the arc-index in a_fsas_. float arc_loglike; // loglike on this arc: equals loglike from data // (nnet output, == b_fsas), plus loglike from // the arc in a_fsas. union { // these 2 different ways of storing the index of the destination state // are used at different stages of the algorithm; we give them different // names for clarity. int32_t dest_a_fsas_state_idx01; // The destination-state as an index // into a_fsas_. int32_t dest_info_state_idx1; // The destination-state as an idx1 into the // next FrameInfo's `arcs` or `states`, // omitting the FSA-index which can be worked // out from the structure of this frame's // ArcInfo. } u; float end_loglike; // loglike at the end of the arc just before // (conceptually) it joins the destination state. }; /* static std::ostream &operator<<(std::ostream &os, const StateInfo &s) { os << "StateInfo{" << s.a_fsas_state_idx01 << "," << OrderedIntToFloat(s.forward_loglike) << "," << s.backward_loglike << "}"; return os; } static std::ostream &operator<<(std::ostream &os, const ArcInfo &a) { os << "ArcInfo{" << a.a_fsas_arc_idx012 << "," << a.arc_loglike << "," << a.u.dest_a_fsas_state_idx01 << "," << a.end_loglike << "[i=" << FloatToOrderedInt(a.end_loglike) << "]" << "}"; return os; } */ } // namespace intersect_pruned_internal using namespace intersect_pruned_internal; // NOLINT /* Pruned intersection (a.k.a. composition) that corresponds to decoding for speech recognition-type tasks. Can use either different decoding graphs (one per acoustic sequence) or a shared graph */ class MultiGraphDenseIntersectPruned { public: /** Pruned intersection (a.k.a. composition) that corresponds to decoding for speech recognition-type tasks @param [in] a_fsas The decoding graphs, one per sequence. E.g. might just be a linear sequence of phones, or might be something more complicated. Must have either the same Dim0() as b_fsas, or Dim0()==1 in which case the graph is shared. @param [in] b_fsas The neural-net output, with each frame containing the log-likes of each phone. A series of sequences of (in general) different length. @param [in] search_beam "Default" search/decoding beam. 
The actual beam is dynamic and also depends on max_active and min_active. @param [in] output_beam Beam for pruning the output FSA, will typically be smaller than search_beam. @param [in] min_active Minimum number of FSA states that are allowed to be active on any given frame for any given intersection/composition task. This is advisory, in that it will try not to have fewer than this number active. @param [in] max_active Maximum number of FSA states that are allowed to be active on any given frame for any given intersection/composition task. This is advisory, in that it will try not to exceed that but may not always succeed. This determines the hash size. */ MultiGraphDenseIntersectPruned(FsaVec &a_fsas, DenseFsaVec &b_fsas, float search_beam, float output_beam, int32_t min_active, int32_t max_active) : a_fsas_(a_fsas), b_fsas_(b_fsas), search_beam_(search_beam), output_beam_(output_beam), min_active_(min_active), max_active_(max_active), dynamic_beams_(a_fsas.Context(), b_fsas.shape.Dim0(), search_beam), forward_semaphore_(1) { NVTX_RANGE(K2_FUNC); c_ = GetContext(a_fsas.shape, b_fsas.shape); T_ = b_fsas_.shape.MaxSize(1); K2_CHECK_GT(search_beam, 0); K2_CHECK_GT(output_beam, 0); K2_CHECK_GE(min_active, 0); K2_CHECK_GT(max_active, min_active); K2_CHECK(a_fsas.shape.Dim0() == b_fsas.shape.Dim0() || a_fsas.shape.Dim0() == 1); K2_CHECK_GE(b_fsas.shape.Dim0(), 1); int32_t num_seqs = b_fsas.shape.Dim0(); int32_t num_buckets = RoundUpToNearestPowerOfTwo(num_seqs * 4 * max_active); if (num_buckets < 128) num_buckets = 128; int32_t num_a_copies; if (a_fsas.shape.Dim0() == 1) { a_fsas_stride_ = 0; state_map_fsa_stride_ = a_fsas.TotSize(1); num_a_copies = b_fsas.shape.Dim0(); } else { K2_CHECK_EQ(a_fsas.shape.Dim0(), b_fsas.shape.Dim0()); a_fsas_stride_ = 1; state_map_fsa_stride_ = 0; num_a_copies = 1; } // +1, because all-ones is not a valid key. int64_t num_keys = num_a_copies * (int64_t)a_fsas.TotSize(1) + 1; // To reduce the number of template instantiations, we limit the // code to use either 32 or 36 or 40 bits. // 32 can be optimized in future so if the num_keys is less than // 1<<32, we favor that value. int32_t num_key_bits; if ((num_keys >> 32) == 0) num_key_bits = 32; else if ((num_keys >> 36) == 0) num_key_bits = 36; else { num_key_bits = 40; if ((num_keys >> 40) != 0) { K2_LOG(FATAL) << "Too many keys for hash, please extend this code " "with more options: num_keys=" << num_keys; } } state_map_ = Hash(c_, num_buckets, num_key_bits); { // set up do_pruning_after_ and prune_t_begin_end_. do_pruning_after_.resize(T_ + 1, (char)0); // each time we prune, prune 30 frames; but shift by 20 frames each // time so there are 10 frames of overlap. int32_t prune_num_frames = 30, prune_shift = 20, T = T_; K2_CHECK_GT(prune_num_frames, prune_shift); // The first begin_t is negative but will be rounded up to zero to get the // start of the range. The motivation is: we don't want to wait until we // have processed `prune_num_frames` frames to prune for the first time, // because that first interval of not-pruning, being larger than normal, // would dominate the maximum memory used by intersection. 
for (int32_t begin_t = prune_shift - prune_num_frames; ; begin_t += prune_shift) { int32_t prune_begin = std::max<int32_t>(0, begin_t), prune_end = begin_t + prune_num_frames; bool last = false; if (prune_end >= T) { prune_end = T; last = true; } K2_CHECK_LT(prune_begin, prune_end); do_pruning_after_[prune_end - 1] = (char)1; prune_t_begin_end_.push_back({prune_begin, prune_end}); if (last) break; } } } // The information we have for each frame of the pruned-intersection (really: // decoding) algorithm. We keep an array of these, one for each frame, up to // the length of the longest sequence we're decoding plus one. struct FrameInfo { // States that are active at the beginning of this frame. Indexed // [fsa_idx][state_idx], where fsa_idx indexes b_fsas_ (and a_fsas_, if // a_fsas_stride_ != 0); and state_idx just enumerates the active states // on this frame (as state_idx01's in a_fsas_). Ragged<StateInfo> states; // 2 axes: fsa, state // Indexed [fsa_idx][state_idx][arc_idx].. the first 2 indexes are // the same as those into 'states' (the first 2 levels of the structure // are shared), and the last one enumerates the arcs leaving each of those // states. // // Note: there may be indexes [fsa_idx] that have no states (because that // FSA had fewer frames than the max), and indexes [fsa_idx][state_idx] that // have no arcs due to pruning. Ragged<ArcInfo> arcs; // 3 axes: fsa, state, arc }; /* Does the main work of intersection/composition, but doesn't produce any output; the output is provided when you call FormatOutput(). */ void Intersect() { /* T is the largest number of (frames+1) of neural net output, or the largest number of frames of log-likelihoods we count the final frame with (0, -inf, -inf..) that is used for the final-arc. The largest number of states in the fsas represented by b_fsas equals T+1 (e.g. 1 frame would require 2 states, because that 1 frame is the arc from state 0 to state 1). So the #states is 2 greater than the actual number of frames in the neural-net output. */ int32_t num_fsas = b_fsas_.shape.Dim0(), T = T_; std::ostringstream os; os << "Intersect:T=" << T << ",num_fsas=" << num_fsas << ",TotSize(1)=" << b_fsas_.shape.TotSize(1); NVTX_RANGE(os.str().c_str()); ThreadPool* pool = GetThreadPool(); pool->SubmitTask([this]() { BackwardPassStatic(this); }); // we'll initially populate frames_[0.. T+1], but discard the one at T+1, // which has no arcs or states, the ones we use are from 0 to T. frames_.reserve(T + 2); frames_.push_back(InitialFrameInfo()); for (int32_t t = 0; t <= T; t++) { if (state_map_.NumKeyBits() == 32) { frames_.push_back(PropagateForward<32>(t, frames_.back().get())); } else if (state_map_.NumKeyBits() == 36) { frames_.push_back(PropagateForward<36>(t, frames_.back().get())); } else { K2_CHECK_EQ(state_map_.NumKeyBits(), 40); frames_.push_back(PropagateForward<40>(t, frames_.back().get())); } if (do_pruning_after_[t]) { // let a phase of backward-pass pruning commence. backward_semaphore_.Signal(c_); // note: normally we should acquire forward_semaphore_ without having to // wait. It avoids the backward pass getting too far behind the forward // pass, which could mean too much memory is used. forward_semaphore_.acquire(); } } // The FrameInfo for time T+1 will have no states. We did that // last PropagateForward so that the 'arcs' member of frames_[T] // is set up (it has no arcs but we need the shape). 
frames_.pop_back(); pool->WaitAllTasksFinished(); } void BackwardPass() { int32_t num_fsas = b_fsas_.shape.Dim0(), num_work_items = max_active_ * num_fsas * T_; ParallelRunner pr(c_); // if num_work_items is big enough, it will actually create a new stream. cudaStream_t stream = pr.NewStream(num_work_items); With w(stream); // This overrides whatever stream c_ contains with `stream`, if it's not NVTX_RANGE(K2_FUNC); for (size_t i = 0; i < prune_t_begin_end_.size(); i++) { backward_semaphore_.Wait(c_); int32_t prune_t_begin = prune_t_begin_end_[i].first, prune_t_end = prune_t_begin_end_[i].second; PruneTimeRange(prune_t_begin, prune_t_end); forward_semaphore_.release(); } } static void BackwardPassStatic(MultiGraphDenseIntersectPruned *c) { // WARNING(fangjun): this is run in a separate thread, so we have // to reset its default device. Otherwise, it will throw later // if the main thread is using a different device. DeviceGuard guard(c->c_); c->BackwardPass(); } // Return FrameInfo for 1st frame, with `states` set but `arcs` not set. std::unique_ptr<FrameInfo> InitialFrameInfo() { NVTX_RANGE("InitialFrameInfo"); int32_t num_fsas = b_fsas_.shape.Dim0(); std::unique_ptr<FrameInfo> ans = std::make_unique<FrameInfo>(); if (a_fsas_.Dim0() == 1) { int32_t start_states_per_seq = (a_fsas_.shape.TotSize(1) > 0), // 0 or 1 num_start_states = num_fsas * start_states_per_seq; ans->states = Ragged<StateInfo>( RegularRaggedShape(c_, num_fsas, start_states_per_seq), Array1<StateInfo>(c_, num_start_states)); StateInfo *states_data = ans->states.values.Data(); K2_EVAL( c_, num_start_states, lambda_set_states, (int32_t i)->void { StateInfo info; info.a_fsas_state_idx01 = 0; // start state of a_fsas_ info.forward_loglike = FloatToOrderedInt(0.0); states_data[i] = info; }); } else { Ragged<int32_t> start_states = GetStartStates(a_fsas_); ans->states = Ragged<StateInfo>(start_states.shape, Array1<StateInfo>(c_, start_states.NumElements())); StateInfo *ans_states_values_data = ans->states.values.Data(); const int32_t *start_states_values_data = start_states.values.Data(); K2_EVAL( c_, start_states.NumElements(), lambda_set_state_info, (int32_t states_idx01)->void { StateInfo info; info.a_fsas_state_idx01 = start_states_values_data[states_idx01]; info.forward_loglike = FloatToOrderedInt(0.0); ans_states_values_data[states_idx01] = info; }); } return ans; } void FormatOutput(FsaVec *ofsa, Array1<int32_t> *arc_map_a, Array1<int32_t> *arc_map_b) { NVTX_RANGE("FormatOutput"); int32_t T = T_; ContextPtr c_cpu = GetCpuContext(); Array1<ArcInfo *> arcs_data_ptrs(c_cpu, T + 1); Array1<int32_t *> arcs_row_splits1_ptrs(c_cpu, T + 1); for (int32_t t = 0; t <= T; t++) { arcs_data_ptrs.Data()[t] = frames_[t]->arcs.values.Data(); arcs_row_splits1_ptrs.Data()[t] = frames_[t]->arcs.RowSplits(1).Data(); } // transfer to GPU if we're using a GPU arcs_data_ptrs = arcs_data_ptrs.To(c_); ArcInfo **arcs_data_ptrs_data = arcs_data_ptrs.Data(); arcs_row_splits1_ptrs = arcs_row_splits1_ptrs.To(c_); int32_t **arcs_row_splits1_ptrs_data = arcs_row_splits1_ptrs.Data(); const int32_t *b_fsas_row_splits1 = b_fsas_.shape.RowSplits(1).Data(); const int32_t *a_fsas_row_splits1 = a_fsas_.RowSplits(1).Data(); int32_t a_fsas_stride = a_fsas_stride_; // 0 or 1 depending if the decoding // graph is shared. int32_t num_fsas = b_fsas_.shape.Dim0(); RaggedShape final_arcs_shape; { /* This block populates `final_arcs_shape`. It is the shape of a ragged tensor of arcs that conceptually would live at frames_[T+1]->arcs. 
It contains no actual arcs, but may contain some states, that represent "missing" final-states. The problem we are trying to solve is that there was a start-state for an FSA but no final-state because it did not survive pruning, and this could lead to an output FSA that is invalid or is misinterpreted (because we are interpreting a non-final state as a final state). */ Array1<int32_t> num_extra_states(c_, num_fsas + 1); int32_t *num_extra_states_data = num_extra_states.Data(); K2_EVAL(c_, num_fsas, lambda_set_num_extra_states, (int32_t i) -> void { int32_t final_t = b_fsas_row_splits1[i+1] - b_fsas_row_splits1[i]; int32_t *arcs_row_splits1_data = arcs_row_splits1_ptrs_data[final_t]; int32_t num_states_final_t = arcs_row_splits1_data[i + 1] - arcs_row_splits1_data[i]; K2_CHECK_LE(num_states_final_t, 1); // has_start_state is 1 if there is a start-state; note, we don't prune // the start-states, so they'll be present if they were present in a_fsas_. int32_t has_start_state = (a_fsas_row_splits1[i * a_fsas_stride] < a_fsas_row_splits1[i * a_fsas_stride + 1]); // num_extra_states_data[i] will be 1 if there was a start state but no final-state; // else, 0. num_extra_states_data[i] = has_start_state * (1 - num_states_final_t); }); ExclusiveSum(num_extra_states, &num_extra_states); RaggedShape top_shape = RaggedShape2(&num_extra_states, nullptr, -1), bottom_shape = RegularRaggedShape(c_, top_shape.NumElements(), 0); final_arcs_shape = ComposeRaggedShapes(top_shape, bottom_shape); } RaggedShape oshape; // see documentation of Stack() in ragged_ops.h for explanation. Array1<uint32_t> oshape_merge_map; { NVTX_RANGE("InitOshape"); // each of these have 3 axes. std::vector<RaggedShape *> arcs_shapes(T + 2); for (int32_t t = 0; t <= T; t++) arcs_shapes[t] = &(frames_[t]->arcs.shape); arcs_shapes[T + 1] = &final_arcs_shape; // oshape is a 4-axis ragged tensor which is indexed: // oshape[fsa_index][t][state_idx][arc_idx] int32_t axis = 1; oshape = Stack(axis, T + 2, arcs_shapes.data(), &oshape_merge_map); } int32_t *oshape_row_ids3 = oshape.RowIds(3).Data(), *oshape_row_ids2 = oshape.RowIds(2).Data(), *oshape_row_ids1 = oshape.RowIds(1).Data(), *oshape_row_splits3 = oshape.RowSplits(3).Data(), *oshape_row_splits2 = oshape.RowSplits(2).Data(), *oshape_row_splits1 = oshape.RowSplits(1).Data(); int32_t num_arcs = oshape.NumElements(); *arc_map_a = Array1<int32_t>(c_, num_arcs); *arc_map_b = Array1<int32_t>(c_, num_arcs); int32_t *arc_map_a_data = arc_map_a->Data(), *arc_map_b_data = arc_map_b->Data(); Array1<Arc> arcs_out(c_, num_arcs); Arc *arcs_out_data = arcs_out.Data(); const Arc *a_fsas_arcs = a_fsas_.values.Data(); int32_t b_fsas_num_cols = b_fsas_.scores.Dim1(); const int32_t *b_fsas_row_ids1 = b_fsas_.shape.RowIds(1).Data(); const uint32_t *oshape_merge_map_data = oshape_merge_map.Data(); K2_EVAL( c_, num_arcs, lambda_format_arc_data, (int32_t oarc_idx0123)->void { // by 'oarc' we mean arc with shape `oshape`. int32_t oarc_idx012 = oshape_row_ids3[oarc_idx0123], oarc_idx01 = oshape_row_ids2[oarc_idx012], oarc_idx0 = oshape_row_ids1[oarc_idx01], oarc_idx0x = oshape_row_splits1[oarc_idx0], oarc_idx0xx = oshape_row_splits2[oarc_idx0x], oarc_idx1 = oarc_idx01 - oarc_idx0x, oarc_idx01x_next = oshape_row_splits2[oarc_idx01 + 1]; int32_t m = oshape_merge_map_data[oarc_idx0123], t = m % (T + 2), // actually we won't get t == T or t == T + 1 // here since those frames have no arcs. arcs_idx012 = m / (T + 2); // arc_idx012 into FrameInfo::arcs on time t, // index of the arc on that frame. 
K2_CHECK_EQ(t, oarc_idx1); const ArcInfo *arcs_data = arcs_data_ptrs_data[t]; ArcInfo arc_info = arcs_data[arcs_idx012]; Arc arc; arc.src_state = oarc_idx012 - oarc_idx0xx; // Note: the idx1 w.r.t. the frame's `arcs` is an idx2 w.r.t. `oshape`. int32_t dest_state_idx012 = oarc_idx01x_next + arc_info.u.dest_info_state_idx1; arc.dest_state = dest_state_idx012 - oarc_idx0xx; arc.label = a_fsas_arcs[arc_info.a_fsas_arc_idx012].label; int32_t fsa_id = oarc_idx0, b_fsas_idx0x = b_fsas_row_splits1[fsa_id], b_fsas_idx01 = b_fsas_idx0x + t, b_fsas_idx2 = (arc.label + 1), b_fsas_arc_idx012 = b_fsas_idx01 * b_fsas_num_cols + b_fsas_idx2; arc.score = arc_info.arc_loglike; arc_map_a_data[oarc_idx0123] = arc_info.a_fsas_arc_idx012; arc_map_b_data[oarc_idx0123] = b_fsas_arc_idx012; arcs_out_data[oarc_idx0123] = arc; }); // Remove axis 1, which corresponds to time. *ofsa = FsaVec(RemoveAxis(oshape, 1), arcs_out); } /* Computes pruning cutoffs for this frame: these are the cutoffs for the arc "forward score", one per FSA. This is a dynamic process involving dynamic_beams_ which are updated on each frame (they start off at search_beam_). @param [in] arc_end_scores The "forward log-probs" (scores) at the end of each arc, i.e. its contribution to the following state. Is a tensor indexed [fsa_id][state][arc]; we will get rid of the [state] dim, combining it with the [arc] dim, so it's just [fsa_id][arc] It is conceptually unchanged by this operation but non-const because row-ids of its shape may need to be generated. @return Returns a vector of log-likelihood cutoffs, one per FSA (the cutoff will be -infinity for FSAs that don't have any active states). The cutoffs will be of the form: the best score for any arc, minus the dynamic beam. See the code for how the dynamic beam is adjusted; it will approach 'search_beam_' as long as the number of active states in each FSA is between min_active and max_active. */ Array1<float> GetPruningCutoffs(Ragged<float> &arc_end_scores) { NVTX_RANGE(K2_FUNC); int32_t num_fsas = arc_end_scores.shape.Dim0(); // get the maximum score from each sub-list (i.e. each FSA, on this frame). // Note: can probably do this with a cub Reduce operation using an operator // that has side effects (that notices when it's operating across a // boundary). // the max will be -infinity for any FSA-id that doesn't have any active // states (e.g. because that stream has finished). // Casting to ragged2 just considers the top 2 indexes, ignoring the 3rd. // i.e. it's indexed by [fsa_id][arc]. Ragged<float> end_scores_per_fsa = arc_end_scores.RemoveAxis(1); Array1<float> max_per_fsa(c_, end_scores_per_fsa.Dim0()); MaxPerSublist(end_scores_per_fsa, -std::numeric_limits<float>::infinity(), &max_per_fsa); const int32_t *arc_end_scores_row_splits1_data = arc_end_scores.RowSplits(1).Data(); const float *max_per_fsa_data = max_per_fsa.Data(); float *dynamic_beams_data = dynamic_beams_.Data(); float default_beam = search_beam_, max_active = max_active_, min_active = min_active_; K2_CHECK_LT(min_active, max_active); Array1<float> cutoffs(c_, num_fsas); float *cutoffs_data = cutoffs.Data(); K2_EVAL( c_, num_fsas, lambda_set_beam_and_cutoffs, (int32_t i)->void { float best_loglike = max_per_fsa_data[i], dynamic_beam = dynamic_beams_data[i]; int32_t active_states = arc_end_scores_row_splits1_data[i + 1] - arc_end_scores_row_splits1_data[i]; if (active_states <= max_active) { // Not constrained by max_active... 
if (active_states >= min_active || active_states == 0) { // Neither the max_active nor min_active constraints // apply. Gradually approach 'beam' // (Also approach 'beam' if active_states == 0; we might as // well, since there is nothing to prune here). dynamic_beam = 0.8 * dynamic_beam + 0.2 * default_beam; } else { // We violated the min_active constraint -> increase beam if (dynamic_beam < default_beam) dynamic_beam = default_beam; // gradually make the beam larger as long // as we are below min_active dynamic_beam *= 1.25; } } else { // We violated the max_active constraint -> decrease beam if (dynamic_beam > default_beam) dynamic_beam = default_beam; // Decrease the beam as long as we have more than // max_active active states. dynamic_beam *= 0.8; } dynamic_beams_data[i] = dynamic_beam; cutoffs_data[i] = best_loglike - dynamic_beam; }); return cutoffs; } /* Returns list of arcs on this frame, consisting of all arcs leaving the states active on 'cur_frame'. @param [in] t The time-index (on which to look up log-likes), t >= 0 @param [in] cur_frame The FrameInfo for the current frame; only its 'states' member is expected to be set up on entry. */ Ragged<ArcInfo> GetArcs(int32_t t, FrameInfo *cur_frame) { NVTX_RANGE(K2_FUNC); Ragged<StateInfo> &states = cur_frame->states; const StateInfo *state_values = states.values.Data(); // in a_fsas_ (the decoding graphs), maps from state_idx01 to arc_idx01x. const int32_t *fsa_arc_splits = a_fsas_.shape.RowSplits(2).Data(); int32_t num_states = states.values.Dim(); Array1<int32_t> num_arcs(c_, num_states + 1); int32_t *num_arcs_data = num_arcs.Data(); // `num_arcs` gives the num-arcs for each state in `states`. K2_EVAL( c_, num_states, num_arcs_lambda, (int32_t state_idx01)->void { int32_t a_fsas_state_idx01 = state_values[state_idx01].a_fsas_state_idx01, a_fsas_arc_idx01x = fsa_arc_splits[a_fsas_state_idx01], a_fsas_arc_idx01x_next = fsa_arc_splits[a_fsas_state_idx01 + 1], a_fsas_num_arcs = a_fsas_arc_idx01x_next - a_fsas_arc_idx01x; num_arcs_data[state_idx01] = a_fsas_num_arcs; }); ExclusiveSum(num_arcs, &num_arcs); // initialize shape of array that will hold arcs leaving the active states. // Its shape is [fsa_index][state][arc]; the top two levels are shared with // `states`. 'ai' means ArcInfo. RaggedShape ai_shape = ComposeRaggedShapes(states.shape, RaggedShape2(&num_arcs, nullptr, -1)); // from state_idx01 (into `states` or `ai_shape`) -> fsa_idx0 const int32_t *ai_row_ids1 = ai_shape.RowIds(1).Data(); // from arc_idx012 (into `ai_shape`) to state_idx01 const int32_t *ai_row_ids2 = ai_shape.RowIds(2).Data(); // from state_idx01 to arc_idx01x const int32_t *ai_row_splits2 = ai_shape.RowSplits(2).Data(); // from state_idx01 (into a_fsas_) to arc_idx01x (into a_fsas_) const int32_t *a_fsas_row_splits2 = a_fsas_.shape.RowSplits(2).Data(); const Arc *arcs = a_fsas_.values.Data(); // fsa_idx0 to ind0x (into b_fsas_), which gives the 1st row for this // sequence. 
const int32_t *b_fsas_row_ids1 = b_fsas_.shape.RowIds(1).Data(); const int32_t *b_fsas_row_splits1 = b_fsas_.shape.RowSplits(1).Data(); const float *score_data = b_fsas_.scores.Data(); int32_t scores_num_cols = b_fsas_.scores.Dim1(); auto scores_acc = b_fsas_.scores.Accessor(); Ragged<ArcInfo> ai(ai_shape); ArcInfo *ai_data = ai.values.Data(); // uninitialized K2_EVAL( c_, ai.values.Dim(), ai_lambda, (int32_t ai_arc_idx012)->void { int32_t ai_state_idx01 = ai_row_ids2[ai_arc_idx012], ai_fsa_idx0 = ai_row_ids1[ai_state_idx01], ai_arc_idx01x = ai_row_splits2[ai_state_idx01], ai_arc_idx2 = ai_arc_idx012 - ai_arc_idx01x; StateInfo sinfo = state_values[ai_state_idx01]; int32_t a_fsas_arc_idx01x = a_fsas_row_splits2[sinfo.a_fsas_state_idx01], a_fsas_arc_idx012 = a_fsas_arc_idx01x + ai_arc_idx2; Arc arc = arcs[a_fsas_arc_idx012]; int32_t scores_idx0x = b_fsas_row_splits1[ai_fsa_idx0], scores_idx01 = scores_idx0x + t, // t == ind1 into 'scores' scores_idx2 = arc.label + 1; // the +1 is so that -1 can be handled K2_DCHECK_LT(static_cast<uint32_t>(scores_idx2), static_cast<uint32_t>(scores_num_cols)); float acoustic_score = scores_acc(scores_idx01, scores_idx2); ArcInfo ai; ai.a_fsas_arc_idx012 = a_fsas_arc_idx012; ai.arc_loglike = acoustic_score + arc.score; ai.end_loglike = OrderedIntToFloat(sinfo.forward_loglike) + ai.arc_loglike; // at least currently, the ArcInfo object's src_state and dest_state // are idx1's not idx01's, i.e. they don't contain the FSA-index, // where as the ai element is an idx01, so we need to do this to // convert to an idx01; this relies on the fact that // sinfo.abs_state_id == arc.src_state + a_fsas_fsa_idx0x. ai.u.dest_a_fsas_state_idx01 = sinfo.a_fsas_state_idx01 + arc.dest_state - arc.src_state; ai_data[ai_arc_idx012] = ai; }); return ai; } // Later we may choose to support b_fsas_.Dim0() == 1 and a_fsas_.Dim0() > 1, // and we'll have to change various bits of code for that to work. inline int32_t NumFsas() const { return b_fsas_.shape.Dim0(); } /* Does the forward-propagation (basically: the decoding step) and returns a newly allocated FrameInfo* object for the next frame. num_key_bits (template argument): either 32 (normal case) or 40: it is the number number of bits in `state_map_idx`. @param [in] t Time-step that we are processing arcs leaving from; will be called with t=0, t=1, ... @param [in] cur_frame FrameInfo object for the states corresponding to time t; will have its 'states' member set up but not its 'arcs' member (this function will create that). @return Returns FrameInfo object corresponding to time t+1; will have its 'states' member set up but not its 'arcs' member. */ template <int32_t NUM_KEY_BITS> std::unique_ptr<FrameInfo> PropagateForward(int32_t t, FrameInfo *cur_frame) { NVTX_RANGE("PropagateForward"); int32_t num_fsas = NumFsas(); // Ragged<StateInfo> &states = cur_frame->states; // arc_info has 3 axes: fsa_id, state, arc. cur_frame->arcs = GetArcs(t, cur_frame); if (NUM_KEY_BITS > 32) { // a check. constexpr int32_t NUM_VALUE_BITS = 64 - NUM_KEY_BITS, shift = std::min<int32_t>(31, NUM_VALUE_BITS); // the 'min' part is to avoid a compiler warning about 'shift count too // large' for code that is anyway unreachable. 
K2_CHECK_EQ(cur_frame->arcs.NumElements() >> shift, 0) << "Too many arcs to store in hash; try smaller NUM_KEY_BITS (would " "require code change) or reduce max_states or minibatch size."; } Ragged<ArcInfo> &arc_info = cur_frame->arcs; ArcInfo *ai_data = arc_info.values.Data(); Array1<float> ai_data_array1(c_, cur_frame->arcs.values.Dim()); float *ai_data_array1_data = ai_data_array1.Data(); K2_EVAL( c_, ai_data_array1.Dim(), lambda_set_ai_data, (int32_t i)->void { ai_data_array1_data[i] = ai_data[i].end_loglike; }); Ragged<float> ai_loglikes(arc_info.shape, ai_data_array1); // `cutoffs` is of dimension num_fsas. Array1<float> cutoffs = GetPruningCutoffs(ai_loglikes); float *cutoffs_data = cutoffs.Data(); // write certain indexes (into ai.values) to state_map_.Data(). Keeps // track of the active states and will allow us to assign a numbering to // them. const int32_t *ai_row_ids1 = arc_info.shape.RowIds(1).Data(), *ai_row_ids2 = arc_info.shape.RowIds(2).Data(); int64_t state_map_fsa_stride = state_map_fsa_stride_; // renumber_states will be a renumbering that dictates which of the arcs in // 'ai' correspond to unique states. Only one arc for each dest-state is // kept (it doesn't matter which one). Renumbering renumber_states(c_, arc_info.NumElements()); char *keep_this_state_data = renumber_states.Keep().Data(); int32_t new_hash_size = RoundUpToNearestPowerOfTwo( int32_t(arc_info.NumElements() * 1.0)); if (new_hash_size > state_map_.NumBuckets()) { bool copy_data = false; // The hash is empty right now, so there is // nothing to copy. state_map_.Resize(new_hash_size, NUM_KEY_BITS, -1, copy_data); } auto state_map_acc = state_map_.GetAccessor<Hash::Accessor<NUM_KEY_BITS>>(); { NVTX_RANGE("LambdaSetStateMap"); K2_EVAL( c_, arc_info.NumElements(), lambda_set_state_map, (int32_t arc_idx012)->void { int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]]; int32_t dest_state_idx01 = ai_data[arc_idx012].u.dest_a_fsas_state_idx01; float end_loglike = ai_data[arc_idx012].end_loglike, cutoff = cutoffs_data[fsa_id]; char keep_this_state = 0; // only one arc entering any state will // have its 'keep_this_state_data' entry // set to 1. if (end_loglike > cutoff) { uint64_t state_map_idx = dest_state_idx01 + fsa_id * state_map_fsa_stride; if (state_map_acc.Insert(state_map_idx, (uint64_t)arc_idx012)) keep_this_state = 1; } keep_this_state_data[arc_idx012] = keep_this_state; }); } int32_t num_states = renumber_states.NumNewElems(); // state_reorder_data maps from (state_idx01 on next frame) to (the // arc_idx012 on this frame which is the source arc which we arbitrarily // choose as being "responsible" for the creation of that state). const int32_t *state_reorder_data = renumber_states.Old2New().Data(); // state_to_fsa_id maps from an index into the next frame's // FrameInfo::states.values() vector to the sequence-id (fsa_id) associated // with it. It should be non-decreasing. Array1<int32_t> state_to_fsa_id(c_, num_states); { // This block sets 'state_to_fsa_id'. 
NVTX_RANGE("LambdaSetStateToFsaId"); int32_t *state_to_fsa_id_data = state_to_fsa_id.Data(); K2_EVAL( c_, arc_info.NumElements(), lambda_state_to_fsa_id, (int32_t arc_idx012)->void { int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]], this_state_j = state_reorder_data[arc_idx012], next_state_j = state_reorder_data[arc_idx012 + 1]; if (next_state_j > this_state_j) { state_to_fsa_id_data[this_state_j] = fsa_id; } }); K2_DCHECK(IsMonotonic(state_to_fsa_id)); } std::unique_ptr<FrameInfo> ans = std::make_unique<FrameInfo>(); Array1<int32_t> states_row_splits1(c_, num_fsas + 1); RowIdsToRowSplits(state_to_fsa_id, &states_row_splits1); ans->states = Ragged<StateInfo>( RaggedShape2(&states_row_splits1, &state_to_fsa_id, num_states), Array1<StateInfo>(c_, num_states)); StateInfo *ans_states_data = ans->states.values.Data(); const int32_t minus_inf_int = FloatToOrderedInt(-std::numeric_limits<float>::infinity()); K2_EVAL( c_, num_states, lambda_init_loglike, (int32_t i)->void { ans_states_data[i].forward_loglike = minus_inf_int; }); { NVTX_RANGE("LambdaModifyStateMap"); // Modify the elements of `state_map` to refer to the indexes into // `ans->states` / `kept_states_data`, rather than the indexes into // ai_data. This will decrease some of the values in `state_map`, in // general. K2_EVAL( c_, arc_info.NumElements(), lambda_modify_state_map, (int32_t arc_idx012)->void { int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]]; int32_t dest_state_idx01 = ai_data[arc_idx012].u.dest_a_fsas_state_idx01; int32_t this_j = state_reorder_data[arc_idx012], next_j = state_reorder_data[arc_idx012 + 1]; if (next_j > this_j) { uint64_t state_map_idx = dest_state_idx01 + fsa_id * state_map_fsa_stride; uint64_t value, *key_value_addr = nullptr; bool ans = state_map_acc.Find(state_map_idx, &value, &key_value_addr); K2_DCHECK(ans); K2_DCHECK_EQ(static_cast<int32_t>(value), arc_idx012); // Note: this_j is an idx01 into ans->states. previously it // contained an arc_idx012 (of the entering arc that won the // race). state_map_acc.SetValue(key_value_addr, state_map_idx, (uint64_t)this_j); } }); } // We'll set up the data of the kept states below... StateInfo *kept_states_data = ans->states.values.Data(); { int32_t *ans_states_row_splits1_data = ans->states.RowSplits(1).Data(); NVTX_RANGE("LambdaSetStates"); K2_EVAL( c_, arc_info.NumElements(), lambda_set_arcs_and_states, (int32_t arc_idx012)->void { int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]]; ArcInfo &info = ai_data[arc_idx012]; int32_t dest_a_fsas_state_idx01 = info.u.dest_a_fsas_state_idx01; uint64_t state_map_idx = dest_a_fsas_state_idx01 + fsa_id * state_map_fsa_stride; uint64_t state_idx01; const uint64_t minus_one = ~(uint64_t)0; if (!state_map_acc.Find(state_map_idx, &state_idx01)) state_idx01 = minus_one; // The destination state did not survive // pruning. int32_t state_idx1; if (state_idx01 != minus_one) { int32_t state_idx0x = ans_states_row_splits1_data[fsa_id]; state_idx1 = static_cast<int32_t>(state_idx01) - state_idx0x; } else { state_idx1 = -1; // Meaning: invalid. } // state_idx1 is the idx1 into ans->states, of the destination // state. info.u.dest_info_state_idx1 = state_idx1; if (state_idx1 < 0) return; // multiple threads may write the same value to the address written // to in the next line. kept_states_data[state_idx01].a_fsas_state_idx01 = dest_a_fsas_state_idx01; int32_t end_loglike_int = FloatToOrderedInt(info.end_loglike); // Set the forward log-like of the dest state to the largest of any // of those of the incoming arcs. 
Note: we initialized this in // lambda_init_loglike above. AtomicMax(&(kept_states_data[state_idx01].forward_loglike), end_loglike_int); }); } { NVTX_RANGE("LambdaResetStateMap"); const int32_t *next_states_row_ids1 = ans->states.shape.RowIds(1).Data(); K2_EVAL( c_, ans->states.NumElements(), lambda_reset_state_map, (int32_t state_idx01)->void { int32_t a_fsas_state_idx01 = kept_states_data[state_idx01].a_fsas_state_idx01, fsa_idx0 = next_states_row_ids1[state_idx01]; int64_t state_map_idx = a_fsas_state_idx01 + fsa_idx0 * state_map_fsa_stride; state_map_acc.Delete(state_map_idx); }); } return ans; } /* Sets backward_loglike fields of StateInfo to the negative of the forward prob if (this is the final-state or !only_final_probs), else -infinity. This is used in computing the backward loglikes/scores for purposes of pruning. This may be done after we're finished decoding/intersecting, or while we are still decoding. Note: something similar to this (setting backward-prob == forward-prob) is also done in PropagateBackward() when we detect final-states. That's needed because not all sequences have the same length, so some may have reached their final state earlier. (Note: we only get to the final-state of a_fsas_ if we've reached the final frame of the input, because for non-final frames we always have -infinity as the log-prob corresponding to the symbol -1.) While we are still decoding, a background process will do pruning concurrently with the forward computation, for purposes of reducing memory usage (and so that most of the pruning can be made concurrent with the forward computation). In this case we want to avoid pruning away anything that wouldn't have been pruned away if we were to have waited to the end; and it turns out that setting the backward probs to the negative of the forward probs (i.e. for all states, not just final states) accomplishes this. The issue was mentioned in the "Exact Lattice Generation.." paper and also in the code for Kaldi's lattice-faster-decoder; search for "As in [3], to save memory..." @param [in] cur_frame Frame on which to set the backward probs */ void SetBackwardProbsFinal(FrameInfo *cur_frame) { NVTX_RANGE("SetBackwardProbsFinal"); Ragged<StateInfo> &cur_states = cur_frame->states; // 2 axes: fsa,state int32_t num_states = cur_states.values.Dim(); if (num_states == 0) return; StateInfo *cur_states_data = cur_states.values.Data(); const int32_t *a_fsas_row_ids1_data = a_fsas_.shape.RowIds(1).Data(), *a_fsas_row_splits1_data = a_fsas_.shape.RowSplits(1).Data(), *cur_states_row_ids1_data = cur_states.RowIds(1).Data(); double minus_inf = -std::numeric_limits<double>::infinity(); K2_EVAL(c_, num_states, lambda_set_backward_prob, (int32_t state_idx01) -> void { StateInfo *info = cur_states_data + state_idx01; double backward_loglike, forward_loglike = OrderedIntToFloat(info->forward_loglike); if (forward_loglike - forward_loglike == 0) { // not -infinity... // canonically we'd set this to zero, but setting it to the forward // loglike when this is the final-state (in a_fsas_) has the effect of // making the (forward+backward) probs equivalent to the logprob minus // the best-path log-prob, which is convenient for pruning. 
If this // is not actually the last frame of this sequence, which can happen // if this was called before the forward decoding process was // finished, what we are doing is a form of pruning that is guaranteed // not to prune anything out that would not have been pruned out if we // had waited until the real end of the file to do the pruning. backward_loglike = -forward_loglike; } else { backward_loglike = minus_inf; } info->backward_loglike = backward_loglike; }); } /* Does backward propagation of log-likes, which means setting the backward_loglike field of the StateInfo variable (for cur_frame); and works out which arcs and which states are to be pruned on cur_frame; this information is output to Array1<char>'s which are supplied by the caller. These backward log-likes are normalized in such a way that you can add them with the forward log-likes to produce the log-likelihood ratio vs the best path (this will be non-positive). (To do this, for the final state we have to set the backward log-like to the negative of the forward log-like; see SetBackwardProbsFinal()). This function also prunes arc-indexes on `cur_frame` and state-indexes on `next_frame`. @param [in] t The time-index (on which to look up log-likes); equals time index of `cur_frame`; t >= 0 @param [in] cur_frame The FrameInfo for the frame on which we want to set the forward log-like, and output pruning info for arcs and states @param [in] next_frame The next frame's FrameInfo, on which to look up log-likes for the next frame; the `backward_loglike` values of states on `next_frame` are assumed to already be set, either by SetBackwardProbsFinal() or a previous call to PropagateBackward(). @param [out] cur_frame_states_keep An array, created by the caller, to which we'll write 1s for elements of cur_frame->states which we need to keep, and 0s for others. @param [out] cur_frame_arcs_keep An array, created by the caller, to which we'll write 1s for elements of cur_frame->arcs which we need to keep (because they survived pruning), and 0s for others. 
*/ void PropagateBackward(int32_t t, FrameInfo *cur_frame, FrameInfo *next_frame, Array1<char> *cur_frame_states_keep, Array1<char> *cur_frame_arcs_keep) { NVTX_RANGE("PropagateBackward"); int32_t num_states = cur_frame->states.NumElements(), num_arcs = cur_frame->arcs.NumElements(); K2_CHECK_EQ(num_states, cur_frame_states_keep->Dim()); K2_CHECK_EQ(num_arcs, cur_frame_arcs_keep->Dim()); int32_t *a_fsas_row_ids1_data = a_fsas_.shape.RowIds(1).Data(), *a_fsas_row_splits1_data = a_fsas_.shape.RowSplits(1).Data(); float minus_inf = -std::numeric_limits<float>::infinity(); Ragged<float> arc_backward_prob(cur_frame->arcs.shape, Array1<float>(c_, cur_frame->arcs.NumElements())); float *arc_backward_prob_data = arc_backward_prob.values.Data(); ArcInfo *ai_data = cur_frame->arcs.values.Data(); int32_t *arcs_rowids1 = cur_frame->arcs.shape.RowIds(1).Data(), *arcs_rowids2 = cur_frame->arcs.shape.RowIds(2).Data(), *arcs_row_splits1 = cur_frame->arcs.shape.RowSplits(1).Data(), *arcs_row_splits2 = cur_frame->arcs.shape.RowSplits(2).Data(); float output_beam = output_beam_; // compute arc backward probs, and set elements of 'keep_cur_arcs_data' int32_t next_num_states = next_frame->states.TotSize(1); char *keep_cur_arcs_data = cur_frame_arcs_keep->Data(), *keep_cur_states_data = cur_frame_states_keep->Data(); const int32_t *next_states_row_splits1_data = next_frame->states.RowSplits(1).Data(); StateInfo *next_states_data = next_frame->states.values.Data(); StateInfo *cur_states_data = cur_frame->states.values.Data(); K2_EVAL(c_, num_arcs, lambda_set_arc_backward_prob_and_keep, (int32_t arcs_idx012) -> void { ArcInfo *arc = ai_data + arcs_idx012; int32_t state_idx01 = arcs_rowids2[arcs_idx012], seq_idx0 = arcs_rowids1[state_idx01], // 'seq' == fsa-idx in b next_states_idx0x = next_states_row_splits1_data[seq_idx0]; // Note: if dest_state_idx1 == -1, dest_state_idx01 has a meaningless // value below, but it's never referenced. int32_t dest_state_idx1 = arc->u.dest_info_state_idx1, dest_state_idx01 = next_states_idx0x + dest_state_idx1; float backward_loglike = minus_inf; char keep_this_arc = 0; if (dest_state_idx1 == -1) { // dest_state_idx1 == -1 means this arc was already pruned in // the forward pass.. do nothing. } else { float arc_loglike = arc->arc_loglike; float dest_state_backward_loglike = next_states_data[dest_state_idx01].backward_loglike; // 'backward_loglike' is the loglike at the beginning of the arc backward_loglike = arc_loglike + dest_state_backward_loglike; float src_state_forward_loglike = OrderedIntToFloat( cur_states_data[arcs_rowids2[arcs_idx012]].forward_loglike); // should be <= 0.0, mathematically. K2_CHECK_LT(backward_loglike, -src_state_forward_loglike + 2.0); if (backward_loglike + src_state_forward_loglike >= -output_beam) { keep_this_arc = 1; } else { backward_loglike = minus_inf; // Don't let arcs outside beam // contribute to their start-states's // backward prob (we'll use that to // prune the start-states away.) } } keep_cur_arcs_data[arcs_idx012] = keep_this_arc; arc_backward_prob_data[arcs_idx012] = backward_loglike; }); /* note, the elements of state_backward_prob that don't have arcs leaving them will be set to the supplied default. 
*/ Array1<float> state_backward_prob(c_, num_states); MaxPerSublist(arc_backward_prob, minus_inf, &state_backward_prob); const float *state_backward_prob_data = state_backward_prob.Data(); const int32_t *cur_states_row_ids1 = cur_frame->states.shape.RowIds(1).Data(); int32_t num_fsas = NumFsas(); K2_DCHECK_EQ(cur_frame->states.shape.Dim0(), num_fsas); K2_EVAL( c_, cur_frame->states.NumElements(), lambda_set_state_backward_prob, (int32_t state_idx01)->void { StateInfo *info = cur_states_data + state_idx01; int32_t fsas_state_idx01 = info->a_fsas_state_idx01, a_fsas_idx0 = a_fsas_row_ids1_data[fsas_state_idx01], fsas_state_idx0x_next = a_fsas_row_splits1_data[a_fsas_idx0 + 1]; float forward_loglike = OrderedIntToFloat(info->forward_loglike), backward_loglike; // `is_final_state` means this is the final-state in a_fsas. this // implies it's final in b_fsas too, since they both would have seen // symbols -1. int32_t is_final_state = (fsas_state_idx01 + 1 >= fsas_state_idx0x_next); if (is_final_state) { // Note: there is only one final-state. backward_loglike = -forward_loglike; } else { backward_loglike = state_backward_prob_data[state_idx01]; } info->backward_loglike = backward_loglike; keep_cur_states_data[state_idx01] = (backward_loglike != minus_inf); }); } /* This function does backward propagation and pruning of arcs and states for a specific time range. @param [in] begin_t Lowest `t` value to call PropagateBackward() for and to prune its arcs and states. Require t >= 0. @param [in] end_t One-past-the-highest `t` value to call PropagateBackward() and to prune its arcs and states. Require that `frames_[t+1]` already be set up; this requires at least end_t <= T. Arcs on frames t >= end_t and states on frame t > end_t are ignored; the backward probs on time end_t are set by SetBackwardProbsFinal(), see its documentation to understand what this does if we haven't yet reached the end of one of the sequences. After this function is done, the arcs for `frames_[t]` with begin_t <= t < end_t and the states for `frames_[t]` with begin_t < t < end_t will have their numbering changed. (We don't renumber the states on begin_t because that would require the dest-states of the arcs on time `begin_t - 1` to be modified). TODO: check this... */ void PruneTimeRange(int32_t begin_t, int32_t end_t) { NVTX_RANGE(K2_FUNC); SetBackwardProbsFinal(frames_[end_t].get()); ContextPtr cpu = GetCpuContext(); int32_t num_fsas = b_fsas_.shape.Dim0(), num_t = end_t - begin_t; Array1<int32_t> old_states_offsets(cpu, num_t + 1), old_arcs_offsets(cpu, num_t + 1); int32_t tot_states = 0, tot_arcs = 0; { int32_t *old_states_offsets_data = old_states_offsets.Data(), *old_arcs_offsets_data = old_arcs_offsets.Data(); for (int32_t i = 0; i <= num_t; i++) { int32_t t = begin_t + i; old_states_offsets_data[i] = tot_states; old_arcs_offsets_data[i] = tot_arcs; if (i < num_t) { tot_states += frames_[t]->arcs.TotSize(1); tot_arcs += frames_[t]->arcs.TotSize(2); } } } // contains respectively: row_splits1_ptrs, row_ids1_ptrs, // row_splits1_ptrs, row_splits2_ptrs, // old_arcs_ptrs (really type ArcInfo*), // old_states_ptrs (really type StateInfo*). 
Array1<void*> old_all_ptrs(cpu, num_t * 6); Renumbering renumber_states(c_, tot_states), renumber_arcs(c_, tot_arcs); { void **all_p = old_all_ptrs.Data(); int32_t **old_row_splits1_ptrs_data = (int32_t**)all_p, **old_row_ids1_ptrs_data = (int32_t**)all_p + num_t, **old_row_splits2_ptrs_data = (int32_t**)all_p + 2 * num_t, **old_row_ids2_ptrs_data = (int32_t**)all_p + 3 * num_t; StateInfo **old_states_ptrs_data = (StateInfo**)all_p + 4 * num_t; ArcInfo **old_arcs_ptrs_data = (ArcInfo**)all_p + 5 * num_t; int32_t *old_states_offsets_data = old_states_offsets.Data(), *old_arcs_offsets_data = old_arcs_offsets.Data(); for (int32_t t = end_t - 1; t >= begin_t; --t) { int32_t i = t - begin_t; Array1<char> this_states_keep = renumber_states.Keep().Arange(old_states_offsets_data[i], old_states_offsets_data[i + 1]), this_arcs_keep = renumber_arcs.Keep().Arange(old_arcs_offsets_data[i], old_arcs_offsets_data[i + 1]); FrameInfo *cur_frame = frames_[t].get(); PropagateBackward(t, cur_frame, frames_[t+1].get(), &this_states_keep, &this_arcs_keep); old_row_splits1_ptrs_data[i] = cur_frame->arcs.RowSplits(1).Data(); old_row_ids1_ptrs_data[i] = cur_frame->arcs.RowIds(1).Data(); old_row_splits2_ptrs_data[i] = cur_frame->arcs.RowSplits(2).Data(); old_row_ids2_ptrs_data[i] = cur_frame->arcs.RowIds(2).Data(); old_arcs_ptrs_data[i] = cur_frame->arcs.values.Data(); old_states_ptrs_data[i] = cur_frame->states.values.Data(); // We can't discard any states on t == begin_t because: if it is not t == // 0, it would be inconvenient to map the dest-states of arcs on t - 1; // and if it is t == 0, this may remove the start-state, which would make // it more complex to avoid invalid FSAs (e.g. with an end-state but no // start-state, or in which we incorrectly interpret a non-start state as // the start state). if (i == 0) // t == begin_t this_states_keep = (char)1; // set all elements of the array // `states_keep` to 1. } } old_states_offsets = old_states_offsets.To(c_); old_arcs_offsets = old_arcs_offsets.To(c_); Array1<int32_t> new_states_offsets = renumber_states.Old2New(true)[old_states_offsets], new_arcs_offsets = renumber_arcs.Old2New(true)[old_arcs_offsets]; int32_t new_num_states = renumber_states.NumNewElems(), new_num_arcs = renumber_arcs.NumNewElems(); // These arrays map to the (t - begin_t) corresponding to this state or arc // in the new numbering, i.e. the frame index minus begin_t. Array1<int32_t> new_state_to_frame(c_, new_num_states), new_arc_to_frame(c_, new_num_arcs); RowSplitsToRowIds(new_states_offsets, &new_state_to_frame); RowSplitsToRowIds(new_arcs_offsets, &new_arc_to_frame); const int32_t *old_states_offsets_data = old_states_offsets.Data(), *new_states_offsets_data = new_states_offsets.Data(), *old_arcs_offsets_data = old_arcs_offsets.Data(), *new_arcs_offsets_data = new_arcs_offsets.Data(), *new_state_to_frame_data = new_state_to_frame.Data(), *new_arc_to_frame_data = new_arc_to_frame.Data(), *states_old2new_data = renumber_states.Old2New().Data(), *states_new2old_data = renumber_states.New2Old().Data(), *arcs_old2new_data = renumber_arcs.Old2New().Data(), *arcs_new2old_data = renumber_arcs.New2Old().Data(); // Allocate the new row_splits and row_ids vectors for the shapes on the // individual frames, and the new arc-info and state-info. Array2<int32_t> all_row_splits1(c_, num_t, num_fsas + 1); auto all_row_splits1_acc = all_row_splits1.Accessor(); Array1<int32_t> all_row_ids1(c_, new_num_states); // the "+ num_t" below is for the extra element of each row_splits array. 
Array1<int32_t> all_row_splits2(c_, new_num_states + num_t); Array1<int32_t> all_row_ids2(c_, new_num_arcs); Array1<StateInfo> all_states(c_, new_num_states); Array1<ArcInfo> all_arcs(c_, new_num_arcs); int32_t *all_row_ids1_data = all_row_ids1.Data(), *all_row_ids2_data = all_row_ids2.Data(), *all_row_splits2_data = all_row_splits2.Data(); StateInfo *all_states_data = all_states.Data(); ArcInfo *all_arcs_data = all_arcs.Data(); old_all_ptrs = old_all_ptrs.To(c_); void **all_p = old_all_ptrs.Data(); K2_EVAL2(c_, num_t, num_fsas + 1, lambda_set_new_row_splits1, (int32_t t_offset, int32_t seq_idx) -> void { // note, t_offset is t - t_start. int32_t *old_row_splits1 = (int32_t*) all_p[t_offset]; int32_t old_idx0x = old_row_splits1[seq_idx]; // "pos" means position in appended states vector // old_start_pos means start for this `t`. int32_t old_start_pos = old_states_offsets_data[t_offset], old_pos = old_start_pos + old_idx0x, new_start_pos = states_old2new_data[old_start_pos], new_pos = states_old2new_data[old_pos], new_idx0x = new_pos - new_start_pos; all_row_splits1_acc(t_offset, seq_idx) = new_idx0x; // TODO: set elem zero of row-splits? if (seq_idx == 0) { // We assign the `seq_idx == 0` version of the kernel to set the initial // zero in each row_splits vector. all_row_splits2_data[new_pos + t_offset] = 0; } }); K2_EVAL(c_, new_num_states, lambda_per_state, (int32_t new_i) -> void { // new_i is position in appended vector of all states. int32_t t_offset = new_state_to_frame_data[new_i], old_state_start_pos = old_states_offsets_data[t_offset], new_arc_start_pos = new_arcs_offsets_data[t_offset], old_arc_start_pos = old_arcs_offsets_data[t_offset], old_i = states_new2old_data[new_i], old_state_idx01 = old_i - old_state_start_pos; // this old_states_data is from its FrameInfo::states. const StateInfo *old_states_data = (StateInfo*)all_p[4 * num_t + t_offset]; const int32_t *old_row_ids1_data = (int32_t*)all_p[1 * num_t + t_offset], *old_row_splits2_data = (int32_t*)all_p[2 * num_t + t_offset]; // set the row-ids1 (these contain FSA-ids). all_row_ids1_data[new_i] = old_row_ids1_data[old_state_idx01]; { // set the row-splits2. // We make each kernel responsible for the *next* row_splits entry, // i.e. for its new_state_idx01 plus one. This solves the problem of no // kernel being responsible for the last row-splits entry. We // separately wrote the zeros for the 1st row-splits entry, in a // previous kernel. // // It's safe to use old_state_idx01+1 instead of doing the same mapping // from new_i+1 that we do from new_i to old_state_idx01, because // we know this state was kept (because it has a new_i index.) int32_t old_arc_idx01x_next = old_row_splits2_data[old_state_idx01+1], old_arc_pos_next = old_arc_idx01x_next + old_arc_start_pos, new_arc_pos_next = arcs_old2new_data[old_arc_pos_next], new_arc_idx01x_next = new_arc_pos_next - new_arc_start_pos; // "+ t_offset" is to compensate for the extra element of each row_splits // vector. The "+ 1" is about the "next", i.e. each kernel is responsible // for the next row_splits element, and none is responsible for the initial zero; // that is set in a previous kernel. 
all_row_splits2_data[new_i + t_offset + 1] = new_arc_idx01x_next; } all_states_data[new_i] = old_states_data[old_state_idx01]; }); K2_EVAL(c_, new_num_arcs, lambda_set_arcs, (int32_t new_i) -> void { // new_i is position in appended vector of all arcs int32_t t_offset = new_arc_to_frame_data[new_i], new_state_start_pos = new_states_offsets_data[t_offset], old_state_start_pos = old_states_offsets_data[t_offset], next_old_state_start_pos = old_states_offsets_data[t_offset + 1], old_arc_start_pos = old_arcs_offsets_data[t_offset], old_i = arcs_new2old_data[new_i], old_arc_idx012 = old_i - old_arc_start_pos; ArcInfo *old_info_data = (ArcInfo*)all_p[5 * num_t + t_offset]; int32_t *old_row_ids2_data = (int32_t*)all_p[3 * num_t + t_offset], *old_row_ids1_data = (int32_t*)all_p[1 * num_t + t_offset], *next_old_row_splits1_data = (int32_t*)all_p[t_offset + 1]; int32_t old_src_state_idx01 = old_row_ids2_data[old_arc_idx012], fsa_idx0 = old_row_ids1_data[old_src_state_idx01], old_src_state_pos = old_src_state_idx01 + old_state_start_pos, new_src_state_pos = states_old2new_data[old_src_state_pos], new_src_state_idx01 = new_src_state_pos - new_state_start_pos; all_row_ids2_data[new_i] = new_src_state_idx01; ArcInfo info = old_info_data[old_arc_idx012]; if (t_offset + 1 == num_t) { // Do nothing; this is the last frame of the batch of frames that we are // pruning, so we don't need to renumber the destination-states of the // arcs leaving it because the next frame's states have not been pruned // (so the numbering stays the same). } else { // idx1 of the state in the next frame's `states` object. int32_t dest_info_state_idx1 = info.u.dest_info_state_idx1; // the naming below is unusual; by "pos" we mean position in the old or // new "all_states" or "all_arcs" vectors, which have all frames appended. // (the new ones physically exist; the old ones don't, but they are the // numberings used in renumber_states.Keep() and renumber_arcs.Keep().) int32_t old_dest_state_idx0x = next_old_row_splits1_data[fsa_idx0], old_dest_state_idx01 = old_dest_state_idx0x + dest_info_state_idx1, old_dest_state_idx0x_pos = next_old_state_start_pos + old_dest_state_idx0x, old_dest_state_idx01_pos = next_old_state_start_pos + old_dest_state_idx01, new_dest_state_idx0x_pos = states_old2new_data[old_dest_state_idx0x_pos], new_dest_state_idx01_pos = states_old2new_data[old_dest_state_idx01_pos], new_dest_state_idx1 = new_dest_state_idx01_pos - new_dest_state_idx0x_pos; info.u.dest_info_state_idx1 = new_dest_state_idx1; } all_arcs_data[new_i] = info; }); // Now reconstruct the states and arcs for all the frames we pruned, from // sub-parts of the arrays we just created. new_states_offsets = new_states_offsets.To(cpu); new_arcs_offsets = new_arcs_offsets.To(cpu); new_states_offsets_data = new_states_offsets.Data(); new_arcs_offsets_data = new_arcs_offsets.Data(); for (int32_t i = 0; i < num_t; i++) { // i corresponds to "t_offset". int32_t state_offset = new_states_offsets_data[i], next_state_offset = new_states_offsets_data[i + 1], arc_offset = new_arcs_offsets_data[i], next_arc_offset = new_arcs_offsets_data[i + 1]; // next line: operator[] into Array2 gives Array1, one row. 
Array1<int32_t> row_splits1 = all_row_splits1.Row(i), row_ids1 = all_row_ids1.Arange(state_offset, next_state_offset), row_splits2 = all_row_splits2.Arange(state_offset + i, next_state_offset + (i+1)), row_ids2 = all_row_ids2.Arange(arc_offset, next_arc_offset); Array1<ArcInfo> arcs = all_arcs.Arange(arc_offset, next_arc_offset); RaggedShape arcs_shape = RaggedShape3(&row_splits1, &row_ids1, -1, &row_splits2, &row_ids2, -1); int32_t t = begin_t + i; frames_[t]->arcs = Ragged<ArcInfo>(arcs_shape, arcs); Array1<StateInfo> states = all_states.Arange(state_offset, next_state_offset); RaggedShape states_shape = GetLayer(arcs_shape, 0); frames_[t]->states = Ragged<StateInfo>(states_shape, states); } } ContextPtr c_; FsaVec &a_fsas_; // Note: a_fsas_ has 3 axes. int32_t a_fsas_stride_; // 1 if we use a different FSA per sequence // (a_fsas_.Dim0() > 1), 0 if the decoding graph is // shared (a_fsas_.Dim0() == 1). DenseFsaVec &b_fsas_; int32_t T_; // == b_fsas_.shape.MaxSize(1). float search_beam_; float output_beam_; int32_t min_active_; int32_t max_active_; Array1<float> dynamic_beams_; // dynamic beams (initially just search_beam_ // but change due to max_active/min_active // constraints). int32_t state_map_fsa_stride_; // state_map_fsa_stride_ is a_fsas_.TotSize(1) // if a_fsas_.Dim0() == 1, else 0. Hash state_map_; // state_map_ maps from: // key == (state_map_fsa_stride_*n) + a_fsas_state_idx01, // where n is the fsa_idx, i.e. the index into b_fsas_ // to // value, where at different stages of PropagateForward(), // value is an arc_idx012 (into cur_frame->arcs), and // then later a state_idx01 into the next frame's `state` // member. // The 1st dim is needed because If all the // streams share the same FSA in a_fsas_, we need // separate maps for each). This map is used on // each frame to compute and store the mapping // from active states to the position in the // `states` array. Between frames, all values // have -1 in them. std::vector<std::unique_ptr<FrameInfo>> frames_; // logically an array of bool, of size T_ + 1; for each 0 <= t <= T, after the // forward pass finishes propagation with cur_frame_ == t, if // do_pruning_after_[t] is false it will continue as normal; otherwise (if // true), it will signal `semaphore_`. std::vector<char> do_pruning_after_; // For each t for which do_pruning_after_[t] is true, there will be a // pair (begin_t, end_t) in prune_t_begin_end giving the // arguments for which we will invoke PruneTimeRange() after the forward-pass // for time t has completed. The size of this array equals the sum // of nonzero elements of do_pruning_after_. std::vector<std::pair<int32_t, int32_t> > prune_t_begin_end_; // Each time the forward-pass finishes forward processing for a t value for // which do_pruning_after_[t] is true, it will signal this semaphore; the // backward-pass thread (which does pruning) will wait on it as many times as // do_pruning_after_[t] is set to true. Semaphore backward_semaphore_; // The function of forward_semaphore_ is to ensure that the backward (pruning) // pass doesn't "get too far behind" relative to the forward pass, which might // cause us to use more memory than expected. (Note: the backward pass is // normally a bit faster than the forward pass, so typically this won't be a // problem). Each time the backward pass has finished one round of pruning it // signals this semaphore. each time after the forward pass signals the // backward pass that it's ready to prune, it waits on this semaphore // immediately afterward. 
  // But because forward_semaphore_ is initialized to 1
  // rather than zero, the effect is that the forward pass is waiting for the
  // *previous* phase of backward pruning to complete, rather than the current
  // one.
  k2std::counting_semaphore forward_semaphore_;
};

void IntersectDensePruned(FsaVec &a_fsas, DenseFsaVec &b_fsas,
                          float search_beam, float output_beam,
                          int32_t min_active_states, int32_t max_active_states,
                          FsaVec *out, Array1<int32_t> *arc_map_a,
                          Array1<int32_t> *arc_map_b) {
  NVTX_RANGE("IntersectDensePruned");
  FsaVec a_vec = FsaToFsaVec(a_fsas);
  MultiGraphDenseIntersectPruned intersector(a_vec, b_fsas, search_beam,
                                             output_beam, min_active_states,
                                             max_active_states);
  intersector.Intersect();
  intersector.FormatOutput(out, arc_map_a, arc_map_b);
}

}  // namespace k2
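// ---------------------------------------------------------------------------
// Illustrative sketch (not part of k2): the per-FSA dynamic-beam update that
// GetPruningCutoffs() above applies inside its lambda, rewritten as a plain
// host-side function so the adaptation rule is easier to follow in isolation.
// The function name and parameter names are hypothetical; the constants
// 0.8 / 0.2 / 1.25 mirror the kernel body. The pruning cutoff for an FSA is
// then best_loglike minus the returned beam.
static inline float UpdateDynamicBeamSketch(float dynamic_beam,
                                            float default_beam,
                                            int active_states,
                                            int min_active, int max_active) {
  if (active_states <= max_active) {
    if (active_states >= min_active || active_states == 0) {
      // Neither constraint applies: decay toward the configured search beam.
      dynamic_beam = 0.8f * dynamic_beam + 0.2f * default_beam;
    } else {
      // Too few active states survived: widen the beam.
      if (dynamic_beam < default_beam) dynamic_beam = default_beam;
      dynamic_beam *= 1.25f;
    }
  } else {
    // Too many active states: shrink the beam.
    if (dynamic_beam > default_beam) dynamic_beam = default_beam;
    dynamic_beam *= 0.8f;
  }
  return dynamic_beam;
}
// ---------------------------------------------------------------------------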
using namespace std::chrono; #define warp_size 32 #define SharedPartDevice 64 #define FULL_MASK 0xffffffff #define NBytes Nuints #define PRINT 0 #define Number_of_Diagonals 9 #define F_ReadLength 100 #define BitVal(data,y) ( (data>>y) & 1) // Return Data.Y value #define SetBit(data,y) data |= (1 << y) // Set Data.Y to 1 __global__ void sneaky_snake(const uint* F_ReadSeq, const uint* F_RefSeq, int* Ftest_Results, const int NumReads, const int F_ErrorThreshold) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= NumReads) return; // const int NBytes = 8; uint ReadsPerThread[NBytes]; uint RefsPerThread[NBytes]; #pragma unroll for (int i = 0; i < NBytes; i++) { ReadsPerThread[i] = F_ReadSeq[tid*8 + i]; RefsPerThread[i] = F_RefSeq[tid*8 + i]; } ///////////////////////////////////////////////////////////////////////////// Ftest_Results[tid] = 1; uint ReadCompTmp = 0; uint RefCompTmp = 0; uint DiagonalResult = 0; uint ReadTmp1 = 0; uint ReadTmp2 = 0; uint RefTmp1 = 0; uint RefTmp2 = 0; uint CornerCase = 0; int localCounter= 0; int localCounterMax=0; int globalCounter = 0; int Max_leading_zeros = 0; int AccumulatedErrs = 0; int ShiftValue = 0; int Diagonal = 0; int j = 0; //specifying the j-th uint that we are reading in each read-ref comparison (can be from 0 to 7) while ( (j < 7) && (globalCounter < 200)) { Diagonal = 0; RefTmp1 = RefsPerThread[j] << ShiftValue; RefTmp2 = RefsPerThread[j + 1] >> 32 - ShiftValue; ReadTmp1 = ReadsPerThread[j] << ShiftValue; ReadTmp2 = ReadsPerThread[j + 1] >> 32 - ShiftValue; ReadCompTmp = ReadTmp1 | ReadTmp2; RefCompTmp = RefTmp1 | RefTmp2; DiagonalResult = ReadCompTmp ^ RefCompTmp; localCounterMax = __clz(DiagonalResult); //////////////////// Upper diagonals ///////////////////// for(int e = 1; e <= F_ErrorThreshold; e++) { Diagonal += 1; CornerCase = 0; if ( (j == 0) && ( (ShiftValue - (2*e)) < 0 ) ) { ReadTmp1 = ReadsPerThread[j] >> ( (2*e) - ShiftValue ); ReadTmp2 = 0; ReadCompTmp = ReadTmp1 | ReadTmp2; RefCompTmp = RefTmp1 | RefTmp2; DiagonalResult = ReadCompTmp ^ RefCompTmp; CornerCase = 0; for(int Ci = 0; Ci < (2*e) - ShiftValue; Ci++) { SetBit(CornerCase, 31 - Ci); } DiagonalResult = DiagonalResult | CornerCase; localCounter = __clz(DiagonalResult); } else if ( (ShiftValue - (2*e) ) < 0 ) { ReadTmp1 = ReadsPerThread[j-1] << 32 - ( (2*e) - ShiftValue ); ReadTmp2 = ReadsPerThread[j] >> (2*e) - ShiftValue; ReadCompTmp = ReadTmp1 | ReadTmp2; RefCompTmp = RefTmp1 | RefTmp2; DiagonalResult = ReadCompTmp ^ RefCompTmp; localCounter = __clz(DiagonalResult); } else { ReadTmp1 = ReadsPerThread[j] << ShiftValue - (2*e); ReadTmp2 = ReadsPerThread[j+1] >> 32 - (ShiftValue - (2*e) ) ; ReadCompTmp = ReadTmp1 | ReadTmp2; RefCompTmp = RefTmp1 | RefTmp2; DiagonalResult = ReadCompTmp ^ RefCompTmp; localCounter = __clz(DiagonalResult); } if (localCounter>localCounterMax) localCounterMax=localCounter; } /* sh = shift up = upper diagonal RC = ReadCompTmp FC = RefCompTmp D = DiagonalResult DN = diagonal LC = localCounter */ //////////////////// Lower diagonals ///////////////////// for(int e = 1; e <= F_ErrorThreshold; e++) { Diagonal += 1; CornerCase = 0; if ( j<5)// ( (globalCounter + ShiftValue + (2*e) + 32) < 200) ) { if ( (ShiftValue + (2*e) ) < 32) { ReadTmp1 = ReadsPerThread[j] << ShiftValue + (2*e); ReadTmp2 = ReadsPerThread[j+1] >> 32 - ( ShiftValue + (2*e) ); ReadCompTmp = ReadTmp1 | ReadTmp2; RefCompTmp = RefTmp1 | RefTmp2; DiagonalResult = ReadCompTmp ^ RefCompTmp; localCounter = __clz(DiagonalResult); } else { ReadTmp1 = ReadsPerThread[j+1] << ( ShiftValue 
+ (2*e) ) % 32; ReadTmp2 = ReadsPerThread[j+2] >> 32 - ( ( ShiftValue + (2*e) ) % 32 ); ReadCompTmp = ReadTmp1 | ReadTmp2; RefCompTmp = RefTmp1 | RefTmp2; DiagonalResult = 0xffffffff;//ReadCompTmp ^ RefCompTmp; DiagonalResult = ReadCompTmp ^ RefCompTmp; localCounter = __clz(DiagonalResult); } } else { //printf("HI3"); ReadTmp1 = ReadsPerThread[j] << ShiftValue + (2*e); ReadTmp2 = ReadsPerThread[j+1] >> 32 - ( ShiftValue + (2*e) ); ReadCompTmp = ReadTmp1 | ReadTmp2; RefCompTmp = RefTmp1 | RefTmp2; DiagonalResult = ReadCompTmp ^ RefCompTmp; CornerCase = 0; if ((globalCounter+32)>200 ) { for(int Ci = ((globalCounter+32)-200); Ci < (((globalCounter+32)-200)+ 2*e); Ci++) { SetBit(CornerCase, Ci); } } else if ((globalCounter+32)>=(200- (2*e))){ for(int Ci = 0; Ci < (2*e); Ci++) { SetBit(CornerCase, Ci); } } DiagonalResult = DiagonalResult | CornerCase; localCounter = __clz(DiagonalResult); } if (localCounter>localCounterMax) localCounterMax=localCounter; } /* CC = CornerCase sh = shift up = upper diagonal RC = ReadCompTmp FC = RefCompTmp D = DiagonalResult DN = diagonal LC = localCounter */ Max_leading_zeros = 0; if ( (j == 6) && ( ((localCounterMax/2)*2) >= 8) ) { Max_leading_zeros = 8; break; } else if( ((localCounterMax/2)*2) > Max_leading_zeros) { Max_leading_zeros = ((localCounterMax/2)*2); } if ( ( (Max_leading_zeros/2) < 16) && (j < 5) ) { AccumulatedErrs += 1; } else if ( (j == 6) && ( (Max_leading_zeros/2) < 4) ) { AccumulatedErrs += 1; } if(AccumulatedErrs > F_ErrorThreshold) { Ftest_Results[tid] = 0; break; } if(ShiftValue + Max_leading_zeros + 2 >= 32) { j += 1; } // ShiftValue_2Ref = (ShiftValue_2Ref + Max_leading_zeros + 2) %32; if (Max_leading_zeros == 32) { globalCounter += Max_leading_zeros; } else { ShiftValue = ((ShiftValue + Max_leading_zeros + 2) % 32); globalCounter += (Max_leading_zeros + 2); } } } int main(int argc, const char * const argv[]) { if (argc != 4) { printf("Incorrect arguments..\nUsage: ./%s [ReadLength] [ReadandRefFile] [#reads]\n", argv[0]); exit(-1); } int ReadLength = atoi(argv[1]);//in my inputs, it is always 100. Just for the generality we keep it as a variable int NumReads = atoi(argv[3]); // Number of reads int Size_of_uint_in_Bit = 32; //in Bits FILE * fp; char * line = NULL; size_t len = 0; ssize_t read; char *p;//when reading each char_basepair from the file, we read it into the p. 
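// The parsing loop below packs each base into 2 bits (A=00, C=01, G=10, T=11),
// 16 bases per uint and most-significant bits first, so a 100-base read and
// its reference each occupy 200 bits, i.e. the first 7 of the 8 uints
// allocated per sequence in ReadSeq/RefSeq.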
int Number_of_warps_inside_each_block = 8; int Concurrent_threads_In_Block = warp_size * Number_of_warps_inside_each_block; int Number_of_blocks_inside_each_kernel = (NumReads + Concurrent_threads_In_Block - 1) / Concurrent_threads_In_Block; int F_ErrorThreshold =0; uint* ReadSeq = (uint * ) calloc(NumReads * 8, sizeof(uint)); uint* RefSeq = (uint * ) calloc(NumReads * 8, sizeof(uint)); int* DFinal_Results = (int * ) calloc(NumReads, sizeof(int)); int tokenIndex=1; fp = fopen(argv[2], "r"); if (!fp){ printf("Sorry, the file does not exist or you do not have access permission\n"); return 0; } for(int this_read = 0; this_read < NumReads; this_read++) { read = getline(&line, &len, fp); tokenIndex=1; for (p = strtok(line, "\t"); p != NULL; p = strtok(NULL, "\t")) { if (tokenIndex==1) { for (int j = 0; j < ReadLength; j++) { if(p[j] == 'A') { //do nothing (this is like storing 00) } else if (p[j] == 'C') { ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2 + 1)); } else if (p[j] == 'G') { ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2)); } else if (p[j] == 'T') { ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2)); ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2 + 1)); } //printf("%c",p[j]); //printf(" %08x", ReadSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)]); } } else if(tokenIndex==2) { for (int j = 0; j < ReadLength; j++) { if(p[j] == 'A') { //do nothing (this is like storing 00) } else if (p[j] == 'C') { RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2 + 1)); } else if (p[j] == 'G') { RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2)); } else if (p[j] == 'T') { RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2)); RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)] = SetBit(RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)], 31 - ((j%(Size_of_uint_in_Bit/2)) * 2 + 1)); } //printf("%c",p[j]); //printf(" %08x", RefSeq[((j*2/Size_of_uint_in_Bit) + this_read * NBytes)]); } } tokenIndex=tokenIndex+1; } } fclose(fp); uint* Dev_ReadSeq; uint* Dev_RefSeq; int* Dev_Results; cudaMalloc((void**)&Dev_ReadSeq, sizeof(uint) * NumReads * 8); cudaMalloc((void**)&Dev_RefSeq, sizeof(uint) * NumReads * 8); cudaMalloc((void**)&Dev_Results, sizeof(int) * NumReads); dim3 grid (Number_of_blocks_inside_each_kernel); dim3 block (Concurrent_threads_In_Block); cudaMemcpy(Dev_ReadSeq, ReadSeq, sizeof(uint) * NumReads * 8, cudaMemcpyHostToDevice); cudaMemcpy(Dev_RefSeq, RefSeq, sizeof(uint) * NumReads * 8, cudaMemcpyHostToDevice); for (int n = 0; n < 100; n++) { for (int loopPar = 0; loopPar <= 25; loopPar++) { F_ErrorThreshold = (loopPar*ReadLength)/100; high_resolution_clock::time_point t1 = high_resolution_clock::now(); sneaky_snake<<<grid, block>>>(Dev_ReadSeq, Dev_RefSeq, Dev_Results, NumReads, F_ErrorThreshold); 
      cudaMemcpy(DFinal_Results, Dev_Results, sizeof(int) * NumReads,
                 cudaMemcpyDeviceToHost);
      high_resolution_clock::time_point t2 = high_resolution_clock::now();
      double elapsed_time = duration_cast<microseconds>(t2 - t1).count();

      int accepted = 0;
      for (int i = 0; i < NumReads; i++) {
        if (DFinal_Results[i] == 1)
          accepted += 1;
      }

      printf("E: \t %d \t Snake-on-GPU: \t %5.4f \t Accepted: \t %10d \t Rejected: \t %10d\n",
             F_ErrorThreshold, elapsed_time, accepted, NumReads - accepted);
    }
  }

  free(ReadSeq);
  free(RefSeq);
  free(DFinal_Results);

  cudaFree(Dev_ReadSeq);
  cudaFree(Dev_RefSeq);
  cudaFree(Dev_Results);

  return 0;
}
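// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original program): the core comparison
// step that sneaky_snake() performs per diagonal, shown on the host. Two
// 2-bit-packed words are XORed, and the number of leading zero bits (the
// length of the matching prefix, in bits) is counted; dividing by two gives
// the number of leading bases that agree. The helper name is hypothetical;
// the kernel uses CUDA's __clz where this sketch uses __builtin_clz.
static inline int LeadingMatchingBases(unsigned read_word, unsigned ref_word) {
  unsigned diff = read_word ^ ref_word;            // zero bits where bases agree
  int leading_zero_bits = (diff == 0u) ? 32 : __builtin_clz(diff);
  return leading_zero_bits / 2;                    // two bits per base
}
// ---------------------------------------------------------------------------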
#include "smplx/smplx.hpp" #include "smplx/util.hpp" #include "smplx/internal/cuda_util.cuh" namespace smplx { namespace { using cuda_util::device::BLOCK_SIZE; using cuda_util::from_host_eigen_sparse_matrix; using cuda_util::from_host_eigen_matrix; using cuda_util::to_host_eigen_matrix; namespace device { /** Rodrigues formula: d_pose_full (#joints,3) -> out(#joints,9); * also copies out to upper-left 3x3 part of out_joint_local_transform * (#joints,12) * Note this is no longer used; however I have not deleted it since * it may be useful in the future */ /* __global__ void rodrigues(float* RESTRICT d_pose_full, float* RESTRICT out, float* RESTRICT out_joint_local_transform) { const int in_idx = threadIdx.x * 3; const int out_idx = threadIdx.x * 9; const int out_transform_idx = threadIdx.x * 12; float theta = norm3df(d_pose_full[in_idx], d_pose_full[in_idx + 1], d_pose_full[in_idx + 2]); if (fabsf(theta) < 1e-5f) { for (int i = out_idx; i < out_idx + 9; ++i) { out[i] = 0.f; } out_joint_local_transform[out_transform_idx + 1] = out_joint_local_transform[out_transform_idx + 2] = out_joint_local_transform[out_transform_idx + 4] = out_joint_local_transform[out_transform_idx + 6] = out_joint_local_transform[out_transform_idx + 8] = out_joint_local_transform[out_transform_idx + 9] = 0.f; out_joint_local_transform[out_transform_idx] = out_joint_local_transform[out_transform_idx + 5] = out_joint_local_transform[out_transform_idx + 10] = 1.f; } else { float cm1 = cos(theta) - 1.f; float s = sin(theta); const float a = d_pose_full[in_idx] /= theta; const float b = d_pose_full[in_idx + 1] /= theta; const float c = d_pose_full[in_idx + 2] /= theta; out[out_idx] = cm1; out[out_idx + 1] = -s * c; out[out_idx + 2] = s * b; out[out_idx + 3] = s * c; out[out_idx + 4] = cm1; out[out_idx + 5] = -s * a; out[out_idx + 6] = -s * b; out[out_idx + 7] = s * a; out[out_idx + 8] = cm1; for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { out_joint_local_transform[out_transform_idx + j * 4 + k] = (out[out_idx + j * 3 + k] -= cm1 * d_pose_full[in_idx + j] * d_pose_full[in_idx + k]); } // Un-subtract identity out_joint_local_transform[out_transform_idx + j * 4 + j] += 1.f; } } } */ /** Joint regressor: multiples sparse matrix in CSR represented by * (model_jr_values(nnz), ..inner(nnz), ..outer(#joints+1)) to * d_verts_shaped(#verts,3) row-major * -> outputs to out(#joints, 3) row-major * TODO: Optimize. The matrix is very wide and this is not efficient */ __global__ void joint_regressor(float* RESTRICT d_verts_shaped, float* RESTRICT model_jr_values, int* RESTRICT model_jr_inner, int* RESTRICT model_jr_outer, float* RESTRICT out_joints) { const int joint = threadIdx.y, idx = threadIdx.x; out_joints[joint * 3 + idx] = 0.f; for (int i = model_jr_outer[joint]; i < model_jr_outer[joint + 1]; ++i) { out_joints[joint * 3 + idx] += model_jr_values[i] * d_verts_shaped[model_jr_inner[i] * 3 + idx]; } } /** Linear blend skinning kernel. 
* d_joint_global_transform (#joints, 12) row-major; * global-space homogeneous transforms (bottom row dropped) * at each joint from local_to_global * d_points_shaped (#points, 3) row-major; vertices after blendshapes applied * (model_weights_values(nnz), ..inner(nnz), ..outer(#joints+1)) sparse LBS weights in CSR * -> out_verts(#points, 3) resulting vertices after deformation */ __global__ void lbs(float* RESTRICT d_joint_global_transform, float* RESTRICT d_verts_shaped, float* RESTRICT model_weights_values, int* RESTRICT model_weights_inner, int* RESTRICT model_weights_outer, float* RESTRICT out_verts, // transformed joint pos const int n_joints, const int n_verts) { const int vert = blockDim.x * blockIdx.x + threadIdx.x; // Vert idx if (vert < n_verts) { for (int i = 0; i < 3; ++i) { out_verts[vert * 3 + i] = 0.f; for (int joint_it = model_weights_outer[vert]; joint_it < model_weights_outer[vert + 1]; ++joint_it) { const int joint_row_idx = model_weights_inner[joint_it] * 12 + i * 4; for (int j = 0; j < 3; ++j) { out_verts[vert * 3 + i] += model_weights_values[joint_it] * d_joint_global_transform[joint_row_idx + j] * d_verts_shaped[vert * 3 + j]; } out_verts[vert * 3 + i] += model_weights_values[joint_it] * d_joint_global_transform[joint_row_idx + 3]; } } } } } // namespace device } // namespace /* struct { float* params = nullptr; float* verts = nullptr; float* blendshape_params = nullptr; float* joint_transforms = nullptr; } device; */ template<class ModelConfig> __host__ void Body<ModelConfig>::_cuda_load() { cudaCheck(cudaMalloc((void**)&device.verts, model.n_verts() * 3 * sizeof(float))); cudaCheck(cudaMalloc((void**)&device.blendshape_params, model.n_blend_shapes() * sizeof(float))); cudaCheck(cudaMalloc((void**)&device.joint_transforms, model.n_joints() * 12 * sizeof(float))); cudaCheck(cudaMalloc((void**)&device.verts_shaped, model.n_verts() * 3 * sizeof(float))); cudaCheck(cudaMalloc((void**)&device.joints_shaped, model.n_joints() * 3 * sizeof(float))); } template<class ModelConfig> __host__ void Body<ModelConfig>::_cuda_free() { if (device.verts) cudaFree(device.verts); if (device.blendshape_params) cudaFree(device.blendshape_params); if (device.joint_transforms) cudaFree(device.joint_transforms); if (device.verts_shaped) cudaFree(device.verts_shaped); if (device.joints_shaped) cudaFree(device.joints_shaped); } template<class ModelConfig> __host__ void Body<ModelConfig>::_cuda_maybe_retrieve_verts() const { if (!_verts_retrieved) { _verts.resize(model.n_verts(), 3); cudaMemcpy(_verts.data(), device.verts, _verts.size() * sizeof(float), cudaMemcpyDeviceToHost); _verts_retrieved = true; } } template<class ModelConfig> __host__ void Body<ModelConfig>::_cuda_maybe_retrieve_verts_shaped() const { if (!_verts_shaped_retrieved) { _verts_shaped.resize(model.n_verts(), 3); cudaMemcpy(_verts_shaped.data(), device.verts_shaped, _verts_shaped.size() * sizeof(float), cudaMemcpyDeviceToHost); _verts_shaped_retrieved = true; } } template<class ModelConfig> SMPLX_HOST void Body<ModelConfig>::_cuda_update( float* h_blendshape_params, float* h_joint_transforms, bool enable_pose_blendshapes) { // Verts will be updated _verts_retrieved = false; _verts_shaped_retrieved = false; // Copy parameters to GPU cudaCheck(cudaMemcpyAsync(device.blendshape_params, h_blendshape_params, ModelConfig::n_blend_shapes() * sizeof(float), cudaMemcpyHostToDevice)); // Shape blendshapes cudaCheck(cudaMemcpyAsync(device.verts_shaped, model.device.verts, model.n_verts() * 3 * sizeof(float), cudaMemcpyDeviceToDevice)); 
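        // The mmv_block call below multiplies the (3 * n_verts) x n_shape_blends
        // blend-shape matrix by the shape coefficients and accumulates the
        // resulting per-vertex offsets into verts_shaped, which was just
        // initialised to the template vertices by the copy above.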
cuda_util::mmv_block<float, true>(model.device.blend_shapes, device.blendshape_params, device.verts_shaped, ModelConfig::n_verts() * 3, ModelConfig::n_shape_blends()); // Joint regressor // TODO: optimize sparse matrix multiplication, maybe use ELL format dim3 jr_blocks(3, model.n_joints()); device::joint_regressor<<<1, jr_blocks>>>( device.verts_shaped, model.device.joint_reg.values, model.device.joint_reg.inner, model.device.joint_reg.outer, device.joints_shaped); if (enable_pose_blendshapes) { // Pose blendshapes. // Note: this is the most expensive operation. cuda_util::mmv_block<float, true>(model.device.blend_shapes + ModelConfig::n_shape_blends() * 3 * ModelConfig::n_verts(), device.blendshape_params + ModelConfig::n_shape_blends(), device.verts_shaped, ModelConfig::n_verts() * 3, ModelConfig::n_pose_blends()); } // Compute global joint transforms, this part can't be parallized and // is horribly slow on GPU; we do it on CPU instead // Actually, this is pretty bad too, TODO try implementing on GPU again cudaCheck(cudaMemcpyAsync(_joints_shaped.data(), device.joints_shaped, model.n_joints() * 3 * sizeof(float), cudaMemcpyDeviceToHost)); _local_to_global(); cudaCheck(cudaMemcpyAsync(device.joint_transforms, _joint_transforms.data(), _joint_transforms.size() * sizeof(float), cudaMemcpyHostToDevice)); // weights: (#verts, #joints) device::lbs<<<(model.verts.size() - 1) / BLOCK_SIZE + 1, BLOCK_SIZE>>>( device.joint_transforms, device.verts_shaped, model.device.weights.values, model.device.weights.inner, model.device.weights.outer, device.verts, model.n_joints(), model.n_verts()); } // Instantiation template class Body<model_config::SMPL>; template class Body<model_config::SMPLH>; template class Body<model_config::SMPLX>; template class Body<model_config::SMPLXpca>; template class Body<model_config::SMPLX_v1>; template class Body<model_config::SMPLXpca_v1>; } // namespace smplx
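// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the library above): a host-side reference
// of the CSR-weighted linear blend skinning performed by the `lbs` kernel.
// Function and parameter names here (lbs_reference, w_values, ...) are
// hypothetical; the layout assumptions follow the kernel itself:
// joint_transforms is (#joints, 12) row-major (3x4 transforms, bottom row
// dropped), verts_shaped is (#verts, 3) row-major, and the skinning weights
// are CSR with one row per vertex (as indexed in the kernel body). Intended
// only as a CPU check against the GPU output.
// ---------------------------------------------------------------------------
#include <vector>
#include <cstddef>

inline void lbs_reference(const std::vector<float>& joint_transforms, // (#joints, 12)
                          const std::vector<float>& verts_shaped,     // (#verts, 3)
                          const std::vector<float>& w_values,         // CSR values
                          const std::vector<int>&   w_inner,          // CSR joint indices
                          const std::vector<int>&   w_outer,          // CSR row offsets, size #verts+1
                          std::vector<float>&       out_verts) {      // (#verts, 3)
    const std::size_t n_verts = w_outer.size() - 1;
    out_verts.assign(n_verts * 3, 0.f);
    for (std::size_t v = 0; v < n_verts; ++v) {
        for (int it = w_outer[v]; it < w_outer[v + 1]; ++it) {
            const float w = w_values[it];
            const float* T = &joint_transforms[w_inner[it] * 12]; // 3x4 transform of this joint
            for (int i = 0; i < 3; ++i) {
                float acc = T[i * 4 + 3];                          // translation column
                for (int j = 0; j < 3; ++j)
                    acc += T[i * 4 + j] * verts_shaped[v * 3 + j]; // rotate/scale part
                out_verts[v * 3 + i] += w * acc;                   // blend by sparse weight
            }
        }
    }
}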
#include "Curve.h" #include <iostream> #include <fstream> using namespace std; // 宏:SMCURVE_VALID 和 SMCURVE_NOT_VALID // 定义了 smCurveCordiXY 是否有效的范围值。 #define SMCURVE_VALID 1 #define SMCURVE_NOT_VALID 0 // 宏:TANGENT_VALID 和 TANGENT_NOT_VALID // 定义了 tangent 是否有效的范围值。 #define TANGENT_VALID 1 #define TANGENT_NOT_VALID 0 // Host 静态方法:_calCrvXY(得到某曲线数据对应的属性坐标) // 计算得到输入数据的起点坐标、终点坐标、最右点的 x 坐标、最左点的 x 坐标、 // 最下点的 y 坐标、最上点的 y 坐标、曲线坐标的平均值,并将得到的属性值赋 // 给输入曲线内相对应的属性值。 static __host__ int _calCrvXY( Curve *incrv, // 输入曲线 int *data, // 输入坐标数据 int size // 输入坐标数据的大小 ); // Host 静态方法:_calCrvXY(得到某曲线数据对应的属性坐标) __host__ int _calCrvXY(Curve *incrv, int *data, int size) { // 检查输入曲线和输入坐标数据是否为 NULL。 if (incrv == NULL || data == NULL) return NULL_POINTER; // 检查输入数据的大小是否小于 1。 if (size < 1) return INVALID_DATA; // 定义一个一维数组用于保存计算得到的四个属性坐标,初始化为坐标数据的 // 第一个值。 int tempXY[4] = { data[0], data[0], data[0], data[0] }; // 定义曲线坐标的和。 int sumX = 0; int sumY = 0; // 遍历输入数据,计算属性坐标。 for (int i = 0; i < size; i++) { if (data[2 * i] > tempXY[0]) { // 计算曲线上的最右点的 x 坐标 tempXY[0] = data[2 * i]; } else if(data[2 * i] < tempXY[1]) { // 计算曲线上的最左点的 x 坐标 tempXY[1] = data[2 * i]; } if (data[2 * i + 1] > tempXY[2]) { // 计算曲线上的最下点的 y 坐标 tempXY[2] = data[2 * i + 1]; } else if(data[2 * i + 1] < tempXY[3]) { // 计算曲线上的最上点的 y 坐标 tempXY[3] = data[2 * i + 1]; } // 分别计算 x、y坐标的和。 sumX += data[2 * i]; sumY += data[2 * i + 1]; } // 将计算得到的属性值赋给曲线 incrv->maxCordiX = tempXY[0]; incrv->minCordiX = tempXY[1]; incrv->maxCordiY = tempXY[2]; incrv->minCordiY = tempXY[3]; incrv->aveX = sumX / size; incrv->aveY = sumY / size; // 根据曲线内点首尾两点是否互为 8 邻域来判断曲线是否闭合。 int diffX = data[0] - data[(size - 1) * 2]; int diffY = data[1] - data[(size - 1) * 2 + 1]; if (diffX >= -1 && diffX <= 1 && diffY >= -1 && diffY <= 1) { incrv->closed = true; incrv->startCordiX = incrv->endCordiX = data[0]; incrv->startCordiY = incrv->endCordiY = data[1]; } else { incrv->closed = false; incrv->startCordiX = data[0]; incrv->startCordiY = data[1]; incrv->endCordiX = data[(size - 1) * 2]; incrv->endCordiY = data[(size - 1) * 2 + 1]; } return NO_ERROR; } // Host 静态方法:newCurve(创建曲线) __host__ int CurveBasicOp::newCurve(Curve **outcrv) { // 检查用于盛放新曲线的指针是否为 NULL。 if (outcrv == NULL) return NULL_POINTER; // 申请一个新的 CurveCuda 型数据,本方法最后会将其中的 crvMeta 域返回给 // outcrv,这样 outcrv 就有了一个对应的 CurveCuda 型伴随数据。 CurveCuda *crvCud = new CurveCuda; // 初始化各种元数据。 crvCud->crvMeta.closed = 0; crvCud->crvMeta.startCordiX = 0; crvCud->crvMeta.startCordiY = 0; crvCud->crvMeta.endCordiX = 0; crvCud->crvMeta.endCordiY = 0; crvCud->crvMeta.maxCordiX = 0; crvCud->crvMeta.minCordiX = 0; crvCud->crvMeta.maxCordiY = 0; crvCud->crvMeta.minCordiY = 0; crvCud->crvMeta.aveX = 0; crvCud->crvMeta.aveY = 0; crvCud->crvMeta.curveLength = 0; crvCud->crvMeta.crvData = NULL; crvCud->crvMeta.smWindowSize = 0; crvCud->crvMeta.smCurveCordiXY = NULL; crvCud->crvMeta.tangent = NULL; crvCud->crvMeta.geoProperty = false; crvCud->crvMeta.primaryProperties = NULL; crvCud->capacity = 0; crvCud->deviceId = -1; crvCud->smCurveIsValid = SMCURVE_NOT_VALID; crvCud->tangentIsValid = TANGENT_NOT_VALID; // 将 CurveCuda 型数据中的 crvMeta 赋值给输出参数。 *outcrv = &(crvCud->crvMeta); // 处理完毕,退出。 return NO_ERROR; } // Host 静态方法:deleteCurve(销毁曲线) __host__ int CurveBasicOp::deleteCurve(Curve *incrv) { // 检查曲线的指针是否为 NULL。 if (incrv == NULL) return NULL_POINTER; // 根据输入参数的 Curve 指针,得到对应的 CurveCuda 型数据。 CurveCuda *incrvCud = CURVE_CUDA(incrv); // 检查曲线所在的地址空间是否合法,如果曲线所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (incrvCud->deviceId 
>= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 释放曲线数据。 if (incrv->crvData == NULL || incrv->curveLength == 0) { // 如果输入曲线是空的,则不进行曲线数据释放操作(因为本来也没有数据可被 // 释放)。 // Do Nothing; } if (incrvCud->deviceId < 0) { // 对于数据存储于 Host 内存,直接利用 delete 关键字释放曲线数据。 delete[] incrv->crvData; // 如果 smoothed 曲线有效,则释放数据。 if (incrvCud->smCurveIsValid == SMCURVE_VALID) delete[] incrv->smCurveCordiXY; // 如果曲线斜率数据有效,则释放数据。 if (incrvCud->tangentIsValid == TANGENT_VALID) delete[] incrv->tangent; } else { // 对于数据存储于 Device 内存中,则需要首先切换设备,将该设备作为当前 // Device 设备,然后释放之,最后还需要将设备切换回来以保证后续处理的正 // 确性。 cudaSetDevice(incrvCud->deviceId); cudaFree(incrv->crvData); // 如果 smoothed 曲线有效,则释放数据。 if (incrvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(incrv->smCurveCordiXY); // 如果曲线斜率数据有效,则释放数据。 if (incrvCud->tangentIsValid == TANGENT_VALID) cudaFree(incrv->tangent); cudaSetDevice(curdevid); } // 最后还需要释放曲线的元数据 delete incrvCud; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:makeAtCurrentDevice(在当前 Device 内存中构建数据) __host__ int CurveBasicOp::makeAtCurrentDevice(Curve *crv, size_t curveLength, int *crvData) { // 检查输入曲线是否为 NULL if (crv == NULL) return NULL_POINTER; // 检查给定的曲线中坐标点数量 if (curveLength < 1) return INVALID_DATA; // 检查曲线是否为空曲线 if (crv->crvData != NULL) return UNMATCH_IMG; // 获取 crv 对应的 CurveCuda 型数据。 CurveCuda *crvCud = CURVE_CUDA(crv); // 定义错误变量 cudaError_t cuerrcode; // 如果初始化曲线长度不为 0,为曲线内数据开空间。 if (curveLength != 0) { // 在当前的 Device 上申请存储指定坐标数量的曲线所需要的内存空间。 cuerrcode = cudaMalloc((void **)(&crv->crvData), 2 * curveLength * sizeof (int)); if (cuerrcode != cudaSuccess) { crv->crvData = NULL; return CUDA_ERROR; } // 判断 smoothed 曲线是否有效,若有效则开空间。 if (crvCud->smCurveIsValid == SMCURVE_VALID) { // 为 smoothed 曲线数据开空间。 cuerrcode = cudaMalloc((void **)(&crv->smCurveCordiXY), 2 * curveLength * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(crv->crvData); crv->smCurveCordiXY = NULL; return CUDA_ERROR; } } // 判断切线斜率是否有效,若有效则开空间。 if (crvCud->tangentIsValid == TANGENT_VALID) { // 为曲线上各点处的切线斜率数据开空间。 cuerrcode = cudaMalloc((void **)(&crv->tangent), curveLength * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(crv->crvData); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(crv->smCurveCordiXY); crv->tangent = NULL; return CUDA_ERROR; } } } // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 修改曲线的元数据。 crv->curveLength = curveLength; crvCud->deviceId = curdevid; if (crvData == NULL) { // 当输入坐标数据为空时,设置曲线内实际点数量为 0. 
crvCud->capacity = 0; } else { // 当输入坐标数据不为空时,将输入坐标数据拷贝到曲线内的坐标数据中。 cuerrcode = cudaMemcpy(crv->crvData, crvData, crv->curveLength * 2 * sizeof (int), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 拷贝错误,释放申请的内存。 cudaFree(crv->crvData); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(crv->smCurveCordiXY); if (crvCud->tangentIsValid == TANGENT_VALID) cudaFree(crv->tangent); return CUDA_ERROR; } // 曲线内实际点数量等于曲线内点空间的数量。 crvCud->capacity = curveLength; // 局部变量,错误码。 int errcode; // 计算曲线坐标内的各属性值 errcode = _calCrvXY(crv, crvData, curveLength); if (errcode != NO_ERROR) { cudaFree(crv->crvData); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(crv->smCurveCordiXY); if (crvCud->tangentIsValid == TANGENT_VALID) cudaFree(crv->tangent); crvCud->capacity = 0; crv->curveLength = 0; return errcode; } } // 处理完毕,退出。 return NO_ERROR; } // Host 静态方法:makeAtHost(在 Host 内存中构建数据) __host__ int CurveBasicOp::makeAtHost(Curve *crv, size_t curveLength, int *crvData) { // 检查输入曲线是否为 NULL if (crv == NULL) return NULL_POINTER; // 检查给定的曲线中坐标点数量 if (curveLength < 1) return INVALID_DATA; // 检查曲线是否为空曲线 if (crv->crvData != NULL) return UNMATCH_IMG; // 获取 crv 对应的 CurveCuda 型数据。 CurveCuda *crvCud = CURVE_CUDA(crv); // 为曲线数据在 Host 内存中申请空间,不赋值。 crv->crvData = new int[curveLength * 2]; if (crv->crvData == NULL) return OUT_OF_MEM; // 判断 smoothed 曲线是否有效,若有效则开空间。 if (crvCud->smCurveIsValid == SMCURVE_VALID) { // 为 smoothed 曲线数据开空间。 crv->smCurveCordiXY = new float[curveLength * 2]; if (crv->smCurveCordiXY == NULL) { delete [] crv->crvData; return OUT_OF_MEM; } } // 判断切线斜率是否有效,若有效则开空间。 if (crvCud->tangentIsValid == TANGENT_VALID) { // 为曲线上各点处的切线斜率数据开空间。 crv->tangent = new float[curveLength]; if (crv->tangent == NULL) { delete [] crv->crvData; if (crvCud->smCurveIsValid == SMCURVE_VALID) delete [] crv->smCurveCordiXY; return OUT_OF_MEM; } } if (crvData == NULL) { // 当输入数据为空时,设置曲线内实际点的数量为 0。 // 不为曲线赋值。 crvCud->capacity = 0; } else { // 当输入坐标数据不为空时,直接将输入坐标数据赋值给 // 曲线内的坐标数据。 // 将 crvData 内的坐标数据拷贝到 crv->crvData 中。memcpy 不返回错误, // 因此,没有进行错误检查。 memcpy(crv->crvData, crvData, curveLength * 2 * sizeof (int)); // 曲线内实际点数量等于曲线内点空间的数量。 crvCud->capacity = curveLength; // 局部变量,错误码。 int errcode; // 计算曲线坐标内的各属性值 errcode = _calCrvXY(crv, crvData, curveLength); if (errcode != NO_ERROR) { cudaFree(crv->crvData); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(crv->smCurveCordiXY); if (crvCud->tangentIsValid == TANGENT_VALID) cudaFree(crv->tangent); crvCud->capacity = 0; crv->curveLength = 0; return errcode; } } // 设置曲线中的元数据 crv->curveLength = curveLength; crvCud->deviceId = -1; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:readFromFile(从文件读取曲线) __host__ int CurveBasicOp::readFromFile(const char *filepath, Curve *outcrv) { // 这段代码仅支持 int 型尺寸为 2、4、8 三种情况。目前绝大部分的系统,采用了 // sizeof (int) == 4 的情况,少数早期的 DOS 和 Windows 系统中 sizeof (int) // == 2。 if (sizeof (int) != 2 && sizeof (int) != 4 && sizeof (int) != 8) return UNIMPLEMENT; // 检查文件路径和曲线是否为 NULL。 if (filepath == NULL || outcrv == NULL) return NULL_POINTER; // 根据输入参数的 Curve 型指针,得到对应的 CurveCuda 型数据。 CurveCuda *outcrvCud = CURVE_CUDA(outcrv); // 检查曲线所在的地址空间是否合法,如果曲线所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (outcrvCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 打开曲线文件。 ifstream crvfile(filepath, ios::in | ios::binary); if (!crvfile) return NO_FILE; // 将文件读指针挪到文件的开头处。该步骤虽然显得多余,但是却可以确保操作的正 // 确。 crvfile.seekg(0, ios::beg); // 读取文件的前四个字节,这是文件的类型头,如果类型头为 CRVT,则说明该文件 // 是曲线文件。 char 
typestr[5] = { '\0' }; crvfile.read(typestr, 4); if (strcmp(typestr, "CRVT") != 0) return WRONG_FILE; // 从文件中获取曲线中包含的是否闭合的标记。如果标记不为 0 且不为 1,则报错 int closed = 0; crvfile.read(reinterpret_cast<char *>(&closed), 4); if (closed != 1 && closed != 0) return WRONG_FILE; // 从文件中获取曲线中包含的起点的 x 坐标。 int startCordiX = 0; crvfile.read(reinterpret_cast<char *>(&startCordiX), 4); if (startCordiX < 0) return WRONG_FILE; // 从文件中获取曲线中包含的起点的 y 坐标。 int startCordiY = 0; crvfile.read(reinterpret_cast<char *>(&startCordiY), 4); if (startCordiY < 0) return WRONG_FILE; // 从文件中获取曲线中包含的终点的 x 坐标。 int endCordiX = 0; crvfile.read(reinterpret_cast<char *>(&endCordiX), 4); if (endCordiX < 0) return WRONG_FILE; // 从文件中获取曲线中包含的终点的 y 坐标。 int endCordiY = 0; crvfile.read(reinterpret_cast<char *>(&endCordiY), 4); if (endCordiY < 0) return WRONG_FILE; // 从文件中获取曲线中包含的最右点的 x 坐标。 int maxCordiX = 0; crvfile.read(reinterpret_cast<char *>(&maxCordiX), 4); if (maxCordiX < 0) return WRONG_FILE; // 从文件中获取曲线中包含的最左点的 x 坐标 int minCordiX = 0; crvfile.read(reinterpret_cast<char *>(&minCordiX), 4); if (minCordiX < 0) return WRONG_FILE; // 从文件中获取曲线中包含的最下点的 y 坐标 int maxCordiY = 0; crvfile.read(reinterpret_cast<char *>(&maxCordiY), 4); if (maxCordiY < 0) return WRONG_FILE; // 从文件中获取曲线中包含的最上点的 y 坐标 int minCordiY = 0; crvfile.read(reinterpret_cast<char *>(&minCordiY), 4); if (minCordiY < 0) return WRONG_FILE; // 从文件中获取曲线中包含的 x 坐标的平均值 int aveX = 0; crvfile.read(reinterpret_cast<char *>(&aveX), 4); if (aveX < 0) return WRONG_FILE; // 从文件中获取曲线中包含的 y 坐标的平均值 int aveY = 0; crvfile.read(reinterpret_cast<char *>(&aveY), 4); if (aveY < 0) return WRONG_FILE; // 从文件中获取曲线中包含的点空间的数量。如果坐标点数量小于 1,则报错。 size_t curveLength = 0; crvfile.read(reinterpret_cast<char *>(&curveLength), 4); if (curveLength < 1) return WRONG_FILE; // 从文件中获取曲线中包含的实际点的数量。 size_t capacity = 0; crvfile.read(reinterpret_cast<char *>(&capacity), 4); if (capacity < 1) return WRONG_FILE; // 为在内存中保存曲线的坐标点而申请新的数据空间。为了避免频繁的数据申请与释 // 放,如果发现原来曲线中的坐标点数量和新的数据中坐标点数量相同,且原来的数 // 据存储于 Host 内存,则会重用这段内存空间,不去重新申请内存。 int *newdata; bool reusedata; if (outcrv->crvData != NULL && outcrv->curveLength == curveLength && outcrvCud->deviceId == -1) { // 若数据可以重用,则使用原来的内存空间。 newdata = outcrv->crvData; reusedata = true; } else { // 若数据不能重用,则重新申请合适的内存空间。 newdata = new int[curveLength * 2]; reusedata = false; if (newdata == NULL) { return OUT_OF_MEM; } } // 读取坐标点数据。因为文件中存储的坐标点采用了 32 位有符号整形数,这里需要 // 根据系统中 int 型数据的尺寸采取不同的转换策略。 if (sizeof (int) == 2) { // 对于 sizeof (int) == 2 的系统通常 long 型数据为 32 位,因此需要逐个 // 读取后转成 int 型存放到数据数组中。 long tmp; for (int i = 0; i < curveLength * 2; i++) { crvfile.read(reinterpret_cast<char *>(&tmp), 4); newdata[i] = (int)tmp; } } else if (sizeof (int) == 8) { // 对于 sizeof (int) == 8 的系统通常 short 型数据为 32 位,因此需要逐个 // 读取后转成 int 型存放到数据数组中。 short tmp; for (int i = 0; i < curveLength * 2; i++) { crvfile.read(reinterpret_cast<char *>(&tmp), 4); newdata[i] = (int)tmp; } } else { // 对于 sizeof (int) == 4 的系统,不需要进行任何的转换,读取后的数据可 // 读取存放到数据数组中。 crvfile.read(reinterpret_cast<char *>(newdata), curveLength * 2 * 4); } // 当数据已经成功的读取后,释放原来数据占用的内存空间,防止内存泄漏。 if (outcrv->crvData != NULL && !reusedata) { if (outcrvCud->deviceId == -1) { // 如果原来的数据存放在 Host 内存中,则直接通过 delete 关键字释放。 delete[] outcrv->crvData; } else { // 如果原来的数据存放在 Device 内存中,则切换到相应的 Device 后,使 // 用 cudaFree 释放。 cudaSetDevice(outcrvCud->deviceId); cudaFree(outcrv->crvData); cudaSetDevice(curdevid); } } // 从文件中获取曲线中 smooth 曲线标记。 int smCurveIsValid = 0; crvfile.read(reinterpret_cast<char *>(&smCurveIsValid), 4); if (smCurveIsValid != SMCURVE_VALID && smCurveIsValid != 
SMCURVE_NOT_VALID) return WRONG_FILE; // 新的 smooth 曲线数据,如果标记有效,则为其开空间,否则程序结束后自动舍弃。 float *newsmdata; // 曲线的 smWindowSize 数据,如果标记有效,则读取数据。 int smWindowSize = 0; // 是否重用原曲线smooth 曲线数据标记。 bool reusesmdata = true; // 如果 smooth 曲线标记有效,则读取 smooth 曲线数据。 if (smCurveIsValid == SMCURVE_VALID) { crvfile.read(reinterpret_cast<char *>(&smWindowSize), 4); // 为在内存中保存曲线的 smooth 曲线数据而申请新的数据空间。为了避免频繁 // 的数据申请与释放,如果发现原来曲线中的坐标点数量和新的数据中坐标点数 // 量相同,且原来的数据存储于 Host 内存,则会重用这段内存空间,不去重新 // 申请内存。 if (outcrv->smCurveCordiXY != NULL && outcrv->curveLength == curveLength && outcrvCud->deviceId == -1) { // 若数据可以重用,则使用原来的内存空间。 newsmdata = outcrv->smCurveCordiXY; reusesmdata = true; } else { // 若数据不能重用,则重新申请合适的内存空间。 newsmdata = new float[curveLength * 2]; reusesmdata = false; if (newsmdata == NULL) { if (reusedata == false) delete[] newdata; return OUT_OF_MEM; } } // 读取 smooth 曲线数据。 crvfile.read(reinterpret_cast<char *>(newsmdata), curveLength * 2 * 4); // 当数据已经成功的读取后,释放原来数据占用的内存空间,防止内存泄漏。 if (outcrv->smCurveCordiXY != NULL && !reusesmdata) { if (outcrvCud->deviceId == -1) { // 如果原来的数据存放在 Host 内存中,则直接通过 delete 关键字释 // 放。 delete[] outcrv->smCurveCordiXY; } else { // 如果原来的数据存放在 Device 内存中,则切换到相应的 Device // 后,使用 cudaFree 释放。 cudaSetDevice(outcrvCud->deviceId); cudaFree(outcrv->smCurveCordiXY); cudaSetDevice(curdevid); } } } // 从文件中获取曲线中曲线斜率标记标记。 int tangentIsValid = 0; crvfile.read(reinterpret_cast<char *>(&tangentIsValid), 4); if (tangentIsValid != TANGENT_VALID && tangentIsValid != TANGENT_NOT_VALID) return WRONG_FILE; // 新的曲线斜率数据,如果标记有效,则为其开空间否则自动舍弃。 float *newtandata; // 如果曲线斜率标记有效,则读取曲线斜率数据。 if (tangentIsValid == TANGENT_VALID) { // 为在内存中保存曲线的斜率数据而申请新的数据空间。为了避免频繁的数据申 // 请与释放,如果发现原来曲线中的坐标点数量和新的数据中坐标点数量相同, // 且原来的数据存储于 Host 内存,则会重用这段内存空间,不去重新申请内 // 存。 bool reusetandata; if (outcrv->tangent != NULL && outcrv->curveLength == curveLength && outcrvCud->deviceId == -1) { // 若数据可以重用,则使用原来的内存空间。 newtandata = outcrv->tangent; reusetandata = true; } else { // 若数据不能重用,则重新申请合适的内存空间。 newtandata = new float[curveLength]; reusetandata = false; if (newtandata == NULL) { if (reusedata == false) delete[] newdata; if (smCurveIsValid == SMCURVE_VALID && reusesmdata == false) delete[] newsmdata; return OUT_OF_MEM; } } // 读取 smooth 曲线数据。 crvfile.read(reinterpret_cast<char *>(newtandata), curveLength * 4); // 当数据已经成功的读取后,释放原来数据占用的内存空间,防止内存泄漏。 if (outcrv->tangent != NULL && !reusetandata) { if (outcrvCud->deviceId == -1) { // 如果原来的数据存放在 Host 内存中,则直接通过 delete 关键字释放。 delete[] outcrv->tangent; } else { // 如果原来的数据存放在 Device 内存中,则切换到相应的 Device 后,使 // 用 cudaFree 释放。 cudaSetDevice(outcrvCud->deviceId); cudaFree(outcrv->tangent); cudaSetDevice(curdevid); } } } // 使用新的数据更新曲线的元数据。 outcrv->closed = (closed == 1 ? 
true : false); outcrv->startCordiX = startCordiX; outcrv->startCordiY = startCordiY; outcrv->endCordiX = endCordiX; outcrv->endCordiY = endCordiY; outcrv->maxCordiX = maxCordiX; outcrv->minCordiX = minCordiX; outcrv->maxCordiY = maxCordiY; outcrv->minCordiY = minCordiY; outcrv->aveX = aveX; outcrv->aveY = aveY; outcrv->curveLength = curveLength; outcrv->crvData = newdata; outcrv->smWindowSize = smWindowSize; outcrv->smCurveCordiXY = newsmdata; outcrv->tangent = newtandata; outcrvCud->capacity = capacity; outcrvCud->deviceId = -1; outcrvCud->smCurveIsValid = smCurveIsValid; outcrvCud->tangentIsValid = tangentIsValid; // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:writeToFile(将曲线写入文件) __host__ int CurveBasicOp::writeToFile(const char *filepath, Curve *incrv) { // 这段代码仅支持 int 型尺寸为 2、4、8 三种情况。目前绝大部分的系统,采用了 // sizeof (int) == 4 的情况,少数早期的 DOS 和 Windows 系统中 sizeof (int) // == 2。 if (sizeof (int) != 2 && sizeof (int) != 4 && sizeof (int) != 8) return UNIMPLEMENT; // 检查文件路径和曲线是否为 NULL。 if (filepath == NULL || incrv == NULL) return NULL_POINTER; // 打开需要写入的文件。 ofstream crvfile(filepath, ios::out | ios::binary); if (!crvfile) return NO_FILE; // 将曲线的数据拷贝回 Host 内存中,这样曲线就可以被下面的代码所读取,然后将 // 曲线的数据写入到磁盘中。这里需要注意的是,安排曲线的拷贝过程在文件打开之 // 后是因为,如果一旦文件打开失败,则不会改变曲线在内存中的存储状态,这可能 // 会对后续处理更加有利。 int errcode; errcode = CurveBasicOp::copyToHost(incrv); if (errcode < 0) return errcode; // 向文件中写入文件类型字符串 static char typestr[] = "CRVT"; crvfile.write(typestr, 4); // 向文件中写入曲线含有的是否闭合标记,写入之前将 bool 型转化为 int 型。 int closed = incrv->closed == true ? 1 : 0; crvfile.write(reinterpret_cast<char *>(&closed), 4); // 向文件中写入曲线含有的起点的 x 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->startCordiX), 4); // 向文件中写入曲线含有的起点的 y 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->startCordiY), 4); // 向文件中写入曲线含有的终点的 x 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->endCordiX), 4); // 向文件中写入曲线含有的终点的 y 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->endCordiY), 4); // 向文件中写入曲线含有的最右点的 x 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->maxCordiX), 4); // 向文件中写入曲线含有的最左点的 x 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->minCordiX), 4); // 向文件中写入曲线含有的最下点的 y 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->maxCordiY), 4); // 向文件中写入曲线含有的最上点的 y 坐标。 crvfile.write(reinterpret_cast<char *>(&incrv->minCordiY), 4); // 向文件中写入曲线含有的 x 坐标的平均值。 crvfile.write(reinterpret_cast<char *>(&incrv->aveX), 4); // 向文件中写入曲线含有的 y 坐标的平均值。 crvfile.write(reinterpret_cast<char *>(&incrv->aveY), 4); // 向文件中写入曲线含有的点空间的数量。 crvfile.write(reinterpret_cast<char *>(&incrv->curveLength), 4); // 获取 crv 对应的 CurveCuda 型数据。 CurveCuda *incrvCud = CURVE_CUDA(incrv); // 向文件中写入曲线含有的实际点的数量。 crvfile.write(reinterpret_cast<char *>(&incrvCud->capacity), 4); // 向文件中写入坐标数据,因为考虑到。为了保证每个整型数据占用 4 个字节,这 // 里对不同的情况进行了处理。不过针对目前绝大部分系统来说,sizeof (int) == // 4,因此绝大部分情况下,编译器会选择 else 分支。如果委托方认为系统是运行 // 在 sizeof (int) == 4 的系统之上,也可以删除前面的两个分支,直接使用最后 // 的 else 分支。 if (sizeof (int) == 2) { // 对于 sizeof (int) == 2 的系统来说,long 通常是 32 位的,因此,需要逐 // 个的将数据转换成 32 位的 long 型,然后进行处理。 long tmp; for (int i = 0; i < incrv->curveLength * 2; i++) { tmp = (long)(incrv->crvData[i]); crvfile.write(reinterpret_cast<char *>(&tmp), 4); } } else if (sizeof (int) == 8) { // 对于 sizeof (int) == 8 的系统来说,short 通常是 32 位的,因此,需要 // 逐个的将数据转换成 32 位的 short 型,然后进行处理。 short tmp; for (int i = 0; i < incrv->curveLength * 2; i++) { tmp = (short)(incrv->crvData[i]); crvfile.write(reinterpret_cast<char *>(&tmp), 4); } } else { // 如果 sizeof (int) == 4,则可以直接将数据写入磁盘,而不需要任何的转换 // 过程。 crvfile.write(reinterpret_cast<char *>(incrv->crvData), 
incrv->curveLength * 2 * 4); } // 向文件中写入 smooth 曲线标记。 crvfile.write(reinterpret_cast<char *>(&incrvCud->smCurveIsValid), 4); // 如果 smooth 曲线标记有效,则将数据直接写入磁盘。 if (incrvCud->smCurveIsValid == SMCURVE_VALID) { // 向文件中写入曲线的 smWindowSize 数据。 crvfile.write(reinterpret_cast<char *>(&incrv->smWindowSize), 4); // 向文件中写入 smooth 曲线数据。 crvfile.write(reinterpret_cast<char *>(incrv->smCurveCordiXY), incrv->curveLength * 2 * 4); } // 向文件中写入曲线斜率数据标记。 crvfile.write(reinterpret_cast<char *>(&incrvCud->tangentIsValid), 4); // 如果曲线斜率数据标记有效,则将数据直接写入磁盘。 if (incrvCud->tangentIsValid == TANGENT_VALID) { // 向文件中写入曲线斜率数据。 crvfile.write(reinterpret_cast<char *>(incrv->tangent), incrv->curveLength * 4); } // 处理完毕,返回。 return NO_ERROR; } // Host 静态方法:copyToCurrentDevice(将曲线拷贝到当前 Device 内存上) __host__ int CurveBasicOp::copyToCurrentDevice(Curve *crv) { // 检查曲线是否为 NULL。 if (crv == NULL) return NULL_POINTER; // 根据输入参数的 Curve 型指针,得到对应的 CurveCuda 型数据。 CurveCuda *crvCud = CURVE_CUDA(crv); // 检查曲线所在的地址空间是否合法,如果曲线所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (crvCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果曲线是一个不包含数据的空曲线,则报错。 if (crv->crvData == NULL || crv->curveLength == 0) return UNMATCH_IMG; // 对于不同的情况,将曲线数据拷贝到当前设备上。 if (crvCud->deviceId < 0) { // 如果曲线的数据位于 Host 内存上,则需要在当前 Device 的内存空间上申请 // 空间,然后将 Host 内存上的数据拷贝到当前 Device 上。 int *devptr; // 新的坐标数据空间,在当前 Device 上。 float *devsm; // 新的 smoothed 曲线数据空间。 float *devtangent; // 新的斜率数据空间。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 在当前设备上申请坐标数据的空间。 cuerrcode = cudaMalloc((void **)(&devptr), crv->curveLength * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 将原来存储在 Host 上坐标数据拷贝到当前 Device 上。 cuerrcode = cudaMemcpy(devptr, crv->crvData, crv->curveLength * 2 * sizeof (int), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 释放掉原来存储于 Host 内存上的数据。 delete[] crv->crvData; // 更新模版数据,把新的在当前 Device 上申请的数据和相关数据写入模版元数 // 据中。 crv->crvData = devptr; // 判断 smoothed 曲线是否有效;如果有效,操作同曲线坐标数据。 if (crvCud->smCurveIsValid == SMCURVE_VALID) { // 申请空间。 cuerrcode = cudaMalloc((void **)(&devsm), crv->curveLength * 2 * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 拷贝数据。 cuerrcode = cudaMemcpy(devsm, crv->smCurveCordiXY, crv->curveLength * 2 * sizeof (float), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { cudaFree(devptr); cudaFree(devsm); return CUDA_ERROR; } // 释放位于 host 端的原数据并将位于 device 端的数据重新赋值给曲线。 delete[] crv->smCurveCordiXY; crv->smCurveCordiXY = devsm; } // 判断曲线是斜率否有效;如果有效,操作同曲线坐标数据。 if (crvCud->tangentIsValid == TANGENT_VALID) { // 申请空间。 cuerrcode = cudaMalloc((void **)(&devtangent), crv->curveLength * 2 * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(devsm); return CUDA_ERROR; } // 拷贝数据。 cuerrcode = cudaMemcpy(devtangent, crv->tangent, crv->curveLength * sizeof (float), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { cudaFree(devptr); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(devsm); cudaFree(devtangent); return CUDA_ERROR; } // 释放位于 host 端的原数据并将位于 device 端的数据重新赋值给曲线。 delete[] crv->tangent; crv->tangent = devtangent; } // 更新模版数据。 crvCud->deviceId = curdevid; // 操作完毕,返回。 return NO_ERROR; } else if (crvCud->deviceId != curdevid) { // 对于数据存在其他 Device 的情况,仍旧要在当前 Device 上申请数据空间, // 并从另一个 Device 上拷贝数据到新申请的当前 Device 的数据空间中。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 新申请的当前 
Device 上的坐标数据。 int *devptr; // 新的 smoothed 曲线数据空间。 float *devsm; // 新的斜率数据空间。 float *devtangent; // 在当前 Device 上申请坐标数据空间。 cuerrcode = cudaMalloc((void **)(&devptr), crv->curveLength * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) return CUDA_ERROR; // 将数据从曲线原来的存储位置拷贝到当前的 Device 上。 cuerrcode = cudaMemcpyPeer(devptr, curdevid, crv->crvData, crvCud->deviceId, crv->curveLength * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 释放掉曲线在原来的 Device 上的数据。 cudaFree(crv->crvData); // 将新的曲线数据信息写入到曲线元数据中。 crv->crvData = devptr; // 判断 smoothed 曲线是否有效;如果有效,操作同曲线坐标数据。 if (crvCud->smCurveIsValid == SMCURVE_VALID) { // 申请空间。 cuerrcode = cudaMalloc((void **)(&devsm), crv->curveLength * 2 * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); return CUDA_ERROR; } // 拷贝数据。 cuerrcode = cudaMemcpyPeer(devsm, curdevid, crv->smCurveCordiXY, crvCud->deviceId, crv->curveLength * 2 * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); cudaFree(devsm); return CUDA_ERROR; } // 释放位于 其他 Device 端的原数据并将位于 device 端的数据重新赋 // 值给曲线。 cudaFree(crv->smCurveCordiXY); crv->smCurveCordiXY = devsm; } // 判断曲线是斜率否有效;如果有效,操作同曲线坐标数据。 if (crvCud->tangentIsValid == TANGENT_VALID) { // 申请空间。 cuerrcode = cudaMalloc((void **)(&devtangent), crv->curveLength * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(devsm); return CUDA_ERROR; } // 拷贝数据。 cuerrcode = cudaMemcpyPeer(devptr, curdevid, crv->crvData, crvCud->deviceId, crv->curveLength * sizeof (float)); if (cuerrcode != cudaSuccess) { cudaFree(devptr); if (crvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(devsm); cudaFree(devtangent); return CUDA_ERROR; } // 释放位于 其他 Device 端的原数据并将位于 device 端的数据重新赋 // 值给曲线。 cudaFree(crv->tangent); crv->tangent = devtangent; } crvCud->deviceId = curdevid; // 操作完成,返回。 return NO_ERROR; } // 对于其他情况,即曲线数据本来就在当前 Device 上,则直接返回,不进行任何的 // 操作。 return NO_ERROR; } // Host 静态方法:copyToCurrentDevice(将曲线拷贝到当前 Device 内存上) __host__ int CurveBasicOp::copyToCurrentDevice(Curve *srccrv, Curve *dstcrv) { // 检查输入曲线是否为 NULL。 if (srccrv == NULL || dstcrv == NULL) return NULL_POINTER; // 如果输出曲线为 NULL 或者和输入曲线为同一个曲线,则转而调用对应的 // In-place 版本的函数。 if (dstcrv == NULL || dstcrv == srccrv) return copyToCurrentDevice(srccrv); // 获取 srccrv 和 dstcrv 对应的 CurveCuda 型指针。 CurveCuda *srccrvCud = CURVE_CUDA(srccrv); CurveCuda *dstcrvCud = CURVE_CUDA(dstcrv); // 用来存放旧的 dstcrv 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 CurveCuda olddstcrvCud = *dstcrvCud; // 旧的 dstcrv 数据 bool reusedata = true; // 记录是否重用了原来的曲线数据空间。 // 该值为 ture,则原来的数据空间被重 // 用,不需要在之后释放数据,否则 // 需要在最后释放旧的空间。 // 如果源曲线是一个空曲线,则不进行任何操作,直接报错。 if (srccrv->crvData == NULL || srccrv->curveLength == 0) return INVALID_DATA; // 检查曲线所在的地址空间是否合法,如果曲线所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srccrvCud->deviceId >= devcnt || dstcrvCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标曲线中存在有数据,则需要根据情况,若原来的数据不存储在当前的 // Device 上,或者即使存储在当前的 Device 上,但数据尺寸不匹配,则需要释放 // 掉原来申请的空间,以便重新申请合适的内存空间。此处不进行真正的释放操作, // 其目的在于当后续操作出现错误时,可以很快的恢复 dstcrv 中原来的信息,使得 // 整个系统不会处于一个混乱的状态,本函数会在最后,确定 dstcrv 被成功的更换 // 为了新的数据以后,才会真正的将原来的曲线数据释放掉。 if (dstcrvCud->deviceId != curdevid) { // 对于数据存在 Host 与其他的 Device 上,则直接释放掉原来的数据空间。 reusedata = false; dstcrv->crvData = NULL; } else if (dstcrv->curveLength != srccrv->curveLength) { // 对于数据存在于当前 Device 上,则需要检查数据的尺寸是否和源曲线相匹 // 配。如果目标曲线和源曲线的尺寸不匹配则仍旧需要释放目标曲线原来的数据 // 空间。 reusedata = 
false; dstcrv->crvData = NULL; } // 将目标曲线的属性更改为源曲线的属性。 dstcrv->closed = srccrv->closed; dstcrv->startCordiX = srccrv->startCordiX; dstcrv->startCordiY = srccrv->startCordiY; dstcrv->endCordiX = srccrv->endCordiX; dstcrv->endCordiY = srccrv->endCordiY; dstcrv->maxCordiX = srccrv->maxCordiX; dstcrv->minCordiX = srccrv->minCordiX; dstcrv->maxCordiY = srccrv->maxCordiY; dstcrv->minCordiY = srccrv->minCordiY; dstcrv->aveX = srccrv->aveX; dstcrv->aveY = srccrv->aveY; dstcrv->curveLength = srccrv->curveLength; // 将目标曲线的实际点数量更改为源曲线的实际点数量。 dstcrvCud->capacity = srccrvCud->capacity; // 更改目标曲线的数据存储位置为当前 Device。 dstcrvCud->deviceId = curdevid; // 更新目标曲线的标记数据。 dstcrvCud->smCurveIsValid = srccrvCud->smCurveIsValid; dstcrvCud->tangentIsValid = srccrvCud->tangentIsValid; // 如果目标曲线需要重新申请空间(因为上一步将无法重用原来内存空间的情况的 // dstcrv->crvData 都置为 NULL,因此此处通过检查 dstcrv->crvData == NULL来 // 确定是否需要重新申请空间),则在当前的 Device 内存中申请空间。 cudaError_t cuerrcode; if (dstcrv->crvData == NULL) { // 申请坐标数据的内存空间 cuerrcode = cudaMalloc((void **)(&dstcrv->crvData), srccrv->curveLength * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) { // 如果空间申请操作失败,则恢复原来的目标曲线的数据,以防止系统进入 // 混乱状态。 *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } // 判断 smoothed 曲线是否有效;如果有效,则申请空间。 if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) { cuerrcode = cudaMalloc((void **)(&dstcrv->smCurveCordiXY), srccrv->curveLength * 2 * sizeof (float)); if (cuerrcode != cudaSuccess) { // 如果空间申请操作失败,则恢复原来的目标曲线的数据,以防止系统进入 // 混乱状态。 cudaFree(dstcrv->crvData); *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } } // 判断曲线斜率数据是否有效;如果有效,则申请空间。 if (dstcrvCud->tangentIsValid == TANGENT_VALID) { cuerrcode = cudaMalloc((void **)(&dstcrv->tangent), srccrv->curveLength * sizeof (float)); if (cuerrcode != cudaSuccess) { // 如果空间申请操作失败,则恢复原来的目标曲线的数据,以防止系统进入 // 混乱状态。 cudaFree(dstcrv->crvData); if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(dstcrv->smCurveCordiXY); *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } } } // 将数据拷贝到目标曲线内。 if (srccrvCud->deviceId < 0) { // 如果源曲线存储于 Host,则通过 cudaMemcpy 将数据从 Host 拷贝到 Device // 上。 // 拷贝数据 cuerrcode = cudaMemcpy(dstcrv->crvData, srccrv->crvData, srccrv->curveLength * 2 * sizeof (int), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 报错处理分为两个步骤:第一步,如果数据空间不是重用原来的数据空间时, // 则需要释放掉新申请的数据空间;第二步,恢复原来的目标曲线的元数据。 if (!reusedata) { cudaFree(dstcrv->crvData); if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(dstcrv->smCurveCordiXY); if (dstcrvCud->tangentIsValid == TANGENT_VALID) cudaFree(dstcrv->tangent); } *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } // 判断 smoothed 曲线是否有效;如果有效,则拷贝数据。 if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) { cuerrcode = cudaMemcpy(dstcrv->smCurveCordiXY, srccrv->smCurveCordiXY, srccrv->curveLength * 2 * sizeof (float), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 报错处理分为两个步骤:第一步,如果数据空间不是重用原来的数据空间时, // 则需要释放掉新申请的数据空间;第二步,恢复原来的目标曲线的元数据。 if (!reusedata) { cudaFree(dstcrv->crvData); cudaFree(dstcrv->smCurveCordiXY); if (dstcrvCud->tangentIsValid == TANGENT_VALID) cudaFree(dstcrv->tangent); } *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } } // 判断曲线斜率数据是否有效;如果有效,则拷贝数据。 if (dstcrvCud->tangentIsValid == TANGENT_VALID) { cuerrcode = cudaMemcpy(dstcrv->tangent, srccrv->tangent, srccrv->curveLength * sizeof (float), cudaMemcpyHostToDevice); if (cuerrcode != cudaSuccess) { // 报错处理分为两个步骤:第一步,如果数据空间不是重用原来的数据空间时, // 则需要释放掉新申请的数据空间;第二步,恢复原来的目标曲线的元数据。 if (!reusedata) { cudaFree(dstcrv->crvData); if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(dstcrv->smCurveCordiXY); cudaFree(dstcrv->tangent); } *dstcrvCud = 
olddstcrvCud; return CUDA_ERROR; } } } else { // 如果源曲线存储于 Device,则通过 cudaMemcpyPeer 进行设备间的数据拷 // 贝。 // 拷贝曲线数据 cuerrcode = cudaMemcpyPeer(dstcrv->crvData, curdevid, srccrv->crvData, srccrvCud->deviceId, srccrv->curveLength * 2 * sizeof (int)); if (cuerrcode != cudaSuccess) { // 报错处理分为两个步骤:第一步,如果数据空间不是重用原来的数据空间时, // 则需要释放掉新申请的数据空间;第二步,恢复原来的目标曲线的元数据。 if (!reusedata) { cudaFree(dstcrv->crvData); if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(dstcrv->smCurveCordiXY); if (dstcrvCud->tangentIsValid == TANGENT_VALID) cudaFree(dstcrv->tangent); } *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } // 判断 smoothed 曲线是否有效;如果有效,则拷贝数据。 if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) { cuerrcode = cudaMemcpyPeer(dstcrv->smCurveCordiXY, curdevid, srccrv->smCurveCordiXY, srccrvCud->deviceId, srccrv->curveLength * 2 * sizeof (float)); if (cuerrcode != cudaSuccess) { // 报错处理分为两个步骤:第一步,如果数据空间不是重用原来的数据空间时, // 则需要释放掉新申请的数据空间;第二步,恢复原来的目标曲线的元数据。 if (!reusedata) { cudaFree(dstcrv->crvData); cudaFree(dstcrv->smCurveCordiXY); if (dstcrvCud->tangentIsValid == TANGENT_VALID) cudaFree(dstcrv->tangent); } *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } } // 判断曲线斜率数据是否有效;如果有效,则拷贝数据。 if (dstcrvCud->tangentIsValid == TANGENT_VALID) { cuerrcode = cudaMemcpyPeer(dstcrv->tangent, curdevid, srccrv->tangent, srccrvCud->deviceId, srccrv->curveLength * sizeof (float)); if (cuerrcode != cudaSuccess) { // 报错处理分为两个步骤:第一步,如果数据空间不是重用原来的数据空间时, // 则需要释放掉新申请的数据空间;第二步,恢复原来的目标曲线的元数据。 if (!reusedata) { cudaFree(dstcrv->crvData); if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(dstcrv->smCurveCordiXY); cudaFree(dstcrv->tangent); } *dstcrvCud = olddstcrvCud; return CUDA_ERROR; } } } // 到此步骤已经说明新的曲线数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddstcrvCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddstcrvCud.crvMeta.crvData != NULL) { if (olddstcrvCud.deviceId < 0) { // 如果旧数据空间是 Host 内存上的,则需要无条件释放。 delete [] olddstcrvCud.crvMeta.crvData; if(olddstcrvCud.smCurveIsValid == SMCURVE_VALID) delete [] olddstcrvCud.crvMeta.smCurveCordiXY; if (olddstcrvCud.tangentIsValid == TANGENT_VALID) delete [] olddstcrvCud.crvMeta.tangent; } else if (!reusedata) { // 如果旧数据空间不是当前 Device 内存上的其他 Device 内存上的数据, // 则也需要无条件的释放。 cudaSetDevice(olddstcrvCud.deviceId); cudaFree(olddstcrvCud.crvMeta.crvData); if(olddstcrvCud.smCurveIsValid == SMCURVE_VALID) cudaFree(olddstcrvCud.crvMeta.smCurveCordiXY); if (olddstcrvCud.tangentIsValid == TANGENT_VALID) cudaFree(olddstcrvCud.crvMeta.tangent); cudaSetDevice(curdevid); } } return NO_ERROR; } // Host 静态方法:copyToHost(将曲线拷贝到 Host 内存上) __host__ int CurveBasicOp::copyToHost(Curve *crv) { // 检查曲线是否为 NULL。 if (crv == NULL) return NULL_POINTER; // 根据输入参数的 Curve 型指针,得到对应的 CurveCuda 型数据。 CurveCuda *crvCud = CURVE_CUDA(crv); // 检查曲线所在的地址空间是否合法,如果曲线所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (crvCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果曲线是一个不好含数据的空曲线,则报错。 if (crv->crvData == NULL || crv->curveLength == 0) return UNMATCH_IMG; // 对于不同的情况,将曲线数据拷贝到当前设备上。 if (crvCud->deviceId < 0) { // 如果曲线位于 Host 内存上,则不需要进行任何操作。 return NO_ERROR; } else { // 如果曲线的数据位于 Device 内存上,则需要在 Host 的内存空间上申请空 // 间,然后将数据拷贝到 Host 上。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 新的数据空间,在 Host 上。 int *hostptr; // 新的 smooth 曲线数据空间。 float *hostsm; // 新的切线斜率数据空间。 float *hosttangent; // 在 Host 上申请坐标数据空间。 hostptr = new int[crv->curveLength * 2]; if (hostptr == NULL) return OUT_OF_MEM; // 判断 smooth 曲线标记是否有效;如果有效则申请空间。 if 
(crvCud->smCurveIsValid == SMCURVE_VALID) { // 在 Host 上申请新的 smooth 曲线数据空间。 hostsm = new float[crv->curveLength * 2]; if (hostsm == NULL) { delete [] hostptr; return OUT_OF_MEM; } } // 判断曲线斜率标记是否有效;如果有效则申请空间。 if (crvCud->tangentIsValid ==TANGENT_VALID) { // 在 Host 上申请新的切线斜率数据空间。 hosttangent = new float[crv->curveLength]; if (hosttangent == NULL) { delete [] hostptr; if (crvCud->smCurveIsValid == SMCURVE_VALID) delete [] hostsm; return OUT_OF_MEM; } } // 将设备切换到数据所在的 Device 上。 cudaSetDevice(crvCud->deviceId); // 拷贝曲线数据 cuerrcode = cudaMemcpy(hostptr, crv->crvData, crv->curveLength * 2 * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝失败,则需要释放掉刚刚申请的内存空间,以防止内存泄漏。之 // 后报错返回。 delete [] hostptr; if (crvCud->smCurveIsValid == SMCURVE_VALID) delete [] hostsm; if (crvCud->tangentIsValid == TANGENT_VALID) delete [] hosttangent; return CUDA_ERROR; } // 释放掉原来存储于 Device 内存上的曲线数据。 cudaFree(crv->crvData); // 判断 smooth 曲线标记是否有效;如果有效则拷贝数据。 if (crvCud->smCurveIsValid == SMCURVE_VALID) { cuerrcode = cudaMemcpy(hostsm, crv->smCurveCordiXY, crv->curveLength * 2 * sizeof (float), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝失败,则需要释放掉刚刚申请的内存空间,以防止内存泄漏。之 // 后报错返回。 delete [] hostptr; delete [] hostsm; if (crvCud->tangentIsValid == TANGENT_VALID) delete [] hosttangent; return CUDA_ERROR; } // 释放掉原来存储于 Device 内存上的 smooth 曲线数据。 cudaFree(crv->smCurveCordiXY); } // 判断曲线斜率标记是否有效;如果有效则拷贝数据。 if (crvCud->tangentIsValid ==TANGENT_VALID) { cuerrcode = cudaMemcpy(hosttangent, crv->tangent, crv->curveLength * sizeof (float), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝失败,则需要释放掉刚刚申请的内存空间,以防止内存泄漏。之 // 后报错返回。 delete [] hostptr; if (crvCud->smCurveIsValid == SMCURVE_VALID) delete [] hostsm; delete [] hosttangent; return CUDA_ERROR; } // 释放掉原来存储于 Device 内存上的曲线斜率数据。 cudaFree(crv->tangent); } // 对 Device 内存的操作完毕,将设备切换回当前 Device。 cudaSetDevice(curdevid); // 更新曲线数据,把新的在当前 Device 上申请的数据和相关数据写入曲线元数 // 据中。 crv->crvData = hostptr; if (crvCud->smCurveIsValid == SMCURVE_VALID) crv->smCurveCordiXY = hostsm; if (crvCud->tangentIsValid == TANGENT_VALID) crv->tangent = hosttangent; crvCud->deviceId = -1; // 操作完毕,返回。 return NO_ERROR; } // 程序永远也不会到达这个分支,因此如果到达这个分支,则说明系统紊乱。对于多 // 数编译器来说,会对此句报出不可达语句的 Warning,因此这里将其注释掉,以防 // 止不必要的 Warning。 //return UNKNOW_ERROR; } // Host 静态方法:copyToHost(将曲线拷贝到 Host 内存上) __host__ int CurveBasicOp::copyToHost(Curve *srccrv, Curve *dstcrv) { // 检查输入曲线是否为 NULL。 if (srccrv == NULL || dstcrv == NULL) return NULL_POINTER; // 如果输出曲线为 NULL 或者和输入曲线同为一个曲线,则调用对应的 In-place 版 // 本的函数。 if (dstcrv == NULL || dstcrv == srccrv) return copyToHost(srccrv); // 获取 srccrv 和 dstcrv 对应的 CurveCuda 型指针。 CurveCuda *srccrvCud = CURVE_CUDA(srccrv); CurveCuda *dstcrvCud = CURVE_CUDA(dstcrv); // 用来存放旧的 dstcrv 数据,使得在拷贝操作失败时可以恢复为原来的可用的数据 // 信息,防止系统进入一个混乱的状态。 CurveCuda olddstcrvCud = *dstcrvCud; // 旧的 dstcrv 数据 bool reusedata = true; // 记录是否重用了原来的曲线数据空间。 // 该值为 true,则原来的数据空间被重 // 用。不需要在之后释放数据,否则需要 // 释放就的空间。 // 如果源曲线是一个空曲线,则不进行任何操作,直接报错。 if (srccrv->crvData == NULL || srccrv->curveLength == 0) return INVALID_DATA; // 检查曲线所在的地址空间是否合法,如果曲线所在地址空间不属于 Host 或任何一 // 个 Device,则该函数报“数据溢出”错误,表示无法处理。 int devcnt; cudaGetDeviceCount(&devcnt); if (srccrvCud->deviceId >= devcnt || dstcrvCud->deviceId >= devcnt) return OP_OVERFLOW; // 获取当前 Device ID。 int curdevid; cudaGetDevice(&curdevid); // 如果目标曲线中存在有数据,则需要根据情况,若原来的数据不存储在 Host 上, // 或者即使存储在 Host 上,但数据尺寸不匹配,则需要释放掉原来申请的空间,以 // 便重新申请合适的内存空间。此处不进行真正的释放操作,其目的在于当后续操作 // 出现错误时,可以很快的恢复 dstcrv 中原来的信息,使得整个系统不会处于一个 // 混乱的状态,本函数会在最后,确定 dstcrv 被成功的更换为了新的数据以后,才 
// 会真正的将原来的曲线数据释放掉。 if (dstcrvCud->deviceId >= 0) { // 对于数据存在于 Device 上,则亦直接释放掉原来的数据空间。 reusedata = false; dstcrv->crvData = NULL; } else if (srccrv->curveLength != dstcrv->curveLength) { // 对于数据存在于 Host 上,则需要检查数据的尺寸是否和源曲线相匹配。检查 // 的标准:源曲线和目标曲线的尺寸相同时,可重用原来的空间。 reusedata = false; dstcrv->crvData = NULL; } // 将目标曲线的属性更改为源曲线的属性。 dstcrv->closed = srccrv->closed; dstcrv->startCordiX = srccrv->startCordiX; dstcrv->startCordiY = srccrv->startCordiY; dstcrv->endCordiX = srccrv->endCordiX; dstcrv->endCordiY = srccrv->endCordiY; dstcrv->maxCordiX = srccrv->maxCordiX; dstcrv->minCordiX = srccrv->minCordiX; dstcrv->maxCordiY = srccrv->maxCordiY; dstcrv->minCordiY = srccrv->minCordiY; dstcrv->aveX = srccrv->aveX; dstcrv->aveY = srccrv->aveY; dstcrv->curveLength = srccrv->curveLength; // 将目标曲线的实际点数量更改为源曲线的实际点数量。 dstcrvCud->capacity = srccrvCud->capacity; // 更改目标曲线的数据存储位置为 Host。 dstcrvCud->deviceId = -1; // 更新目标曲线的标记数据。 dstcrvCud->smCurveIsValid = srccrvCud->smCurveIsValid; dstcrvCud->tangentIsValid = srccrvCud->tangentIsValid; // 如果目标曲线的 crvData == NULL,说明目标曲线原本要么是一个空曲线,要么目 // 标曲线原本的数据空间不合适,需要重新申请。这时,需要为目标曲线重新在 // Host 上申请一个合适的数据空间。 if (dstcrv->crvData == NULL) { // 申请曲线数据空间 dstcrv->crvData = new int[srccrv->curveLength * 2]; if (dstcrv->crvData == NULL) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标曲线数据 // 恢复到目标曲线中,以保证系统接下的操作不至于混乱。 *dstcrvCud = olddstcrvCud; return OUT_OF_MEM; } // 如果 smooth 曲线标记有效,则申请数据空间。 if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) { dstcrv->smCurveCordiXY = new float[srccrv->curveLength * 2]; if (dstcrv->crvData == NULL) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标曲线数据 // 恢复到目标曲线中,以保证系统接下的操作不至于混乱。 delete [] dstcrv->crvData; *dstcrvCud = olddstcrvCud; return OUT_OF_MEM; } } // 如果曲线斜率标记有效,则申请数据空间。 if (dstcrvCud->tangentIsValid == TANGENT_VALID) { dstcrv->tangent = new float[srccrv->curveLength]; if (dstcrv->crvData == NULL) { // 如果申请内存的操作失败,则再报错返回前需要将旧的目标曲线数据 // 恢复到目标曲线中,以保证系统接下的操作不至于混乱。 delete [] dstcrv->crvData; if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) delete [] dstcrv->smCurveCordiXY; *dstcrvCud = olddstcrvCud; return OUT_OF_MEM; } } } // 将坐标数据从源曲线中拷贝到目标曲线中。 if (srccrvCud->deviceId < 0) { // 如果源曲线数据存储于 Host 内存,则直接使用 C 标准支持库中的 memcpy // 完成拷贝。 // 将 srccrv 内的坐标数据拷贝到 dstcrv 中。memcpy 不返回错误,因此,没 // 有进行错误检查。 memcpy(dstcrv->crvData, srccrv->crvData, srccrv->curveLength * 2 * sizeof (int)); if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) memcpy(dstcrv->smCurveCordiXY, srccrv->smCurveCordiXY, srccrv->curveLength * 2 * sizeof (float)); if (dstcrvCud->tangentIsValid == TANGENT_VALID) memcpy(dstcrv->tangent, srccrv->tangent, srccrv->curveLength * sizeof (float)); } else { // 如果源曲线数据存储于 Device 内存(无论是当前 Device 还是其他的 // Device),都是通过 CUDA 提供的函数进行拷贝。 cudaError_t cuerrcode; // CUDA 调用返回的错误码。 // 首先切换到 srccrv 坐标数据所在的 Device,以方便进行内存操作。 cudaSetDevice(srccrvCud->deviceId); // 这里使用 cudaMemcpy 将 srccrv 中处于 Device 上的数据拷贝到 dstcrv 中 // 位于 Host 的内存空间上面。 // 拷贝坐标数据 cuerrcode = cudaMemcpy(dstcrv->crvData, srccrv->crvData, srccrv->curveLength * 2 * sizeof (int), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标曲线数据恢复到目 // 标曲线中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。最后,还需要把 Device 切换回来,以免整个程序乱套。 if (!reusedata) { delete [] dstcrv->crvData; if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) delete [] dstcrv->smCurveCordiXY; if (dstcrvCud->tangentIsValid == TANGENT_VALID) delete [] dstcrv->tangent; } *dstcrvCud = olddstcrvCud; cudaSetDevice(curdevid); return CUDA_ERROR; } // 如果 smooth 曲线标记有效,则拷贝数据。 if (dstcrvCud->smCurveIsValid == SMCURVE_VALID){ cuerrcode = cudaMemcpy(dstcrv->smCurveCordiXY, 
srccrv->smCurveCordiXY, srccrv->curveLength * 2 * sizeof (float), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标曲线数据恢复到目 // 标曲线中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。最后,还需要把 Device 切换回来,以免整个程序乱套。 if (!reusedata) { delete [] dstcrv->crvData; delete [] dstcrv->smCurveCordiXY; if (dstcrvCud->tangentIsValid == TANGENT_VALID) delete [] dstcrv->tangent; } *dstcrvCud = olddstcrvCud; cudaSetDevice(curdevid); return CUDA_ERROR; } } // 如果曲线斜率标记有效,则拷贝数据。 if (dstcrvCud->tangentIsValid == TANGENT_VALID) { cuerrcode = cudaMemcpy(dstcrv->tangent, srccrv->tangent, srccrv->curveLength * sizeof (float), cudaMemcpyDeviceToHost); if (cuerrcode != cudaSuccess) { // 如果拷贝操作失败,则再报错退出前,需要将旧的目标曲线数据恢复到目 // 标曲线中。此外,如果数据不是重用的,则需要释放新申请的数据空间, // 防止内存泄漏。最后,还需要把 Device 切换回来,以免整个程序乱套。 if (!reusedata) { delete [] dstcrv->crvData; if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) delete [] dstcrv->smCurveCordiXY; delete [] dstcrv->tangent; } *dstcrvCud = olddstcrvCud; cudaSetDevice(curdevid); return CUDA_ERROR; } } // 对内存操作完毕后,将设备切换回当前的 Device。 cudaSetDevice(curdevid); } // 到此步骤已经说明新的曲线数据空间已经成功的申请并拷贝了新的数据,因此,旧 // 的数据空间已毫无用处。本步骤就是释放掉旧的数据空间以防止内存泄漏。这里, // 作为拷贝的 olddstcrvCud 是局部变量,因此相应的元数据会在本函数退出后自动 // 释放,不用理会。 if (olddstcrvCud.crvMeta.crvData != NULL) { if (olddstcrvCud.deviceId > 0) { // 如果旧数据是存储于 Device 内存上的数据,则需要无条件的释放。 cudaSetDevice(olddstcrvCud.deviceId); cudaFree(olddstcrvCud.crvMeta.crvData); if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) cudaFree(olddstcrvCud.crvMeta.smCurveCordiXY); if (dstcrvCud->tangentIsValid == TANGENT_VALID) cudaFree(olddstcrvCud.crvMeta.tangent); cudaSetDevice(curdevid); } else if (!reusedata) { // 如果旧数据就在 Host 内存上,则对于 reusedata 未置位的情况进行释 // 放,因为一旦置位,旧的数据空间就被用于承载新的数据,则不能释放。 delete [] olddstcrvCud.crvMeta.crvData; if (dstcrvCud->smCurveIsValid == SMCURVE_VALID) delete [] olddstcrvCud.crvMeta.smCurveCordiXY; if (dstcrvCud->tangentIsValid == TANGENT_VALID) delete [] olddstcrvCud.crvMeta.tangent; } } // 处理完毕,退出。 return NO_ERROR; } // Host 静态方法:assignData(为曲线数据赋值) __host__ int CurveBasicOp::assignData(Curve *crv, int *data, size_t count) { // 检查输入曲线是否为 NULL if (crv == NULL) return NULL_POINTER; // 检查给定的曲线中坐标点数量 if (count < 1) return INVALID_DATA; // 局部变量,错误码。 int errcode; // 计算曲线坐标内的各属性值 errcode = _calCrvXY(crv, data, count); if (errcode != NO_ERROR) { return errcode; } // 获取 crv 对应的 CurveCuda 型数据。 CurveCuda *crvCud = CURVE_CUDA(crv); // 如果曲线内存在数据,则将释放原数据。 if (crv->crvData != NULL) { delete [] crv->crvData; } // 为新的曲线数据开空间。 crv->crvData = new int[count * 2]; // 拷贝数据。 memcpy(crv->crvData, data, count * 2 * sizeof (int)); // 将新的曲线数据信息写入到曲线元数据中。 crv->curveLength = count; crvCud->capacity = count; return NO_ERROR; } // Host 静态方法:setSmCurveValid(设置 smoothing 曲线有效) __host__ int CurveBasicOp::setSmCurveValid(Curve *crv) { // 检查输入曲线是否为 NULL if (crv == NULL) return NULL_POINTER; // 判断曲线是否已经有数据,有则返回错误; // 此函数必须执行在为曲线数据申请空间之前。 if (crv->crvData != NULL) return INVALID_DATA; // 获取 crv 对应的 CurveCuda 型指针。 CurveCuda *crvCud = CURVE_CUDA(crv); // 设置 smoothing 曲线有效。 crvCud->smCurveIsValid = SMCURVE_VALID; return NO_ERROR; } // Host 静态方法:setTangentValid(设置曲线斜率有效) __host__ int CurveBasicOp::setTangentValid(Curve *crv) { // 检查输入曲线是否为 NULL if (crv == NULL) return NULL_POINTER; // 判断曲线是否已经有数据,有则返回错误; // 此函数必须执行在为曲线数据申请空间之前。 if (crv->crvData != NULL) return INVALID_DATA; // 获取 crv 对应的 CurveCuda 型指针。 CurveCuda *crvCud = CURVE_CUDA(crv); // 设置曲线斜率有效。 crvCud->tangentIsValid = TANGENT_VALID; return NO_ERROR; }
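// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of Curve.h/Curve.cu): the intended call
// order for the CurveBasicOp API defined above. Note that setSmCurveValid()
// and setTangentValid() must be called *before* any data space is allocated,
// because both return INVALID_DATA once crvData is non-NULL. Error handling
// is abbreviated; the coordinate array below is hypothetical sample data.
// ---------------------------------------------------------------------------
static int curve_roundtrip_example()
{
    Curve *crv = NULL;
    int errcode = CurveBasicOp::newCurve(&crv);          // allocate metadata only
    if (errcode != NO_ERROR) return errcode;

    // Opt in to the optional smoothed-curve and tangent buffers while the
    // curve is still empty.
    CurveBasicOp::setSmCurveValid(crv);
    CurveBasicOp::setTangentValid(crv);

    // Four points of a small closed square, stored as interleaved (x, y).
    int coords[] = { 0, 0,  1, 0,  1, 1,  0, 1 };
    errcode = CurveBasicOp::makeAtHost(crv, 4, coords);  // host copy + attribute calc
    if (errcode != NO_ERROR) { CurveBasicOp::deleteCurve(crv); return errcode; }

    // Move the curve to the current device for GPU processing, then bring it
    // back to host memory, e.g. before writeToFile().
    errcode = CurveBasicOp::copyToCurrentDevice(crv);
    if (errcode == NO_ERROR)
        errcode = CurveBasicOp::copyToHost(crv);

    CurveBasicOp::deleteCurve(crv);                      // frees data + metadata
    return errcode;
}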
#include <cusparse.h> #include <torch/extension.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include <c10/cuda/CUDACachingAllocator.h> #include "utils.cuh" #if defined(__CUDACC__) && (CUSPARSE_VERSION >= 11000)// || (!defined(_MSC_VER) && CUSPARSE_VERSION >= 10301)) #define IS_GENERIC_AVAILABLE() 1 #else #define IS_GENERIC_AVAILABLE() 0 #endif #if IS_GENERIC_AVAILABLE() #include <library_types.h> #endif #if IS_GENERIC_AVAILABLE() template<typename scalar_t> std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> run_spspmm_cuda( const torch::Tensor &rowptrA, const torch::Tensor &colA, const torch::Tensor &valA, const torch::Tensor &rowptrB, const torch::Tensor &colB, const torch::Tensor &valB, int64_t N ) { constexpr auto cuda_value_type = std::is_same<float, scalar_t>::value ? CUDA_R_32F : CUDA_R_64F; cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle(); cusparseSpMatDescr_t matA, matB, matC; void* dBuffer1 = NULL, *dBuffer2 = NULL; size_t bufferSize1 = 0, bufferSize2 = 0; const int64_t M = rowptrA.numel() - 1, K = rowptrB.numel() - 1; const int nnzA = valA.numel(), nnzB = valB.numel(); const scalar_t alpha = (scalar_t)1.0, beta = (scalar_t)0.0; auto& allocator = *c10::cuda::CUDACachingAllocator::get(); // Convert indices to int (could be long at input) const torch::Tensor &rowptrA_int = rowptrA.toType(torch::kInt); const torch::Tensor &colA_int = colA.toType(torch::kInt); const torch::Tensor &rowptrB_int = rowptrB.toType(torch::kInt); const torch::Tensor &colB_int = colB.toType(torch::kInt); TORCH_CUDASPARSE_CHECK(cusparseCreateCsr(&matA, M, K, nnzA, rowptrA_int.data_ptr<int>(), colA_int.data_ptr<int>(), valA.data_ptr<scalar_t>(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cuda_value_type)); TORCH_CUDASPARSE_CHECK(cusparseCreateCsr(&matB, K, N, nnzB, rowptrB_int.data_ptr<int>(), colB_int.data_ptr<int>(), valB.data_ptr<scalar_t>(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cuda_value_type)); TORCH_CUDASPARSE_CHECK(cusparseCreateCsr(&matC, M, N, 0, NULL, NULL, NULL, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cuda_value_type)); // Step 0. Opaque object creation cusparseSpGEMMDescr_t spgemmDesc; TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&spgemmDesc)); // Step 1. Estimate amount of work (buffer 1) TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_workEstimation(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB, &beta, matC, cuda_value_type, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize1, NULL)); auto dataPtr1 = allocator.allocate(bufferSize1); dBuffer1 = dataPtr1.get(); // Step 2. Fill buffer 1? TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_workEstimation(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB, &beta, matC, cuda_value_type, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize1, dBuffer1)); // Step 3. Estimate amount of work (buffer 2) TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_compute(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB, &beta, matC, cuda_value_type, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize2, NULL)); auto dataPtr2 = allocator.allocate(bufferSize2); dBuffer2 = dataPtr2.get(); // Step 4. 
compute the intermediate product of A * B TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_compute(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB, &beta, matC, cuda_value_type, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &bufferSize2, dBuffer2)); // Step 5. Retrieve nnzC from matrix descriptor, allocate all of C and update pointers in matC descriptor int64_t C_num_rows, C_num_cols, nnzC; TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize(matC, &C_num_rows, &C_num_cols, &nnzC)); torch::Tensor rowptrC = torch::empty(M + 1, rowptrA_int.options()); torch::Tensor colC = torch::empty(nnzC, rowptrC.options()); torch::Tensor valC = torch::empty(nnzC, valA.options()); TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers(matC, rowptrC.data_ptr<int>(), colC.data_ptr<int>(), valC.data_ptr<scalar_t>())); // Step 6. Copy the final products to matrix C TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_copy(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, &alpha, matA, matB, &beta, matC, cuda_value_type, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc)); // Finally free-up temporary buffers TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_destroyDescr(spgemmDesc)); TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(matA)); TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(matB)); TORCH_CUDASPARSE_CHECK(cusparseDestroySpMat(matC)); return std::make_tuple(rowptrC, colC, valC); } #else template<typename value_t> cusparseStatus_t cusparseXcsrgemm2_bufferSizeExt( cusparseHandle_t handle, int m, int n, int k, const value_t* alpha, const cusparseMatDescr_t descrA, int nnzA, const int* csrRowPtrA, const int* csrColIndA, const cusparseMatDescr_t descrB, int nnzB, const int* csrRowPtrB, const int* csrColIndB, const value_t* beta, const cusparseMatDescr_t descrD, int nnzD, const int* csrRowPtrD, const int* csrColIndD, csrgemm2Info_t info, size_t* pBufferSizeInBytes) { } template<> cusparseStatus_t cusparseXcsrgemm2_bufferSizeExt<float>( cusparseHandle_t handle, int m, int n, int k, const float* alpha, const cusparseMatDescr_t descrA, int nnzA, const int* csrRowPtrA, const int* csrColIndA, const cusparseMatDescr_t descrB, int nnzB, const int* csrRowPtrB, const int* csrColIndB, const float* beta, const cusparseMatDescr_t descrD, int nnzD, const int* csrRowPtrD, const int* csrColIndD, csrgemm2Info_t info, size_t* pBufferSizeInBytes) { return cusparseScsrgemm2_bufferSizeExt(handle, m, n, k, alpha, descrA, nnzA, csrRowPtrA, csrColIndA, descrB, nnzB, csrRowPtrB, csrColIndB, beta, descrD, nnzD, csrRowPtrD, csrColIndD, info, pBufferSizeInBytes); } template<> cusparseStatus_t cusparseXcsrgemm2_bufferSizeExt<double>( cusparseHandle_t handle, int m, int n, int k, const double* alpha, const cusparseMatDescr_t descrA, int nnzA, const int* csrRowPtrA, const int* csrColIndA, const cusparseMatDescr_t descrB, int nnzB, const int* csrRowPtrB, const int* csrColIndB, const double* beta, const cusparseMatDescr_t descrD, int nnzD, const int* csrRowPtrD, const int* csrColIndD, csrgemm2Info_t info, size_t* pBufferSizeInBytes) { return cusparseDcsrgemm2_bufferSizeExt(handle, m, n, k, alpha, descrA, nnzA, csrRowPtrA, csrColIndA, descrB, nnzB, csrRowPtrB, csrColIndB, beta, descrD, nnzD, csrRowPtrD, csrColIndD, info, pBufferSizeInBytes); } template<typename value_t> cusparseStatus_t cusparseXcsrgemm2( cusparseHandle_t handle, int m, int n, int k, const value_t* alpha, const cusparseMatDescr_t descrA, int nnzA, const value_t* csrValA, const int* csrRowPtrA, const int* csrColIndA, const cusparseMatDescr_t descrB, int nnzB, const value_t* csrValB, 
const int* csrRowPtrB, const int* csrColIndB, const value_t* beta, const cusparseMatDescr_t descrD, int nnzD, const value_t* csrValD, const int* csrRowPtrD, const int* csrColIndD, const cusparseMatDescr_t descrC, value_t* csrValC, const int* csrRowPtrC, int* csrColIndC, const csrgemm2Info_t info, void* pBuffer) { } template<> cusparseStatus_t cusparseXcsrgemm2<float>( cusparseHandle_t handle, int m, int n, int k, const float* alpha, const cusparseMatDescr_t descrA, int nnzA, const float* csrValA, const int* csrRowPtrA, const int* csrColIndA, const cusparseMatDescr_t descrB, int nnzB, const float* csrValB, const int* csrRowPtrB, const int* csrColIndB, const float* beta, const cusparseMatDescr_t descrD, int nnzD, const float* csrValD, const int* csrRowPtrD, const int* csrColIndD, const cusparseMatDescr_t descrC, float* csrValC, const int* csrRowPtrC, int* csrColIndC, const csrgemm2Info_t info, void* pBuffer) { return cusparseScsrgemm2(handle, m, n, k, alpha, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, beta, descrD, nnzD, csrValD, csrRowPtrD, csrColIndD, descrC, csrValC, csrRowPtrC, csrColIndC, info, pBuffer); } template<> cusparseStatus_t cusparseXcsrgemm2<double>( cusparseHandle_t handle, int m, int n, int k, const double* alpha, const cusparseMatDescr_t descrA, int nnzA, const double* csrValA, const int* csrRowPtrA, const int* csrColIndA, const cusparseMatDescr_t descrB, int nnzB, const double* csrValB, const int* csrRowPtrB, const int* csrColIndB, const double* beta, const cusparseMatDescr_t descrD, int nnzD, const double* csrValD, const int* csrRowPtrD, const int* csrColIndD, const cusparseMatDescr_t descrC, double* csrValC, const int* csrRowPtrC, int* csrColIndC, const csrgemm2Info_t info, void* pBuffer) { return cusparseDcsrgemm2(handle, m, n, k, alpha, descrA, nnzA, csrValA, csrRowPtrA, csrColIndA, descrB, nnzB, csrValB, csrRowPtrB, csrColIndB, beta, descrD, nnzD, csrValD, csrRowPtrD, csrColIndD, descrC, csrValC, csrRowPtrC, csrColIndC, info, pBuffer); } template<typename scalar_t> std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> run_spspmm_cuda( const torch::Tensor &rowptrA, const torch::Tensor &colA, const torch::Tensor &valA, const torch::Tensor &rowptrB, const torch::Tensor &colB, const torch::Tensor &valB, int64_t N ) { /* Input checks: all matrices should be in CSR format, matrix `D` is not used. * C = alpha*A*B + beta*D * A: m x k * B: k x n * D: m x n * C: m x n */ auto handle = at::cuda::getCurrentCUDASparseHandle(); /* * Summary of the necessary steps * 1. Allocate buffer for working-memory of size given by the cusparseXcsrgemm2_bufferSizeExt function * 2. Compute the row-pointers of the output C with function cusparseXcsrgemm2Nnz. This calculates the nnzC * 4. 
Allocate csrValC and csrColIndC (nnzC elements each) and fill them with the * cusparseXcsrgemm2 function. */ // Convert indices to int (could be long at input) const torch::Tensor &rowptrA_int = rowptrA.toType(torch::kInt); const torch::Tensor &colA_int = colA.toType(torch::kInt); const torch::Tensor &rowptrB_int = rowptrB.toType(torch::kInt); const torch::Tensor &colB_int = colB.toType(torch::kInt); const int64_t M = rowptrA_int.numel() - 1, K = rowptrB_int.numel() - 1; const int nnzA = valA.numel(); const int nnzB = valB.numel(); const scalar_t alpha = (scalar_t)1.0; torch::Tensor rowptrC = torch::empty(M + 1, rowptrA_int.options()); torch::Tensor colC, valC; int nnzC; // Creates default matrix descriptor (0-based and GENERAL matrix) cusparseMatDescr_t descr; TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&descr)); // Pointers (to alpha) are in host memory. TORCH_CUDASPARSE_CHECK(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST)); // Step 1: Create an opaque structure. csrgemm2Info_t info = NULL; TORCH_CUDASPARSE_CHECK(cusparseCreateCsrgemm2Info(&info)); // Step 2: Allocate buffer for `csrgemm2Nnz` and `csrgemm2`. size_t bufferSize; TORCH_CUDASPARSE_CHECK(cusparseXcsrgemm2_bufferSizeExt<scalar_t>( handle, M, /* Number of rows in C (output) */ N, /* Number of columns in C (output) */ K, /* Number of cols in A, rows in B */ &alpha, /* Multiplier in front of mat-mul */ descr, /* Matrix descriptor for A */ nnzA, /* NNZ for A */ rowptrA_int.data_ptr<int>(), /* Row-pointer array for A */ colA_int.data_ptr<int>(), /* Column data array for A */ descr, /* Matrix descriptor for B */ nnzB, /* NNZ for B */ rowptrB_int.data_ptr<int>(), /* Row-pointer array for B */ colB_int.data_ptr<int>(), /* Column data array for B */ NULL, /* beta (multiplier of summed matrix D, unused) */ descr, /* Matrix descriptor for D */ 0, /* NNZ for D */ NULL, /* Row-pointer array for D (unused) */ NULL, /* Column data array for D */ info, &bufferSize /* Output */ )); auto& allocator = *c10::cuda::CUDACachingAllocator::get(); auto bufferDataPtr = allocator.allocate(bufferSize); auto csrGemmBuffer = bufferDataPtr.get(); // Step 3: Compute the CSR row pointers of C. This fills `rowptrC` and `nnzC`. TORCH_CUDASPARSE_CHECK(cusparseXcsrgemm2Nnz( handle, M, /* Number of rows in C (output) */ N, /* Number of columns in C (output) */ K, /* Number of cols in A, rows in B */ descr, /* Matrix descriptor for A */ nnzA, /* NNZ for A */ rowptrA_int.data_ptr<int>(), /* Row-pointer array for A */ colA_int.data_ptr<int>(), /* Column data array for A */ descr, /* Matrix descriptor for B */ nnzB, /* NNZ for B */ rowptrB_int.data_ptr<int>(), /* Row-pointer array for B */ colB_int.data_ptr<int>(), /* Column data array for B */ descr, /* Matrix descriptor for D */ 0, /* NNZ for D */ NULL, /* Row-pointer array for D (unused) */ NULL, /* Column data array for D */ descr, /* Matrix descriptor for C */ rowptrC.data_ptr<int>(), /* Output: row-pointer array for C */ &nnzC, /* Output: number of nnz entries in C */ info, bufferDataPtr.get() /* Additional workspace in GPU memory */ )); // Step 4: Compute CSR entries.
colC = torch::empty(nnzC, rowptrC.options()); valC = torch::empty(nnzC, valA.options()); /* C = alpha * A @ B + beta * D (beta, D are empty) */ TORCH_CUDASPARSE_CHECK(cusparseXcsrgemm2<scalar_t>( handle, M, /* Number of rows in C (output) */ N, /* Number of columns in C (output) */ K, /* Number of cols in A, rows in B */ &alpha, /* Multiplier in front of mat-mul */ descr, /* Matrix descriptor for A */ nnzA, /* NNZ for A */ valA.data_ptr<scalar_t>(), /* Value array for A */ rowptrA_int.data_ptr<int>(), /* Row-pointer array for A */ colA_int.data_ptr<int>(), /* Column data array for A */ descr, /* Matrix descriptor for B */ nnzB, /* NNZ for B */ valB.data_ptr<scalar_t>(), /* Value array for B */ rowptrB_int.data_ptr<int>(), /* Row-pointer array for B */ colB_int.data_ptr<int>(), /* Column data array for B */ NULL, /* beta (multiplier of summed matrix D, unused) */ descr, /* Matrix descriptor for D */ 0, /* NNZ for D */ NULL, /* Value array for D */ NULL, /* Row-pointer array for D (unused) */ NULL, /* Column data array for D */ descr, /* Matrix descriptor for C */ valC.data_ptr<scalar_t>(), /* Value array for C */ rowptrC.data_ptr<int>(), /* Row-pointer array for C */ colC.data_ptr<int>(), /* Column data array for C */ info, bufferDataPtr.get() /* Additional workspace in GPU memory */ )); // Step 5: Free the opaque structure. cusparseDestroyCsrgemm2Info(info); cusparseDestroyMatDescr(descr); return std::make_tuple(rowptrC, colC, valC); } #endif std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> spspmm_cuda( const torch::Tensor &rowptrA, const torch::Tensor &colA, const torch::Tensor &valA, const torch::Tensor &rowptrB, const torch::Tensor &colB, const torch::Tensor &valB, int64_t N ) { CHECK_CUDA(rowptrA); CHECK_CUDA(colA); CHECK_CUDA(valA); CHECK_CUDA(rowptrB); CHECK_CUDA(colB); CHECK_CUDA(valB); TORCH_CHECK(rowptrA.dim() == 1); TORCH_CHECK(colA.dim() == 1); TORCH_CHECK(valA.dim() == 1); TORCH_CHECK(valA.size(0) == colA.size(0)); TORCH_CHECK(rowptrB.dim() == 1); TORCH_CHECK(colB.dim() == 1); TORCH_CHECK(valB.dim() == 1); TORCH_CHECK(valB.size(0) == colB.size(0)); TORCH_CHECK(valA.dtype() == valB.dtype(), "Expected A, B with equal dtypes but found ", valA.dtype(), ", ", valB.dtype()); std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> out; auto scalar_type = valA.scalar_type(); at::DeviceGuard g(rowptrA.device()); AT_DISPATCH_FLOATING_TYPES(valA.scalar_type(), "dispatch_spspmm", [&] { out = run_spspmm_cuda<scalar_t>(rowptrA, colA, valA, rowptrB, colB, valB, N); }); return out; }
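// ---------------------------------------------------------------------------
// Illustrative usage sketch for spspmm_cuda (assumes this translation unit is
// built as a libtorch CUDA extension). The tensor contents and the helper
// name `example_spspmm` are hypothetical and only show how the entry point is
// driven with two small CSR matrices.
// ---------------------------------------------------------------------------
#include <torch/torch.h>
#include <tuple>

static void example_spspmm() {
  const auto opts_i = torch::dtype(torch::kInt64).device(torch::kCUDA);
  const auto opts_f = torch::dtype(torch::kFloat32).device(torch::kCUDA);

  // A is the 2x2 identity in CSR form: rowptr = [0,1,2], col = [0,1], val = [1,1].
  torch::Tensor rowptr = torch::tensor({0, 1, 2}, opts_i);
  torch::Tensor col    = torch::tensor({0, 1}, opts_i);
  torch::Tensor val    = torch::tensor({1.0f, 1.0f}, opts_f);

  // C = A * A is again the 2x2 identity; N is the number of columns of B.
  auto out = spspmm_cuda(rowptr, col, val, rowptr, col, val, /*N=*/2);
  torch::Tensor rowptrC = std::get<0>(out);
  torch::Tensor colC    = std::get<1>(out);
  torch::Tensor valC    = std::get<2>(out);
}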
// Thrust Dependencies #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/unique.h> #include <thrust/remove.h> // Octree-SLAM Dependencies #include <octree_slam/timing_utils.h> #include <octree_slam/world/svo/svo.h> namespace octree_slam { namespace svo { texture<float, 3> brick_tex; surface<void, 3> brick_surf; typedef long long int octkey; __host__ void initOctree(unsigned int* &octree) { int oct[16]; for (int i = 0; i < 16; i++) { oct[i] = 0; } cudaMalloc((void**)&octree, 16*sizeof(unsigned int)); cudaMemcpy(octree, oct, 16*sizeof(unsigned int), cudaMemcpyHostToDevice); } template <class T> __device__ octkey computeKey(const T& point, glm::vec3 center, const int tree_depth, float edge_length) { //TODO: This will break if tree_depth > 10 (would require >= 33 bits) //Check for invalid coordinates (all three components) if (!isfinite(point.x) || !isfinite(point.y) || !isfinite(point.z)) { return 1; } //Initialize the output value with a leading 1 to specify the depth octkey morton = 1; for (int i = 0; i < tree_depth; i++) { morton = morton << 3; //Determine which octant the point lies in bool x = point.x > center.x; bool y = point.y > center.y; bool z = point.z > center.z; //Update the code morton += (x + 2*y + 4*z); //Update the edge length edge_length /= 2.0f; //Update the center center.x += edge_length * (x ? 1 : -1); center.y += edge_length * (y ? 1 : -1); center.z += edge_length * (z ? 1 : -1); } return morton; } __device__ int depthFromKey(octkey key) { const int bval[] = { 0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4 }; int r = 0; if (key & 0x7FFF0000) { r += 16 / 1; key >>= 16 / 1; } if (key & 0x0000FF00) { r += 16 / 2; key >>= 16 / 2; } if (key & 0x000000F0) { r += 16 / 4; key >>= 16 / 4; } return (r + bval[key] - 1) / 3; } __device__ int getValueFromKey(const octkey key, const int depth) { return ((key >> 3*depth) & 0x7); } __device__ int getFirstValueAndShiftDown(octkey& key) { int depth = depthFromKey(key); int value = getValueFromKey(key, depth-1); key -= ((8 + value) << 3 * (depth - 1)); key += (1 << 3 * (depth - 1)); return value; } template <class T> __global__ void computeKeys(const T* voxels, const int numVoxels, const int max_depth, const glm::vec3 center, float edge_length, octkey* keys) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= numVoxels) { return; } //Compute the full morton code const octkey morton = computeKey(voxels[index], center, max_depth, edge_length); //Fill in the key for the output keys[index] = morton; } __global__ void splitKeys(octkey* keys, const int numKeys, unsigned int* octree, octkey* left, octkey* right) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= numKeys) { return; } octkey r_key = keys[index]; octkey l_key = -1; octkey temp_key = 1; int node_idx = 0; //Determine the existing depth from the current octree data while (r_key >= 15) { //Get the child number from the first level of the key, and shift it down int value = getFirstValueAndShiftDown(r_key); temp_key = (temp_key << 3) + value; node_idx += value; //Check the flag if (!(octree[2 * node_idx] & 0x40000000)) { l_key = temp_key; break; } //The lowest 30 bits are the address of the child nodes node_idx = octree[2 * node_idx] & 0x3FFFFFFF; } //Fill in the results left[index] = l_key; right[index] = r_key; } __global__ void rightToLeftShift(octkey* left, octkey* right, const int numVoxels) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't
do anything if its out of bounds if (index >= numVoxels) { return; } //Not valid if right is empty or if left is already invalid if (left[index] == -1 || right[index] == 1) { left[index] = -1; return; } //Get moving bits from right and remove them from it octkey r_key = right[index]; int moved_bits = getFirstValueAndShiftDown(r_key); right[index] = r_key; //If this leaves left empty (only the leading 1) then right is no longer valid if (right[index] == 1) { left[index] = -1; return; } //Add the moved bits to the left key left[index] = (left[index] << 3) + moved_bits; } struct negative { __host__ __device__ bool operator() (const int x) { return (x < 0); } }; __host__ int prepassCheckResize(octkey* keys, const int numVoxels, const int max_depth, unsigned int* octree, octkey** &d_codes, int* &code_sizes) { int num_split_nodes = 0; //Allocate left and right morton code data octkey* d_left; cudaMalloc((void**)&d_left, numVoxels*sizeof(octkey)); octkey* d_right; cudaMalloc((void**)&d_right, numVoxels*sizeof(octkey)); octkey* temp_left; cudaMalloc((void**)&temp_left, numVoxels*sizeof(octkey)); thrust::device_ptr<octkey> t_left = thrust::device_pointer_cast<octkey>(temp_left); //Split the set of existing codes and new codes (right/left) splitKeys<<<ceil(float(numVoxels)/128.0f), 128>>>(keys, numVoxels, octree, d_left, d_right); cudaDeviceSynchronize(); //Allocate memory for output code data based on max_depth d_codes = (octkey**)malloc(max_depth*sizeof(octkey*)); code_sizes = (int*)malloc(max_depth*sizeof(int)); //Loop over passes for (int i = 0; i < max_depth; i++) { //Get a copy of left codes so we can modify cudaMemcpy(thrust::raw_pointer_cast(t_left), d_left, numVoxels*sizeof(octkey), cudaMemcpyDeviceToDevice); //Get the valid codes int size = thrust::remove_if(t_left, t_left + numVoxels, negative()) - t_left; //If there are no valid codes, we're done if (size == 0) { for (int j = i; j < max_depth; j++) { code_sizes[j] = 0; } break; } //Get the unique codes thrust::sort(t_left, t_left + size); size = thrust::unique(t_left, t_left + size) - t_left; //Allocate output and copy the data into it code_sizes[i] = size; cudaMalloc((void**)&(d_codes[i]), size*sizeof(octkey)); cudaMemcpy(d_codes[i], thrust::raw_pointer_cast(t_left), size*sizeof(octkey), cudaMemcpyDeviceToDevice); //Update the total number of nodes that are being split num_split_nodes += size; //Move one morton code from each right to left for the next depth rightToLeftShift<<<ceil((float)numVoxels/128.0f), 128>>>(d_left, d_right, numVoxels); } //Cleanup cudaFree(d_left); cudaFree(d_right); cudaFree(temp_left); return num_split_nodes; } __global__ void splitNodes(const octkey* keys, int numKeys, unsigned int* octree, int num_nodes) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= numKeys) { return; } //Get the key for this thread octkey key = keys[index]; //Don't do anything if its an empty key if (key == 1) { return; } int node_idx = 0; int child_idx = 0; while (key != 1) { //Get the child number from the first three bits of the key node_idx = child_idx + getFirstValueAndShiftDown(key); //The lowest 30 bits are the address of the child nodes child_idx = octree[2 * node_idx] & 0x3FFFFFFF; } //Get a new node tile int newNode = num_nodes + 8 * index; //Point this node at the new tile, and flag it octree[2 * node_idx] = (1 << 30) + (newNode & 0x3FFFFFFF); //Initialize new child nodes to 0's for (int off = 0; off < 8; off++) { octree[2 * (newNode + off)] = 0; octree[2 * (newNode + 
off) + 1] = 127 << 24; } } __host__ void expandTreeAtKeys(octkey** d_keys, int* numKeys, const int depth, unsigned int* octree, int& num_nodes) { for (size_t i = 0; i < depth; i++) { if (numKeys[i] == 0) { break; } splitNodes<<<ceil((float)numKeys[i]/128.0f), 128>>>(d_keys[i], numKeys[i], octree, num_nodes); num_nodes += 8 * numKeys[i]; cudaDeviceSynchronize(); } } __global__ void fillNodes(const octkey* keys, int numKeys, const glm::vec4* values, unsigned int* octree) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= numKeys) { return; } //Get the key for this thread octkey key = keys[index]; //Check for invalid key if (key == 1) { return; } int node_idx = 0; int child_idx = 0; while (key != 1) { //Get the child number from the first three bits of the morton code node_idx = child_idx + getFirstValueAndShiftDown(key); //The lowest 30 bits are the address of the child nodes child_idx = octree[2 * node_idx] & 0x3FFFFFFF; } glm::vec4 new_value = values[index] * 256.0f; unsigned int current_value = octree[2 * node_idx + 1]; int current_alpha = current_value >> 24; int current_r = current_value & 0xFF; int current_g = (current_value >> 8) & 0xFF; int current_b = (current_value >> 16) & 0xFF; //Implement a pseudo low-pass filter with laplace smoothing float f1 = 1 - ((float)current_alpha / 256.0f); float f2 = (float)current_alpha / 256.0f; new_value.r = new_value.r * f1 + current_r * f2; new_value.g = new_value.g * f1 + current_g * f2; new_value.b = new_value.b * f1 + current_b * f2; octree[2 * node_idx + 1] = ((int)new_value.r) + ((int)new_value.g << 8) + ((int)new_value.b << 16) + (min(255, current_alpha + 2) << 24); } __global__ void fillNodes(const octkey* keys, int numKeys, const Color256* values, unsigned int* octree) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= numKeys) { return; } //Get the key for this thread octkey key = keys[index]; //Check for invalid key if (key == 1) { return; } int node_idx = 0; int child_idx = 0; while (key != 1) { //Get the child number from the first three bits of the morton code node_idx = child_idx + getFirstValueAndShiftDown(key); //Bail out if this node has not been split (child flag bit not set) if (!(octree[2 * node_idx] & 0x40000000)) { return; } //The lowest 30 bits are the address of the child nodes child_idx = octree[2 * node_idx] & 0x3FFFFFFF; } Color256 new_value = values[index]; unsigned int current_value = octree[2 * node_idx + 1]; Color256 current; short current_alpha = current_value >> 24; current.r = current_value & 0xFF; current.g = (current_value >> 8) & 0xFF; current.b = (current_value >> 16) & 0xFF; //Implement a pseudo low-pass filter with laplace smoothing float f1 = (1 - ((float)current_alpha/256.0f)); float f2 = (float)current_alpha / 256.0f; new_value.r = new_value.r * f1 + current.r * f2; new_value.g = new_value.g * f1 + current.g * f2; new_value.b = new_value.b * f1 + current.b * f2; octree[2 * node_idx + 1] = ((int)new_value.r) + ((int)new_value.g << 8) + ((int)new_value.b << 16) + (min(255, current_alpha + 2) << 24); } __global__ void averageChildren(octkey* keys, int numKeys, unsigned int* octree) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= numKeys) { return; } //Get the key for this thread octkey key = keys[index]; ///Get the depth of the key int depth = depthFromKey(key); //Remove the max depth level from the key key = key >> 3; //Fill in back into global memory this way keys[index] = key; int node_idx = 0; int
child_idx = 0; while (key != 1) { //Get the child number from the first three bits of the morton code node_idx = child_idx + getFirstValueAndShiftDown(key); //The lowest 30 bits are the address of the child nodes child_idx = octree[2 * node_idx] & 0x3FFFFFFF; } //Loop through children values and average them glm::vec4 val = glm::vec4(0.0); int num_occ = 0; for (int i = 0; i < 8; i++) { int child_val = octree[2*(child_idx+i) + 1]; if (((child_val >> 24) & 0xFF) == 0) { //Don't count in the average if its not occupied continue; } val.r += (float) (child_val & 0xFF); val.g += (float) ((child_val >> 8) & 0xFF); val.b += (float) ((child_val >> 16) & 0xFF); //Assign the max albedo (avoids diluting it) val.a = max(val.a,(float) ((child_val >> 24) & 0xFF)); num_occ++; } //Average the color values if (num_occ > 0) { val.r = val.r / (float)num_occ; val.g = val.g / (float)num_occ; val.b = val.b / (float)num_occ; } //Assign value of this node to the average octree[(2 * node_idx) + 1] = (int)val.r + ((int)val.g << 8) + ((int)val.b << 16) + ((int)val.a << 24); } struct depth_is_zero { __device__ bool operator() (const int key) { return (depthFromKey(key) == 0); } }; __host__ void mipmapNodes(octkey* keys, int numKeys, unsigned int* octree) { //Get a thrust pointer for the keys thrust::device_ptr<octkey> t_keys = thrust::device_pointer_cast<octkey>(keys); //Check if any keys still have children while ((numKeys = thrust::remove_if(t_keys, t_keys + numKeys, depth_is_zero()) - t_keys) > 0) { if (numKeys > 100000) numKeys = thrust::unique(t_keys, t_keys + numKeys) - t_keys; //Average the children at the given set of keys averageChildren<<<ceil((float)numKeys / 64.0f), 64>>>(keys, numKeys, octree); cudaDeviceSynchronize(); } } /* __global__ void mipmapBricks(int* octree, int poolSize, int startNode, cudaArray* bricks, float* numBricks) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= poolSize) { return; } int node = octree[2 * (index + startNode)]; //Don't do anything if this node has no children if (!(node & 0x40000000)) { return; } //Get a new brick float newBrick = atomicAdd(numBricks, 3.0f); //Assign the brick to the node octree[(2 * (index + startNode)) + 1] = __float_as_int(newBrick); //TODO: Get all necessary neighbors //TODO: Fill the values into the brick in texture memory float val = tex3D(brick_tex, 1.1f, 1.1f, 1.1f); surf3Dwrite(5.0f, brick_surf, 1, 1, 1, cudaBoundaryModeClamp); } */ __global__ void getOccupiedChildren(const unsigned int* octree, const octkey* parents, const int num_parents, octkey* children) { int index = blockIdx.x * blockDim.x + threadIdx.x; //Don't do anything if its out of bounds if (index >= num_parents) { return; } //Get the key for the parent const octkey key = parents[index]; octkey temp_key = key; //Flag whether the node has any children int has_children = true; //Get the pointer to the children int pointer = 0; while (temp_key != 1) { //Get the next child pointer += getFirstValueAndShiftDown(temp_key); has_children = octree[2 * pointer] & 0x40000000; pointer = octree[2 * pointer] & 0x3FFFFFFF; } //Loop through the children, and if they are occupied fill its key into the output for (int i = 0; i < 8; i++) { int child_val = -1; if (has_children) { unsigned int val2 = octree[2 * (pointer + i) + 1]; if (((val2 >> 24) & 0xFF) > 127) { //TODO: Should we threshold "occupied" at something other than 0?
//Add the moved bits child_val = (key << 3) + i; } } children[8 * index + i] = child_val; } } __global__ void voxelGridFromKeys(unsigned int* octree, octkey* keys, int num_voxels, glm::vec3 center, float edge_length, glm::vec4* centers, glm::vec4* colors) { //Get the index for the thread int idx = blockIdx.x*blockDim.x + threadIdx.x; //Don't do anything if out of bounds if (idx >= num_voxels) { return; } octkey key = keys[idx]; //Get the pointer to the voxel int node_idx = 0; int child_idx = 0; while (key != 1) { //Get the next child int pos = getFirstValueAndShiftDown(key); node_idx = child_idx + pos; child_idx = octree[2 * node_idx] & 0x3FFFFFFF; //Decode the value into xyz int x = pos & 0x1; int y = pos & 0x2; int z = pos & 0x4; //Half the edge length to use it for the update edge_length /= 2.0f; //Update the center center.x += edge_length * (x ? 1 : -1); center.y += edge_length * (y ? 1 : -1); center.z += edge_length * (z ? 1 : -1); } unsigned int val = octree[2 * node_idx + 1]; //Fill in the voxel centers[idx] = glm::vec4(center.x, center.y, center.z, 1.0f); colors[idx].r = ((float)(val & 0xFF) / 255.0f); colors[idx].g = ((float)((val >> 8) & 0xFF) / 255.0f); colors[idx].b = ((float)((val >> 16) & 0xFF) / 255.0f); colors[idx].a = ((float)((val >> 24) & 0xFF) / 255.0f); } extern "C" void svoFromVoxelGrid(const VoxelGrid& grid, const int max_depth, unsigned int* &octree, int& octree_size, glm::vec3 octree_center, const float edge_length, cudaArray* d_bricks) { //Initialize the octree with a base set of empty nodes if its empty if (octree_size == 0) { initOctree(octree); octree_size = 8; } //Allocate space for octree keys for each input octkey* d_keys; cudaMalloc((void**)&d_keys, grid.size*sizeof(octkey)); //Compute Keys computeKeys<glm::vec4><<<ceil((float)grid.size/256.0f), 256>>>(grid.centers, grid.size, max_depth, octree_center, edge_length, d_keys); cudaDeviceSynchronize(); //Get a thrust pointer for the keys thrust::device_ptr<octkey> t_keys = thrust::device_pointer_cast<octkey>(d_keys); thrust::sort(t_keys, t_keys + grid.size); //Determine how many new nodes are needed in the octree, and the keys to the nodes that need split in each pass octkey** d_codes = NULL; int* code_sizes = NULL; int new_nodes = prepassCheckResize(d_keys, grid.size, max_depth, octree, d_codes, code_sizes); //Create a new octree with an updated size and copy over the old data. 
Free up the old copy when it is no longer needed unsigned int* new_octree; cudaMalloc((void**)&new_octree, 2 * (octree_size + 8*new_nodes) * sizeof(unsigned int)); cudaMemcpy(new_octree, octree, 2 * octree_size * sizeof(unsigned int), cudaMemcpyDeviceToDevice); cudaFree(octree); octree = new_octree; //Expand the tree now that the space has been allocated expandTreeAtKeys(d_codes, code_sizes, max_depth, octree, octree_size); //Free up the codes now that we no longer need them for (size_t i = 0; i < max_depth; i++) { if (code_sizes[i] > 0) { cudaFree(d_codes[i]); } } free(d_codes); free(code_sizes); //Write voxel values into the lowest level of the svo fillNodes<<<ceil((float)grid.size / 256.0f), 256>>>(d_keys, grid.size, grid.colors, octree); cudaDeviceSynchronize(); //TODO: Handle duplicate keys //Mip-mapping (currently only without use of the brick pool) mipmapNodes(d_keys, grid.size, octree); cudaDeviceSynchronize(); //Free up the keys since they are no longer needed cudaFree(d_keys); } extern "C" void svoFromPointCloud(const glm::vec3* points, const Color256* colors, const int size, const int max_depth, unsigned int* &octree, int& octree_size, glm::vec3 octree_center, const float edge_length, cudaArray* d_bricks) { //TODO: This duplicates alot from the VoxelGrid function. Refactor the API to be more efficient //startTiming(); //Initialize the octree with a base set of empty nodes if its empty if (octree_size == 0) { initOctree(octree); octree_size = 8; } //Allocate space for octree keys for each input octkey* d_keys; cudaMalloc((void**)&d_keys, size*sizeof(octkey)); //Compute Keys computeKeys<glm::vec3><<<ceil((float)size / 256.0f), 256>>>(points, size, max_depth, octree_center, edge_length, d_keys); cudaDeviceSynchronize(); //Determine how many new nodes are needed in the octree, and the keys to the nodes that need split in each pass octkey** d_codes = NULL; int* code_sizes = NULL; int new_nodes = prepassCheckResize(d_keys, size, max_depth, octree, d_codes, code_sizes); //Create a new octree with an updated size and copy over the old data. 
Free up the old copy when it is no longer needed unsigned int* new_octree; cudaMalloc((void**)&new_octree, 2 * (octree_size + 8 * new_nodes) * sizeof(unsigned int)); cudaMemcpy(new_octree, octree, 2 * octree_size * sizeof(unsigned int), cudaMemcpyDeviceToDevice); cudaFree(octree); octree = new_octree; //Expand the tree now that the space has been allocated expandTreeAtKeys(d_codes, code_sizes, max_depth, octree, octree_size); //Free up the codes now that we no longer need them for (size_t i = 0; i < max_depth; i++) { if (code_sizes[i] > 0) { cudaFree(d_codes[i]); } } free(d_codes); free(code_sizes); //Write voxel values into the lowest level of the svo fillNodes<<<ceil((float)size / 256.0f), 256>>>(d_keys, size, colors, octree); cudaDeviceSynchronize(); //TODO: Handle duplicate keys //Mip-mapping (currently only without use of the brick pool) mipmapNodes(d_keys, size, octree); cudaDeviceSynchronize(); //Free up the keys since they are no longer needed cudaFree(d_keys); //std::cout << "Num nodes: " << octree_size << std::endl; //float t = stopTiming(); //std::cout << "PC insertion took: " << t << std::endl; } extern "C" void extractVoxelGridFromSVO(unsigned int* &octree, int& octree_size, const int max_depth, const glm::vec3 center, float edge_length, VoxelGrid& grid) { //startTiming(); //Loop through each pass until max_depth, and determine the number of nodes at the highest resolution, along with morton codes for them int num_voxels = 1; //Initialize a node list with empty key (only a leading 1) for the first set of children, and copy to GPU octkey initial_nodes[1] = {1}; octkey* node_list; cudaMalloc((void**)&node_list, sizeof(octkey)); cudaMemcpy(node_list, initial_nodes, sizeof(octkey), cudaMemcpyHostToDevice); for (int i = 0; i < max_depth; i++) { //Allocate space for this pass based on the number of keys (x8) octkey* new_nodes; cudaMalloc((void**)&new_nodes, 8*num_voxels*sizeof(octkey)); //Run kernel on all of the keys (x8) getOccupiedChildren<<<ceil((float)num_voxels/256.0f), 256>>>(octree, node_list, num_voxels, new_nodes); cudaDeviceSynchronize(); //Thrust remove-if to get the set of keys for the next pass { thrust::device_ptr<octkey> t_nodes = thrust::device_pointer_cast<octkey>(new_nodes); num_voxels = thrust::remove_if(t_nodes, t_nodes + 8*num_voxels, negative()) - t_nodes; } //Free up memory for the previous set of keys cudaFree(node_list); node_list = new_nodes; } //Allocate the voxel grid grid.size = num_voxels; cudaMalloc((void**)&grid.centers, num_voxels*sizeof(glm::vec4)); cudaMalloc((void**)&grid.colors, num_voxels*sizeof(glm::vec4)); //Extract the data into the grid voxelGridFromKeys<<<ceil((float)num_voxels / 256.0f), 256>>>(octree, node_list, num_voxels, center, edge_length, grid.centers, grid.colors); cudaDeviceSynchronize(); //Free up memory cudaFree(node_list); //float t = stopTiming(); //std::cout << "Vox extraction took: " << t << std::endl; } } // namespace svo } // namespace octree_slam
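// ---------------------------------------------------------------------------
// Illustrative host-side sketch of the Morton-key layout used above: a
// leading 1 bit marks the depth, followed by one 3-bit octant code
// (x + 2*y + 4*z) per level. The helper names hostComputeKey and
// hostDepthFromKey are hypothetical; they simply mirror the device-side
// computeKey/depthFromKey logic for clarity.
// ---------------------------------------------------------------------------
#include <glm/glm.hpp>

static long long int hostComputeKey(glm::vec3 p, glm::vec3 center,
                                    int tree_depth, float edge_length) {
  long long int key = 1;  // the leading 1 encodes the depth of the key
  for (int i = 0; i < tree_depth; i++) {
    const bool x = p.x > center.x;
    const bool y = p.y > center.y;
    const bool z = p.z > center.z;
    key = (key << 3) + (x + 2 * y + 4 * z);  // append one octant code
    edge_length *= 0.5f;
    center += edge_length * glm::vec3(x ? 1.0f : -1.0f,
                                      y ? 1.0f : -1.0f,
                                      z ? 1.0f : -1.0f);
  }
  return key;  // e.g. depth 2 with octants {5, 3} gives binary 1 101 011
}

static int hostDepthFromKey(long long int key) {
  int depth = -1;  // count the 3-bit groups below the leading 1
  for (; key != 0; key >>= 3) depth++;
  return depth;    // hostDepthFromKey(hostComputeKey(...)) == tree_depth
}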
#include "utils/utils.cuh" #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 #define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS) // Blelloch scan template<typename data_t> __global__ void __pre_scan( data_t* dg_index, data_t* dg_input, data_t* dg_output, data_t* dg_blk_sum, int n, int blk_sz){ extern __shared__ data_t s_tmp[]; //contains blk_sz vaild element const int STRIDE = blockDim.x*gridDim.x; const int tid = threadIdx.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; int offset = 1; int ai = tid; int bi = tid+(blk_sz/2); int bankoffsetA = CONFLICT_FREE_OFFSET(ai); int bankoffsetB = CONFLICT_FREE_OFFSET(bi); int th = (1+(n-1)/blk_sz)*blk_sz; for(int idx = gtid;; idx += STRIDE){ // step 1: load to share memory int blk_id = (2*idx)/blk_sz; int base = blk_id*blk_sz; if(base+ai>=th && base+bi>=th) break; if(dg_index){ s_tmp[ai+bankoffsetA] = (base+ai<n) ? dg_input[dg_index[base+ai]] : 0; s_tmp[bi+bankoffsetB] = (base+bi<n) ? dg_input[dg_index[base+bi]] : 0; }else{ s_tmp[ai+bankoffsetA] = (base+ai<n) ? dg_input[base+ai] : 0; s_tmp[bi+bankoffsetB] = (base+bi<n) ? dg_input[base+bi] : 0; } // step 2: up-sweep for(int d=(blk_sz>>1); d>0; d>>=1){ __syncthreads(); if(tid < d){ int ai = offset*(2*tid+1)-1; int bi = offset*(2*tid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_tmp[bi] += s_tmp[ai]; } offset <<= 1; } // step 3: write the block sum and clear the last element if(tid == 0){ if(dg_blk_sum!=NULL) dg_blk_sum[blk_id] = s_tmp[blk_sz-1 + CONFLICT_FREE_OFFSET(blk_sz-1)]; s_tmp[blk_sz-1 + CONFLICT_FREE_OFFSET(blk_sz-1)] = 0; } // step 4: down-sweep for(int d=1; d<blk_sz; d<<=1){ offset >>= 1; __syncthreads(); if(tid < d){ int ai = offset*(2*tid+1)-1; int bi = offset*(2*tid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); data_t t = s_tmp[ai]; s_tmp[ai] = s_tmp[bi]; s_tmp[bi] += t; } } // step 5: write back to global memory __syncthreads(); if(dg_output){ if(base+ai < n) dg_output[base+ai] = s_tmp[ai+bankoffsetA]; if(base+bi < n) dg_output[base+bi] = s_tmp[bi+bankoffsetB]; } } } // if blk_num < THD_NUM*2 // and the dg_blk_sum is not exact equal to THD_NUM*2 template<typename data_t> __global__ void __post_scan(data_t* dg_output, data_t* dg_blk_sum, int n, int blk_sz, int blk_num){ extern __shared__ data_t s_tmp[]; //contains blk_sz vaild element const int STRIDE = blockDim.x*gridDim.x; const int tid = threadIdx.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; int offset = 1; int ai = tid<<1; int bi = tid<<1|1; int bankoffsetA = CONFLICT_FREE_OFFSET(ai); int bankoffsetB = CONFLICT_FREE_OFFSET(bi); // step 1: load to share memory (maybe bank conflict) s_tmp[ai+bankoffsetA] = (ai<blk_num) ? dg_blk_sum[ai] : 0; s_tmp[bi+bankoffsetB] = (bi<blk_num) ? 
dg_blk_sum[bi] : 0; // step 2: up-sweep for(int d=blk_sz>>1; d>0; d>>=1){ __syncthreads(); if(tid < d){ int ai = offset*(2*tid+1)-1; int bi = offset*(2*tid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_tmp[bi] += s_tmp[ai]; } offset <<= 1; } // step 3: write the block sum and clear the last element if(tid == 0) s_tmp[blk_sz-1 + CONFLICT_FREE_OFFSET(blk_sz-1)] = 0; // step 4: down-sweep for(int d=1; d<blk_sz; d<<=1){ offset >>= 1; __syncthreads(); if(tid < d){ int ai = offset*(2*tid+1)-1; int bi = offset*(2*tid+2)-1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); data_t t = s_tmp[ai]; s_tmp[ai] = s_tmp[bi]; s_tmp[bi] += t; } } // step 5: write back to global memory __syncthreads(); for(int idx = gtid; (idx<<1) < n; idx += STRIDE){ int blk_id = 2*idx/blk_sz; if((idx<<1) < n) dg_output[idx<<1] += s_tmp[blk_id+CONFLICT_FREE_OFFSET(blk_id)]; if(((idx<<1)|1) < n) dg_output[(idx<<1)|1] += s_tmp[blk_id+CONFLICT_FREE_OFFSET(blk_id)]; } } template<typename data_t> __global__ void __final_scan( data_t* dg_output, data_t* dg_blk_sum, int n, int blk_sz){ const int STRIDE = blockDim.x*gridDim.x; const int gtid = threadIdx.x + blockIdx.x*blockDim.x; for(int idx = gtid; idx < n; idx += STRIDE){ int blk_id = idx/blk_sz; dg_output[idx] += dg_blk_sum[blk_id]; } } template<size_t CTA_NUM = 256, size_t THD_NUM = 256, typename data_t> __host__ void __recursive_scan(data_t* dg_input, int n, cudaStream_t &stream){ data_t *dg_blk_sum; const int blk_sz = THD_NUM << 1; const int blk_num = 1 + (n-1)/blk_sz; const int padding = CONFLICT_FREE_OFFSET(blk_sz-1); //TODO: Message Pool H_ERR(cudaMalloc((void **)&dg_blk_sum, sizeof(data_t)*blk_num)); __pre_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t), stream>>> (NULL, dg_input, dg_input, dg_blk_sum, n, blk_sz); //cudaThreadSynchronize(); if(blk_num <= blk_sz){ __post_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t), stream>>> (dg_input, dg_blk_sum, n, blk_sz, blk_num); }else{ __recursive_scan<CTA_NUM, THD_NUM, data_t>(dg_blk_sum, blk_num, stream); __final_scan<data_t> <<<CTA_NUM, THD_NUM, 0, stream>>> (dg_input, dg_blk_sum, n, blk_sz); } H_ERR(cudaFree(dg_blk_sum)); } /**************************************************** * Usage: * scan<CTA_NUM, THD_NUM>(dg_index, dg_input, dg_output, n); * - dg_index: the index array * - dg_input: the input array * - dg_output: prefix sum array * - n: the size of dg_input * (n must be an exact multiple of 2*THD_NUM) ****************************************************/ template<size_t CTA_NUM = 256, size_t THD_NUM = 256, typename data_t> __host__ void scan( data_t* dg_index, data_t* dg_input, data_t* dg_output, int n, cudaStream_t &stream){ data_t *dg_blk_sum; const int blk_sz = THD_NUM << 1; const int blk_num = 1 + (n-1)/blk_sz; const int padding = CONFLICT_FREE_OFFSET(blk_sz-1); if(n <= blk_sz){ __pre_scan<data_t> <<<1, THD_NUM, (padding+blk_sz)*sizeof(data_t), stream>>> (dg_index, dg_input, dg_output, NULL, n, blk_sz); }else if(n <= blk_sz*blk_sz){ //TODO: Message Pool H_ERR(cudaMalloc((void **)&dg_blk_sum, sizeof(data_t)*blk_num)); __pre_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t), stream>>> (dg_index, dg_input, dg_output, dg_blk_sum, n, blk_sz); //cudaThreadSynchronize(); __post_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t), stream>>> (dg_output, dg_blk_sum, n, blk_sz, blk_num); //TODO: Message Pool H_ERR(cudaFree(dg_blk_sum)); }else{ //TODO: Message Pool H_ERR(cudaMalloc((void **)&dg_blk_sum, sizeof(data_t)*blk_num));
__pre_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t), stream>>> (dg_index, dg_input, dg_output, dg_blk_sum, n, blk_sz); //cudaThreadSynchronize(); __recursive_scan<CTA_NUM, THD_NUM, data_t>(dg_blk_sum, blk_num, stream); //cudaThreadSynchronize(); __final_scan<data_t> <<<CTA_NUM, THD_NUM, 0, stream>>> (dg_output, dg_blk_sum, n, blk_sz); H_ERR(cudaFree(dg_blk_sum)); } } /**************************************************** * Usage: * scan<CTA_NUM, THD_NUM>(dg_input, dg_output, n); * - dg_input: the input array * - dg_output: prefix sum array * - n: the size of dg_input * (n must be exact times of 2*THD_NUM) ****************************************************/ template<size_t CTA_NUM = 256, size_t THD_NUM = 256, typename data_t> inline __host__ void scan( data_t* dg_input, data_t* dg_output, int n, cudaStream_t &stream){ scan<CTA_NUM, THD_NUM, data_t>(NULL, dg_input, dg_output, n, stream); } //////////////////////////////////////////////////////////////////////////////// // TODO: UGLY //////////////////////////////////////////////////////////////////////////////// template<size_t CTA_NUM = 256, size_t THD_NUM = 256, typename data_t> __host__ void __recursive_scan(data_t* dg_input, int n){ data_t *dg_blk_sum; const int blk_sz = THD_NUM << 1; const int blk_num = 1 + (n-1)/blk_sz; const int padding = CONFLICT_FREE_OFFSET(blk_sz-1); //TODO: Messag Pool H_ERR(cudaMalloc((void **)&dg_blk_sum, sizeof(data_t)*blk_num)); __pre_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t)>>> (NULL, dg_input, dg_input, dg_blk_sum, n, blk_sz); //cudaThreadSynchronize(); if(blk_num <= blk_sz){ __post_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t)>>> (dg_input, dg_blk_sum, n, blk_sz, blk_num); }else{ __recursive_scan<CTA_NUM, THD_NUM, data_t>(dg_blk_sum, blk_num); __final_scan<data_t> <<<CTA_NUM, THD_NUM>>> (dg_input, dg_blk_sum, n, blk_sz); } H_ERR(cudaFree(dg_blk_sum)); } template<size_t CTA_NUM = 256, size_t THD_NUM = 256, typename data_t> __host__ void scan( data_t* dg_index, data_t* dg_input, data_t* dg_output, int n){ const int blk_sz = THD_NUM << 1; const int blk_num = 1 + (n-1)/blk_sz; const int padding = CONFLICT_FREE_OFFSET(blk_sz-1); if(global_dg_blk_sum == NULL) H_ERR(cudaMalloc((void **)&global_dg_blk_sum, sizeof(data_t)*blk_num)); data_t *dg_blk_sum = global_dg_blk_sum; if(n <= blk_sz){ __pre_scan<data_t> <<<1, THD_NUM, (padding+blk_sz)*sizeof(data_t)>>> (dg_index, dg_input, dg_output, NULL, n, blk_sz); }else if(n <= blk_sz*blk_sz){ //TODO: Preallocate //H_ERR(cudaMalloc((void **)&dg_blk_sum, sizeof(data_t)*blk_num)); __pre_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t)>>> (dg_index, dg_input, dg_output, dg_blk_sum, n, blk_sz); //cudaThreadSynchronize(); __post_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t)>>> (dg_output, dg_blk_sum, n, blk_sz, blk_num); //TODO: Message Pool //H_ERR(cudaFree(dg_blk_sum)); }else{ //TODO: Messag Pool //H_ERR(cudaMalloc((void **)&dg_blk_sum, sizeof(data_t)*blk_num)); __pre_scan<data_t> <<<CTA_NUM, THD_NUM, (padding+blk_sz)*sizeof(data_t)>>> (dg_index, dg_input, dg_output, dg_blk_sum, n, blk_sz); //cudaThreadSynchronize(); __recursive_scan<CTA_NUM, THD_NUM, data_t>(dg_blk_sum, blk_num); //cudaThreadSynchronize(); __final_scan<data_t> <<<CTA_NUM, THD_NUM, 0>>> (dg_output, dg_blk_sum, n, blk_sz); //H_ERR(cudaFree(dg_blk_sum)); } } template<size_t CTA_NUM = 256, size_t THD_NUM = 256, typename data_t> inline __host__ void scan( data_t* dg_input, data_t* dg_output, int n){ scan<CTA_NUM, 
THD_NUM, data_t>(NULL, dg_input, dg_output, n); } #endif
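// ---------------------------------------------------------------------------
// Illustrative usage sketch for the scan() entry point above, following the
// usage comment: with the default THD_NUM of 256 the input length should be
// a multiple of 2*THD_NUM, so a 512-element exclusive prefix sum of all ones
// yields 0,1,2,...,511. The helper name example_scan is hypothetical.
// ---------------------------------------------------------------------------
static void example_scan(){
  const int n = 512;  // multiple of 2*THD_NUM (= 512)
  int h_in[512], h_out[512];
  for(int i = 0; i < n; i++) h_in[i] = 1;

  int *d_in = NULL, *d_out = NULL;
  cudaMalloc((void**)&d_in,  n*sizeof(int));
  cudaMalloc((void**)&d_out, n*sizeof(int));
  cudaMemcpy(d_in, h_in, n*sizeof(int), cudaMemcpyHostToDevice);

  scan<256, 256>(d_in, d_out, n);  // exclusive Blelloch scan
  cudaDeviceSynchronize();

  cudaMemcpy(h_out, d_out, n*sizeof(int), cudaMemcpyDeviceToHost);
  // h_out[i] == i for the all-ones input
  cudaFree(d_in);
  cudaFree(d_out);
}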
#pragma once #include <vector> #include <gunrock/util/basic_utils.h> #include <gunrock/util/error_utils.cuh> #include <gunrock/util/multithread_utils.cuh> #include <gunrock/util/multithreading.cuh> #include <gunrock/graph/gp.cuh> namespace gunrock { namespace partitioner { using PartitionStatus = unsigned int; enum : PartitionStatus { PreInit = 0x100, Inited = 0x200, Partitioned = 0x400, }; template <typename SizeT, typename ValueT> struct SortNode { public: SizeT posit; ValueT value; bool operator==(const SortNode &node) const { return (node.value == value); } bool operator<(const SortNode &node) const { return (node.value < value); } SortNode &operator=(const SortNode &rhs) { this->posit = rhs.posit; this->value = rhs.value; return *this; } }; // end of SortNode template <typename SizeT, typename ValueT> bool Compare_SortNode(SortNode<SizeT, ValueT> A, SortNode<SizeT, ValueT> B) { return (A.value < B.value); } /* * @brief ThreadSlice data structure. * * @tparam VertexId * @tparam SizeT * @tparam Value */ template <typename GraphT> struct ThreadSlice { public: GraphT *org_graph; GraphT *sub_graph; GraphT *sub_graphs; int thread_num, num_subgraphs; util::cpu_mt::CPUBarrier *cpu_barrier; CUTThread thread_Id; PartitionFlag partition_flag; cudaError_t retval; }; /** * @brief MakeSubGraph_Thread function. * * @param[in] thread_data_ * * \return CUT_THREADPROC */ template <typename GraphT, bool CSR_SWITCH> struct CsrSwitch { static CUT_THREADPROC MakeSubGraph_Thread(void *thread_data_) { CUT_THREADEND; } }; template <typename GraphT> struct CsrSwitch<GraphT, true> { static CUT_THREADPROC MakeSubGraph_Thread(void *thread_data_) { typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::CsrT CsrT; typedef typename GraphT::GpT GpT; ThreadSlice<GraphT> *thread_data = (ThreadSlice<GraphT> *)thread_data_; GraphT *org_graph = thread_data->org_graph; GraphT *sub_graph = thread_data->sub_graph; GraphT *sub_graphs = thread_data->sub_graphs; int thread_num = thread_data->thread_num; util::cpu_mt::CPUBarrier *cpu_barrier = thread_data->cpu_barrier; int num_subgraphs = thread_data->num_subgraphs; PartitionFlag flag = thread_data->partition_flag; cudaError_t &retval = thread_data->retval; auto &org_partition_table = org_graph->GpT::partition_table; auto &org_convertion_table = org_graph->GpT::convertion_table; auto &partition_table = sub_graph->GpT::partition_table; // VertexId** convertion_tables = thread_data->convertion_tables; // int** partition_tables = thread_data->partition_tables; auto &convertion_table = sub_graph->GpT::convertion_table; auto &original_vertex = sub_graph->GpT::original_vertex; auto &backward_partition = sub_graph->GpT::backward_partition; auto &backward_convertion = sub_graph->GpT::backward_convertion; auto &backward_offset = sub_graph->GpT::backward_offset; auto &out_offset = sub_graph->GpT::out_offset; auto &in_counter = sub_graph->GpT::in_counter; auto &out_counter = sub_graph->GpT::out_counter; // bool enable_backward = thread_data->enable_backward; bool keep_node_num = ((flag & Keep_Node_Num) != 0); // bool keep_order = thread_data->keep_order; SizeT num_nodes = 0, node_counter; SizeT num_edges = 0, edge_counter; util::Array1D<SizeT, int> marker; util::Array1D<SizeT, VertexT> tconvertion_table; util::Array1D<SizeT, SizeT> tout_counter; SizeT in_counter_ = 0; util::Location target = util::HOST; marker.SetName("partitioner::marker"); tconvertion_table.SetName("partitioner::tconvertion_table"); 
tout_counter.SetName("partitioner::tout_counter"); util::PrintMsg("Thread " + std::to_string(thread_num) + ", 1"); retval = cudaSuccess; retval = marker.Allocate(org_graph->nodes, target); if (retval) CUT_THREADEND; if (!keep_node_num) retval = tconvertion_table.Allocate(org_graph->nodes, target); if (retval) CUT_THREADEND; retval = in_counter.Allocate(num_subgraphs + 1, target); if (retval) CUT_THREADEND; retval = out_counter.Allocate(num_subgraphs + 1, target); if (retval) CUT_THREADEND; retval = out_offset.Allocate(num_subgraphs + 1, target); if (retval) CUT_THREADEND; memset(marker + 0, 0, sizeof(int) * org_graph->nodes); memset(out_counter + 0, 0, sizeof(SizeT) * (num_subgraphs + 1)); // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 2"); num_nodes = 0; for (VertexT v = 0; v < org_graph->nodes; v++) if (org_partition_table[v] == thread_num) { if (!keep_node_num) { org_convertion_table[v] = out_counter[thread_num]; tconvertion_table[v] = out_counter[thread_num]; } marker[v] = 1; SizeT edge_start = org_graph->CsrT::row_offsets[v]; SizeT edge_end = org_graph->CsrT::row_offsets[v + 1]; for (SizeT edge = edge_start; edge < edge_end; edge++) { SizeT neighbour = org_graph->CsrT::column_indices[edge]; int peer = org_partition_table[neighbour]; if ((peer != thread_num) && (marker[neighbour] == 0)) { if (!keep_node_num) tconvertion_table[neighbour] = out_counter[peer]; out_counter[peer]++; marker[neighbour] = 1; num_nodes++; } } out_counter[thread_num]++; num_nodes++; num_edges += edge_end - edge_start; } retval = marker.Release(); if (retval) CUT_THREADEND; // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 3"); out_offset[0] = 0; node_counter = out_counter[thread_num]; for (int peer = 0; peer < num_subgraphs; peer++) { if (peer == thread_num) continue; int peer_ = (peer < thread_num ? peer + 1 : peer); out_offset[peer_] = node_counter; node_counter += out_counter[peer]; } out_offset[num_subgraphs] = node_counter; // util::cpu_mt::PrintCPUArray<SizeT, SizeT>( // "out_offsets", out_offsets[thread_num], num_subgraphs+1, thread_num); util::cpu_mt::IncrementnWaitBarrier(cpu_barrier, thread_num); // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 4"); node_counter = 0; for (int peer = 0; peer < num_subgraphs; peer++) { if (peer == thread_num) continue; int peer_ = (peer < thread_num ? peer + 1 : peer); int thread_num_ = (thread_num < peer ? 
thread_num + 1 : thread_num); in_counter[peer_] = sub_graphs[peer].GpT::out_offset[thread_num_ + 1] - sub_graphs[peer].GpT::out_offset[thread_num_]; node_counter += in_counter[peer_]; } in_counter[num_subgraphs] = node_counter; // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 5"); if (keep_node_num) num_nodes = org_graph->nodes; retval = sub_graph->CsrT::Allocate(num_nodes, num_edges, target); if (retval) CUT_THREADEND; retval = sub_graph->GpT::Allocate(num_nodes, num_edges, num_subgraphs, flag | Sub_Graph_Mark, target); if (retval) CUT_THREADEND; if (flag & Enable_Backward) { if (keep_node_num) retval = marker.Allocate(num_subgraphs * org_graph->nodes, target); else retval = marker.Allocate(num_subgraphs * out_counter[thread_num], target); memset(marker + 0, 0, sizeof(VertexT) * marker.GetSize()); for (SizeT neighbour = 0; neighbour < org_graph->nodes; neighbour++) if (org_partition_table[neighbour] != thread_num) { SizeT edge_start = org_graph->CsrT::row_offsets[neighbour]; SizeT edge_end = org_graph->CsrT::row_offsets[neighbour + 1]; for (SizeT edge = edge_start; edge < edge_end; edge++) { VertexT v = org_graph->CsrT::column_indices[edge]; if (org_partition_table[v] != thread_num) continue; marker[org_convertion_table[v] * num_subgraphs + org_partition_table[v]] = 1 + neighbour; } } } // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 6"); edge_counter = 0; for (VertexT v = 0; v < org_graph->nodes; v++) if (org_partition_table[v] == thread_num) { VertexT v_ = keep_node_num ? v : tconvertion_table[v]; sub_graph->CsrT::row_offsets[v_] = edge_counter; if (GraphT::FLAG & graph::HAS_NODE_VALUES) sub_graph->CsrT::node_values[v_] = org_graph->CsrT::node_values[v]; partition_table[v_] = 0; if (!keep_node_num) { convertion_table[v_] = v_; if (flag & Use_Original_Vertex) original_vertex[v_] = v; } SizeT edge_start = org_graph->CsrT::row_offsets[v]; SizeT edge_end = org_graph->CsrT::row_offsets[v + 1]; for (SizeT edge = edge_start; edge < edge_end; edge++) { SizeT neighbour = org_graph->CsrT::column_indices[edge]; int peer = org_partition_table[neighbour]; int peer_ = (peer < thread_num ? peer + 1 : peer); if (peer == thread_num) peer_ = 0; VertexT neighbour_ = (keep_node_num) ? neighbour : (tconvertion_table[neighbour] + out_offset[peer_]); sub_graph->CsrT::column_indices[edge_counter] = neighbour_; if (GraphT::FLAG & graph::HAS_EDGE_VALUES) sub_graph->CsrT::edge_values[edge_counter] = org_graph->CsrT::edge_values[edge]; if (peer != thread_num && !keep_node_num) { sub_graph->CsrT::row_offsets[neighbour_] = num_edges; partition_table[neighbour_] = peer_; if (!keep_node_num) { convertion_table[neighbour_] = org_convertion_table[neighbour]; if (flag & Use_Original_Vertex) original_vertex[neighbour_] = neighbour; } } edge_counter++; } } else if (keep_node_num) { sub_graph->CsrT::row_offsets[v] = edge_counter; int peer = org_partition_table[v]; int peer_ = (peer < thread_num) ? peer + 1 : peer; partition_table[v] = peer_; } sub_graph->CsrT::row_offsets[num_nodes] = num_edges; // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 7"); if (flag & Enable_Backward) { in_counter_ = 0; util::cpu_mt::IncrementnWaitBarrier(cpu_barrier, thread_num); if (!keep_node_num) { for (VertexT v_ = 0; v_ < num_nodes; v_++) { backward_offset[v_] = in_counter_; if (partition_table[v_] != 0) { continue; } for (int peer = 0; peer < num_subgraphs; peer++) { if (marker[v_ * num_subgraphs + peer] == 0) continue; int peer_ = peer < thread_num ? 
peer + 1 : peer; int thread_num_ = thread_num < peer ? thread_num + 1 : thread_num; VertexT neighbour = marker[v_ * num_subgraphs + peer] - 1; VertexT neighbour_ = convertion_table[neighbour]; SizeT edge_start = sub_graph->CsrT::row_offsets[neighbour_]; SizeT edge_end = sub_graph->CsrT::row_offsets[neighbour_ + 1]; for (SizeT edge = edge_start; edge < edge_end; edge++) { VertexT _v = sub_graph->CsrT::column_indices[edge]; if (sub_graphs[peer].GpT::convertion_table[_v] == v_ && sub_graphs[peer].GpT::partition_table[_v] == thread_num_) { backward_convertion[in_counter_] = _v; break; } } backward_partition[in_counter_] = peer_; in_counter_++; } } backward_offset[num_nodes] = in_counter_; } else { retval = backward_partition.Release(target); if (retval) CUT_THREADEND; retval = backward_partition.Allocate(num_nodes * (num_subgraphs - 1), target); if (retval) CUT_THREADEND; for (VertexT v = 0; v < num_nodes; v++) { backward_offset[v] = v * (num_subgraphs - 1); for (int peer = 1; peer < num_subgraphs; peer++) { // backward_convertion[v * (num_subgraphs-1) + peer-1] = v; backward_partition[v * (num_subgraphs - 1) + peer - 1] = peer; } } backward_offset[num_nodes] = num_nodes * (num_subgraphs - 1); } retval = marker.Release(); if (retval) CUT_THREADEND; } // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 8"); out_counter[num_subgraphs] = 0; in_counter[num_subgraphs] = 0; for (int peer = 0; peer < num_subgraphs; peer++) { int peer_ = peer < thread_num ? peer + 1 : peer; int thread_num_ = peer < thread_num ? thread_num : thread_num + 1; if (peer == thread_num) { peer_ = 0; thread_num_ = 0; } out_counter[peer_] = out_offset[peer_ + 1] - out_offset[peer_]; out_counter[num_subgraphs] += out_counter[peer_]; in_counter[peer_] = sub_graphs[peer].GpT::out_offset[thread_num_ + 1] - sub_graphs[peer].GpT::out_offset[thread_num_]; in_counter[num_subgraphs] += in_counter[peer_]; } // util::cpu_mt::PrintCPUArray<SizeT, // SizeT>("out_counter",out_counter,num_gpus+1,gpu); // util::cpu_mt::PrintCPUArray<SizeT, SizeT>("in_counter ", // in_counter,num_gpus+1,gpu); retval = tconvertion_table.Release(); if (retval) CUT_THREADEND; // util::PrintMsg("Thread " + std::to_string(thread_num) + ", 9"); retval = sub_graph->FromCsr(*sub_graph, true); if (retval) CUT_THREADEND; CUT_THREADEND; } }; /** * @brief Make subgraph function. * * \return cudaError_t object indicates the success of all CUDA calls. 
*/ template <typename GraphT> cudaError_t MakeSubGraph(GraphT &org_graph, GraphT *&sub_graphs, util::Parameters &parameters, int num_subgraphs = 1, PartitionFlag flag = PARTITION_NONE, util::Location target = util::HOST) { cudaError_t retval = cudaSuccess; ThreadSlice<GraphT> *thread_data = new ThreadSlice<GraphT>[num_subgraphs]; CUTThread *thread_Ids = new CUTThread[num_subgraphs]; util::cpu_mt::CPUBarrier cpu_barrier = util::cpu_mt::CreateBarrier(num_subgraphs); if (sub_graphs == NULL) sub_graphs = new GraphT[num_subgraphs]; for (int i = 0; i < num_subgraphs; i++) { thread_data[i].org_graph = &org_graph; thread_data[i].sub_graph = sub_graphs + i; thread_data[i].sub_graphs = sub_graphs; thread_data[i].thread_num = i; thread_data[i].cpu_barrier = &cpu_barrier; thread_data[i].num_subgraphs = num_subgraphs; thread_data[i].partition_flag = flag; thread_data[i].thread_Id = cutStartThread( (CUT_THREADROUTINE) & (CsrSwitch<GraphT, (GraphT::FLAG & gunrock::graph::HAS_CSR) != 0>::MakeSubGraph_Thread), (void *)(thread_data + i)); thread_Ids[i] = thread_data[i].thread_Id; } cutWaitForThreads(thread_Ids, num_subgraphs); util::cpu_mt::DestoryBarrier(&cpu_barrier); delete[] thread_Ids; thread_Ids = NULL; delete[] thread_data; thread_data = NULL; return retval; } } // namespace partitioner } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
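// ---------------------------------------------------------------------------
// Illustrative sketch: SortNode / Compare_SortNode above are plain
// value-ordered records, so they can be used directly with std::sort as
// shown here. The helper name example_sort_nodes is hypothetical and only
// demonstrates the comparator; it is not used by the partitioner itself.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <vector>

namespace gunrock {
namespace partitioner {

inline void example_sort_nodes() {
  std::vector<SortNode<int, float> > nodes(3);
  nodes[0].posit = 0; nodes[0].value = 2.5f;
  nodes[1].posit = 1; nodes[1].value = 0.5f;
  nodes[2].posit = 2; nodes[2].value = 1.5f;

  // Sort ascending by value; afterwards nodes[0].posit == 1, since that
  // entry carried the smallest value.
  std::sort(nodes.begin(), nodes.end(), Compare_SortNode<int, float>);
}

}  // namespace partitioner
}  // namespace gunrock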
#include "histogram/histogram_gmem_atomics.h" #include "histogram/histogram_smem_atomics.h" #include "histogram/histogram_cub.h" #include <cub/util_allocator.cuh> #include <test/test_util.h> using namespace cub; //--------------------------------------------------------------------- // Globals, constants, and type declarations //--------------------------------------------------------------------- // Ensure printing of CUDA runtime errors to console #define CUB_STDERR bool g_verbose = false; // Whether to display input/output to console bool g_report = false; // Whether to display a full report in CSV format CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory struct less_than_value { inline bool operator()( const std::pair<std::string, double> &a, const std::pair<std::string, double> &b) { return a.second < b.second; } }; //--------------------------------------------------------------------- // Targa (.tga) image file parsing //--------------------------------------------------------------------- /** * TGA image header info */ struct TgaHeader { char idlength; char colormaptype; char datatypecode; short colormaporigin; short colormaplength; char colormapdepth; short x_origin; short y_origin; short width; short height; char bitsperpixel; char imagedescriptor; void Parse (FILE *fptr) { idlength = fgetc(fptr); colormaptype = fgetc(fptr); datatypecode = fgetc(fptr); fread(&colormaporigin, 2, 1, fptr); fread(&colormaplength, 2, 1, fptr); colormapdepth = fgetc(fptr); fread(&x_origin, 2, 1, fptr); fread(&y_origin, 2, 1, fptr); fread(&width, 2, 1, fptr); fread(&height, 2, 1, fptr); bitsperpixel = fgetc(fptr); imagedescriptor = fgetc(fptr); } void Display (FILE *fptr) { fprintf(fptr, "ID length: %d\n", idlength); fprintf(fptr, "Color map type: %d\n", colormaptype); fprintf(fptr, "Image type: %d\n", datatypecode); fprintf(fptr, "Color map offset: %d\n", colormaporigin); fprintf(fptr, "Color map length: %d\n", colormaplength); fprintf(fptr, "Color map depth: %d\n", colormapdepth); fprintf(fptr, "X origin: %d\n", x_origin); fprintf(fptr, "Y origin: %d\n", y_origin); fprintf(fptr, "Width: %d\n", width); fprintf(fptr, "Height: %d\n", height); fprintf(fptr, "Bits per pixel: %d\n", bitsperpixel); fprintf(fptr, "Descriptor: %d\n", imagedescriptor); } }; /** * Decode image byte data into pixel */ void ParseTgaPixel(uchar4 &pixel, unsigned char *tga_pixel, int bytes) { if (bytes == 4) { pixel.x = tga_pixel[2]; pixel.y = tga_pixel[1]; pixel.z = tga_pixel[0]; pixel.w = tga_pixel[3]; } else if (bytes == 3) { pixel.x = tga_pixel[2]; pixel.y = tga_pixel[1]; pixel.z = tga_pixel[0]; pixel.w = 0; } else if (bytes == 2) { pixel.x = (tga_pixel[1] & 0x7c) << 1; pixel.y = ((tga_pixel[1] & 0x03) << 6) | ((tga_pixel[0] & 0xe0) >> 2); pixel.z = (tga_pixel[0] & 0x1f) << 3; pixel.w = (tga_pixel[1] & 0x80); } } /** * Reads a .tga image file */ void ReadTga(uchar4* &pixels, int &width, int &height, const char *filename) { // Open the file FILE *fptr; if ((fptr = fopen(filename, "rb")) == NULL) { fprintf(stderr, "File open failed\n"); exit(-1); } // Parse header TgaHeader header; header.Parse(fptr); // header.Display(stdout); width = header.width; height = header.height; // Verify compatibility if (header.datatypecode != 2 && header.datatypecode != 10) { fprintf(stderr, "Can only handle image type 2 and 10\n"); exit(-1); } if (header.bitsperpixel != 16 && header.bitsperpixel != 24 && header.bitsperpixel != 32) { fprintf(stderr, "Can only handle pixel depths of 16, 24, and 32\n"); exit(-1); } if 
(header.colormaptype != 0 && header.colormaptype != 1) { fprintf(stderr, "Can only handle color map types of 0 and 1\n"); exit(-1); } // Skip unnecessary header info int skip_bytes = header.idlength + (header.colormaptype * header.colormaplength); fseek(fptr, skip_bytes, SEEK_CUR); // Read the image int pixel_bytes = header.bitsperpixel / 8; // Allocate and initialize pixel data size_t image_bytes = width * height * sizeof(uchar4); if ((pixels == NULL) && ((pixels = (uchar4*) malloc(image_bytes)) == NULL)) { fprintf(stderr, "malloc of image failed\n"); exit(-1); } memset(pixels, 0, image_bytes); // Parse pixels unsigned char tga_pixel[5]; int current_pixel = 0; while (current_pixel < header.width * header.height) { if (header.datatypecode == 2) { // Uncompressed if (fread(tga_pixel, 1, pixel_bytes, fptr) != pixel_bytes) { fprintf(stderr, "Unexpected end of file at pixel %d (uncompressed)\n", current_pixel); exit(-1); } ParseTgaPixel(pixels[current_pixel], tga_pixel, pixel_bytes); current_pixel++; } else if (header.datatypecode == 10) { // Compressed if (fread(tga_pixel, 1, pixel_bytes + 1, fptr) != pixel_bytes + 1) { fprintf(stderr, "Unexpected end of file at pixel %d (compressed)\n", current_pixel); exit(-1); } int run_length = tga_pixel[0] & 0x7f; ParseTgaPixel(pixels[current_pixel], &(tga_pixel[1]), pixel_bytes); current_pixel++; if (tga_pixel[0] & 0x80) { // RLE chunk for (int i = 0; i < run_length; i++) { ParseTgaPixel(pixels[current_pixel], &(tga_pixel[1]), pixel_bytes); current_pixel++; } } else { // Normal chunk for (int i = 0; i < run_length; i++) { if (fread(tga_pixel, 1, pixel_bytes, fptr) != pixel_bytes) { fprintf(stderr, "Unexpected end of file at pixel %d (normal)\n", current_pixel); exit(-1); } ParseTgaPixel(pixels[current_pixel], tga_pixel, pixel_bytes); current_pixel++; } } } } // Close file fclose(fptr); } //--------------------------------------------------------------------- // Random image generation //--------------------------------------------------------------------- /** * Generate a random image with specified entropy */ void GenerateRandomImage(uchar4* &pixels, int width, int height, int entropy_reduction) { int num_pixels = width * height; size_t image_bytes = num_pixels * sizeof(uchar4); if ((pixels == NULL) && ((pixels = (uchar4*) malloc(image_bytes)) == NULL)) { fprintf(stderr, "malloc of image failed\n"); exit(-1); } for (int i = 0; i < num_pixels; ++i) { RandomBits(pixels[i].x, entropy_reduction); RandomBits(pixels[i].y, entropy_reduction); RandomBits(pixels[i].z, entropy_reduction); RandomBits(pixels[i].w, entropy_reduction); } } //--------------------------------------------------------------------- // Histogram verification //--------------------------------------------------------------------- // Decode float4 pixel into bins template <int NUM_BINS, int ACTIVE_CHANNELS> void DecodePixelGold(float4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) { float* samples = reinterpret_cast<float*>(&pixel); for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) bins[CHANNEL] = (unsigned int) (samples[CHANNEL] * float(NUM_BINS)); } // Decode uchar4 pixel into bins template <int NUM_BINS, int ACTIVE_CHANNELS> void DecodePixelGold(uchar4 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) { unsigned char* samples = reinterpret_cast<unsigned char*>(&pixel); for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) bins[CHANNEL] = (unsigned int) (samples[CHANNEL]); } // Decode uchar1 pixel into bins template <int NUM_BINS, int ACTIVE_CHANNELS> void 
DecodePixelGold(uchar1 pixel, unsigned int (&bins)[ACTIVE_CHANNELS]) { bins[0] = (unsigned int) pixel.x; } // Compute reference histogram. Specialized for uchar4 template < int ACTIVE_CHANNELS, int NUM_BINS, typename PixelType> void HistogramGold(PixelType *image, int width, int height, unsigned int* hist) { memset(hist, 0, ACTIVE_CHANNELS * NUM_BINS * sizeof(unsigned int)); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { PixelType pixel = image[i + j * width]; unsigned int bins[ACTIVE_CHANNELS]; DecodePixelGold<NUM_BINS>(pixel, bins); for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL) { hist[(NUM_BINS * CHANNEL) + bins[CHANNEL]]++; } } } } //--------------------------------------------------------------------- // Test execution //--------------------------------------------------------------------- /** * Run a specific histogram implementation */ template < int ACTIVE_CHANNELS, int NUM_BINS, typename PixelType> void RunTest( std::vector<std::pair<std::string, double> >& timings, PixelType* d_pixels, const int width, const int height, unsigned int * d_hist, unsigned int * h_hist, int timing_iterations, const char * long_name, const char * short_name, double (*f)(PixelType*, int, int, unsigned int*, bool)) { if (!g_report) printf("%s ", long_name); fflush(stdout); // Run single test to verify (and code cache) (*f)(d_pixels, width, height, d_hist, !g_report); int compare = CompareDeviceResults(h_hist, d_hist, ACTIVE_CHANNELS * NUM_BINS, true, g_verbose); if (!g_report) printf("\t%s\n", compare ? "FAIL" : "PASS"); fflush(stdout); double elapsed_ms = 0; for (int i = 0; i < timing_iterations; i++) { elapsed_ms += (*f)(d_pixels, width, height, d_hist, false); } double avg_us = (elapsed_ms / timing_iterations) * 1000; // average in us timings.push_back(std::pair<std::string, double>(short_name, avg_us)); if (!g_report) { printf("Avg time %.3f us (%d iterations)\n", avg_us, timing_iterations); fflush(stdout); } else { printf("%.3f, ", avg_us); fflush(stdout); } AssertEquals(0, compare); } /** * Evaluate corpus of histogram implementations */ template < int NUM_CHANNELS, int ACTIVE_CHANNELS, int NUM_BINS, typename PixelType> void TestMethods( PixelType* h_pixels, int height, int width, int timing_iterations, double bandwidth_GBs) { // Copy data to gpu PixelType* d_pixels; size_t pixel_bytes = width * height * sizeof(PixelType); CubDebugExit(g_allocator.DeviceAllocate((void**) &d_pixels, pixel_bytes)); CubDebugExit(cudaMemcpy(d_pixels, h_pixels, pixel_bytes, cudaMemcpyHostToDevice)); if (g_report) printf("%.3f, ", double(pixel_bytes) / bandwidth_GBs / 1000); // Allocate results arrays on cpu/gpu unsigned int *h_hist; unsigned int *d_hist; size_t histogram_bytes = NUM_BINS * ACTIVE_CHANNELS * sizeof(unsigned int); h_hist = (unsigned int *) malloc(histogram_bytes); g_allocator.DeviceAllocate((void **) &d_hist, histogram_bytes); // Compute reference cpu histogram HistogramGold<ACTIVE_CHANNELS, NUM_BINS>(h_pixels, width, height, h_hist); // Store timings std::vector<std::pair<std::string, double> > timings; // Run experiments RunTest<ACTIVE_CHANNELS, NUM_BINS>(timings, d_pixels, width, height, d_hist, h_hist, timing_iterations, "CUB", "CUB", run_cub_histogram<NUM_CHANNELS, ACTIVE_CHANNELS, NUM_BINS, PixelType>); RunTest<ACTIVE_CHANNELS, NUM_BINS>(timings, d_pixels, width, height, d_hist, h_hist, timing_iterations, "Shared memory atomics", "smem atomics", run_smem_atomics<ACTIVE_CHANNELS, NUM_BINS, PixelType>); RunTest<ACTIVE_CHANNELS, NUM_BINS>(timings, d_pixels, width, 
height, d_hist, h_hist, timing_iterations, "Global memory atomics", "gmem atomics", run_gmem_atomics<ACTIVE_CHANNELS, NUM_BINS, PixelType>); // Report timings if (!g_report) { std::sort(timings.begin(), timings.end(), less_than_value()); printf("Timings (us):\n"); for (int i = 0; i < timings.size(); i++) { double bandwidth = height * width * sizeof(PixelType) / timings[i].second / 1000; printf("\t %.3f %s (%.3f GB/s, %.3f%% peak)\n", timings[i].second, timings[i].first.c_str(), bandwidth, bandwidth / bandwidth_GBs * 100); } printf("\n"); } // Free data CubDebugExit(g_allocator.DeviceFree(d_pixels)); CubDebugExit(g_allocator.DeviceFree(d_hist)); free(h_hist); } /** * Test different problem genres */ void TestGenres( uchar4* uchar4_pixels, int height, int width, int timing_iterations, double bandwidth_GBs) { int num_pixels = width * height; { if (!g_report) printf("1 channel uchar1 tests (256-bin):\n\n"); fflush(stdout); size_t image_bytes = num_pixels * sizeof(uchar1); uchar1* uchar1_pixels = (uchar1*) malloc(image_bytes); // Convert to 1-channel (averaging first 3 channels) for (int i = 0; i < num_pixels; ++i) { uchar1_pixels[i].x = (unsigned char) (((unsigned int) uchar4_pixels[i].x + (unsigned int) uchar4_pixels[i].y + (unsigned int) uchar4_pixels[i].z) / 3); } TestMethods<1, 1, 256>(uchar1_pixels, width, height, timing_iterations, bandwidth_GBs); free(uchar1_pixels); if (g_report) printf(", "); } { if (!g_report) printf("3/4 channel uchar4 tests (256-bin):\n\n"); fflush(stdout); TestMethods<4, 3, 256>(uchar4_pixels, width, height, timing_iterations, bandwidth_GBs); if (g_report) printf(", "); } { if (!g_report) printf("3/4 channel float4 tests (256-bin):\n\n"); fflush(stdout); size_t image_bytes = num_pixels * sizeof(float4); float4* float4_pixels = (float4*) malloc(image_bytes); // Convert to float4 with range [0.0, 1.0) for (int i = 0; i < num_pixels; ++i) { float4_pixels[i].x = float(uchar4_pixels[i].x) / 256; float4_pixels[i].y = float(uchar4_pixels[i].y) / 256; float4_pixels[i].z = float(uchar4_pixels[i].z) / 256; float4_pixels[i].w = float(uchar4_pixels[i].w) / 256; } TestMethods<4, 3, 256>(float4_pixels, width, height, timing_iterations, bandwidth_GBs); free(float4_pixels); if (g_report) printf("\n"); } } /** * Main */ int main(int argc, char **argv) { // Initialize command line CommandLineArgs args(argc, argv); if (args.CheckCmdLineFlag("help")) { printf( "%s " "[--device=<device-id>] " "[--v] " "[--i=<timing iterations>] " "\n\t" "--file=<.tga filename> " "\n\t" "--entropy=<-1 (0%), 0 (100%), 1 (81%), 2 (54%), 3 (34%), 4 (20%), ..." 
"[--height=<default: 1080>] " "[--width=<default: 1920>] " "\n", argv[0]); exit(0); } std::string filename; int timing_iterations = 100; int entropy_reduction = 0; int height = 1080; int width = 1920; g_verbose = args.CheckCmdLineFlag("v"); g_report = args.CheckCmdLineFlag("report"); args.GetCmdLineArgument("i", timing_iterations); args.GetCmdLineArgument("file", filename); args.GetCmdLineArgument("height", height); args.GetCmdLineArgument("width", width); args.GetCmdLineArgument("entropy", entropy_reduction); // Initialize device CubDebugExit(args.DeviceInit()); // Get GPU device bandwidth (GB/s) int device_ordinal, bus_width, mem_clock_khz; CubDebugExit(cudaGetDevice(&device_ordinal)); CubDebugExit(cudaDeviceGetAttribute(&bus_width, cudaDevAttrGlobalMemoryBusWidth, device_ordinal)); CubDebugExit(cudaDeviceGetAttribute(&mem_clock_khz, cudaDevAttrMemoryClockRate, device_ordinal)); double bandwidth_GBs = double(bus_width) * mem_clock_khz * 2 / 8 / 1000 / 1000; // Run test(s) uchar4* uchar4_pixels = NULL; if (!g_report) { if (!filename.empty()) { // Parse targa file ReadTga(uchar4_pixels, width, height, filename.c_str()); printf("File %s: width(%d) height(%d)\n\n", filename.c_str(), width, height); fflush(stdout); } else { // Generate image GenerateRandomImage(uchar4_pixels, width, height, entropy_reduction); printf("Random image: entropy-reduction(%d) width(%d) height(%d)\n\n", entropy_reduction, width, height); fflush(stdout); } TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); } else { // Run test suite printf("Test, MIN, RLE CUB, SMEM, GMEM, , MIN, RLE_CUB, SMEM, GMEM, , MIN, RLE_CUB, SMEM, GMEM\n"); // Entropy reduction tests for (entropy_reduction = 0; entropy_reduction < 5; ++entropy_reduction) { printf("entropy reduction %d, ", entropy_reduction); GenerateRandomImage(uchar4_pixels, width, height, entropy_reduction); TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); } printf("entropy reduction -1, "); GenerateRandomImage(uchar4_pixels, width, height, -1); TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); printf("\n"); // File image tests std::vector<std::string> file_tests; file_tests.push_back("animals"); file_tests.push_back("apples"); file_tests.push_back("sunset"); file_tests.push_back("cheetah"); file_tests.push_back("nature"); file_tests.push_back("operahouse"); file_tests.push_back("austin"); file_tests.push_back("cityscape"); for (int i = 0; i < file_tests.size(); ++i) { printf("%s, ", file_tests[i].c_str()); std::string filename = std::string("histogram/benchmark/") + file_tests[i] + ".tga"; ReadTga(uchar4_pixels, width, height, filename.c_str()); TestGenres(uchar4_pixels, height, width, timing_iterations, bandwidth_GBs); } } free(uchar4_pixels); CubDebugExit(cudaDeviceSynchronize()); printf("\n\n"); return 0; }
#include "DiffPattern.h" #include <stdio.h> #include <iostream> #include <fstream> using namespace std; // 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y // 定义了默认的线程块的尺寸。 #define DEF_BLOCK_X 8 #define DEF_BLOCK_Y 8 // 结构体:PatternData( 每个 pattern 的详细数据) // 描述每个 pattern 的数据集合,包括走向角、中心坐标、和最小外接有向矩形的长边 // 及短边等 typedef struct PatternData_st { float angel; // 走向角 float ss; // 短边 float ls; // 长边 float csX; // 中心点横坐标 float csY; // 中心点纵坐标 } PatternData; // 结构体:PatternDesc( 每个 pattern 中区域的分布信息) // 描述每个 pattern 中红色区域和紫色区域的分布信息,由于 19 个 pattern 的大小 // 均在 8 x 8 的方形区域内,因此使用位图记录,r 表示红色区域,p 表示紫色区域 // r 和 p 都是单字节,字节中二进制位为 1 的区域代表被使用,0 表示未使用。 // 包围每个 pattern 的矩形的左上角顶点与 8 x 8 方形区域左上顶点对齐,坐标方向 // 与 pattern 方向一致 typedef struct PatternDesc_st { unsigned char r[8]; // 红色区域,每个字节表示一行的 8 个像素点位置 unsigned char p[8]; // 紫色区域,每个字节表示一行的 8 个像素点位置 int pCount; // 紫色区块数目 int rCount; // 红色区块数目 int xinCord; // 区域中心点相对横坐标 int yinCord; // 区域中心点相对纵坐标 } PatternDesc; // 各个 pattern 的数据 PatternData _patData[19] = { { 0, 1, 1, 0, 0 }, // pattern1 { 0.25f, 1.414f, 4.242f, 0, 0 }, // pattern2 { 0.75f, 1.414f, 4.242f, 0, 0 }, // pattern3 { 0, 1, 3, 0, 0 }, // pattern4 { 0.5f, 1, 3, 0, 0 }, // pattern5 { 0.25f, 1.414f, 2.828f, 0.5, 0.5 }, // pattern6 { 0.75f, 1.414f, 2.828f, -0.5, 0.5 }, // pattern7 { 0, 1, 2, 0.5, 0 }, // pattern8 { 0.5, 1, 2, 0, 0.5 }, // pattern9 { 0, 2, 2, 0.5, 0.5 }, // pattern10 { 0, 2, 2, 0.5, 0.5 }, // pattern11 { 0, 2, 3, 0, 0.5 }, // pattern12 {0.5, 2, 3, 0.5, 0 }, // pattern13 {0.25, 2, 5, 0, 0.5 }, // pattern14 {0.75, 2, 5, 0, 0.5 }, // pattern15 {0, 3, 3, 0, 0 }, // pattern16 {0, 3, 3, 0, 0 }, // pattern17 {0.25, 4.242, 4.242, 0, 0 }, // pattern18 {0.75, 4.242, 4.242, 0, 0} // pattern19 }; // 每个 pattern 的区域分布数据 static __device__ PatternDesc _pd[19] = { { // [0] // r[8] { 0x0E, 0x1F, 0x1B, 0x1F, 0x0E, 0x00, 0x00, 0x00 }, // p[8] { 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 1, 20, // xinCord, yinCord 2, 2 }, { // [1] // r[8] { 0x1B, 0x36, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x04, 0x08, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 3, 12, // xinCord, yinCord 3, 1 }, { // [2] // r[8] { 0x6C, 0x36, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 3, 12, // xinCord, yinCord 3, 1 }, { // [3] // r[8] { 0x07, 0x07, 0x00, 0x07, 0x07, 0x00, 0x00, 0x00 }, // p[8] { 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 3, 12, // xinCord, yinCord 1, 2 }, { // [4] // r[8] { 0x1B, 0x1B, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 3, 12, // xinCord, yinCord 2, 1 }, { // [5] // r[8] { 0x1B, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 3, 12, // xinCord, yinCord 2, 1 }, { // [6] // r[8] { 0x6C, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 2, 8, // xinCord, yinCord 3, 0 }, { // [7] // r[8] { 0x03, 0x03, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00 }, // p[8] { 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 2, 8, // xinCord, yinCord 0, 2 }, { // [8] // r[8] { 0x1B, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 2, 8, // xinCord, yinCord 2, 0 }, { // [9] // r[8] { 0x03, 0x03, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00 }, // p[8] { 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 4, 8, // xinCord, yinCord 0, 2 
}, { // [10] // r[8] { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x0C, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 4, 8, // xinCord, yinCord 2, 0 }, { // [11] // r[8] { 0x07, 0x07, 0x00, 0x00, 0x07, 0x07, 0x00, 0x00 }, // p[8] { 0x00, 0x00, 0x07, 0x07, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 6, 12, // xinCord, yinCord 1, 2 }, { // [12] // r[8] { 0x33, 0x33, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x0C, 0x0C, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 6, 12, // xinCord, yinCord 2, 1 }, { // [13] // r[8] { 0x01, 0x03, 0x06, 0x04, 0x01, 0x03, 0x06, 0x04 }, // p[8] { 0x00, 0x00, 0x01, 0x03, 0x06, 0x04, 0x00, 0x00 }, // pCount, rCount 6, 12, // xinCord, yinCord 1, 3 }, { // [14] // r[8] { 0x04, 0x06, 0x03, 0x01, 0x04, 0x06, 0x03, 0x01 }, // p[8] { 0x00, 0x00, 0x04, 0x06, 0x03, 0x01, 0x00, 0x00 }, // pCount, rCount 6, 12, // xinCord, yinCord 1, 3 }, { // [15] // r[8] { 0x07, 0x07, 0x00, 0x00, 0x00, 0x07, 0x07, 0x00 }, // p[8] { 0x00, 0x00, 0x07, 0x07, 0x07, 0x00, 0x00, 0x00 }, // pCount, rCount 9, 12, // xinCord, yinCord 1, 3 }, { // [16] // r[8] { 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00 }, // p[8] { 0x1B, 0x1B, 0x1B, 0x00, 0x00, 0x00, 0x00, 0x00 }, // pCount, rCount 9, 12, // xinCord, yinCord 3, 1 }, { // [17] // r[8] { 0x38, 0x70, 0x60, 0x41, 0x03, 0x07, 0x0E, 0x00 }, // p[8] { 0x00, 0x08, 0x1C, 0x3E, 0x1C, 0x08, 0x00, 0x00 }, // pCount, rCount 13, 18, // xinCord, yinCord 3, 3 }, { // [18] // r[8] { 0x0E, 0x07, 0x03, 0x41, 0x60, 0x70, 0x38, 0x00 }, // p[8] { 0x00, 0x08, 0x1C, 0x3E, 0x1C, 0x08, 0x00, 0x00 }, // pCount, rCount 13, 18, // xinCord, yinCord 3, 3 } }; // 宏:GET_BIT // 获取某一行数据的中指定列的 2 进制位 #define GET_BIT(row, x) row == 0 ? 0 : (row >> x) % 2 // Kernel 函数:_diffPatternKer(根据各个 pattern 对检查是否局部特异) // 根据 patterns 参数指定的 pattern 序号,计算对应的 pattern 是否特异,若特异则 // 修改 patterns 数组中的值进行标记 static __global__ void // kernel 函数无返回值 _diffPatternKer( ImageCuda inimg, // 输入图像 int centerx, // 中心点横坐标 int centery, // 中心点纵坐标 int patcount, // 差分 pattern 对的数目 int *patterns, // 差分 pattern 序号数组 float *avgs // 每个 pattern 中紫色区域像素平均值 ); // Kernel 函数:_diffPatternKer(根据各个 pattern 对检查是否局部特异) static __global__ void _diffPatternKer( ImageCuda inimg, int centerX, int centerY, int patcount, int *patterns, float *avgs) { // 如果 z 序号超出差分 pattern 总对数则直接退出 if (threadIdx.z >= patcount) return; // 将 pattern 对编号与数组编号对应 int couple = patterns[threadIdx.z] - 1; if (couple < 0) return; // 申明动态共享内存 extern __shared__ unsigned short pixels[]; // 获取第 1 个 pattern 红色区域数据指针 unsigned short *red1 = &pixels[256 * threadIdx.z]; // 获取第 1 个 pattern 紫色区域数据指针 unsigned short *pur1 = &pixels[256 * threadIdx.z + 64]; // 获取第 2 个 pattern 红色区域数据指针 unsigned short *red2 = &pixels[256 * threadIdx.z + 128]; // 获取第 2 个 pattern 紫色区域数据指针 unsigned short *pur2 = &pixels[256 * threadIdx.z + 192]; // 计算对应的图像位置下标 int pidx1 = couple == 0 ? 
0 : 2 * couple - 1, pidx2 = 2 * couple; int idx1 = (centerY - _pd[pidx1].yinCord) * inimg.pitchBytes + centerX + threadIdx.x - _pd[pidx1].xinCord; int idx2 = (centerY - _pd[pidx2].yinCord) * inimg.pitchBytes + centerX + threadIdx.x - _pd[pidx2].xinCord; int tid = threadIdx.y * blockDim.x + threadIdx.x; // 将对应的区域的图像数据复制到共享内存 red1[tid] = GET_BIT(_pd[pidx1].r[threadIdx.y], threadIdx.x) * inimg.imgMeta.imgData[idx1]; pur1[tid] = GET_BIT(_pd[pidx1].p[threadIdx.y], threadIdx.x) * inimg.imgMeta.imgData[idx1]; red2[tid] = GET_BIT(_pd[pidx2].r[threadIdx.y], threadIdx.x) * inimg.imgMeta.imgData[idx2]; pur2[tid] = GET_BIT(_pd[pidx2].p[threadIdx.y], threadIdx.x) * inimg.imgMeta.imgData[idx2]; __syncthreads(); // 使用 reduction 对各个区域内进行求和 if (tid < 32) { red1[tid] += red1[tid + 32]; pur1[tid] += pur1[tid + 32]; red2[tid] += red2[tid + 32]; pur2[tid] += pur2[tid + 32]; __syncthreads(); red1[tid] += red1[tid + 16]; pur1[tid] += pur1[tid + 16]; red2[tid] += red2[tid + 16]; pur2[tid] += pur2[tid + 16]; __syncthreads(); red1[tid] += red1[tid + 8]; pur1[tid] += pur1[tid + 8]; red2[tid] += red2[tid + 8]; pur2[tid] += pur2[tid + 8]; __syncthreads(); red1[tid] += red1[tid + 4]; pur1[tid] += pur1[tid + 4]; red2[tid] += red2[tid + 4]; pur2[tid] += pur2[tid + 4]; __syncthreads(); red1[tid] += red1[tid + 2]; pur1[tid] += pur1[tid + 2]; red2[tid] += red2[tid + 2]; pur2[tid] += pur2[tid + 2]; __syncthreads(); red1[tid] += red1[tid + 1]; pur1[tid] += pur1[tid + 1]; red2[tid] += red2[tid + 1]; pur2[tid] += pur2[tid + 1]; __syncthreads(); } // 计算最终结果 if (tid == 0) { // 记录第一个 pattern 的紫色区域像素平均值 avgs[pidx1] = pur1[0] * 1.0f / _pd[pidx1].pCount; // 保存第二个 pattern 的紫色区域像素平均值 avgs[pidx2] = pur2[0] * 1.0f / _pd[pidx2].pCount; // 计算第 1 个 pattern 红色区域像素平均值和紫色区域像素平均值的差值 float comp1 = red1[0] * 1.0f / _pd[pidx1].rCount - avgs[pidx1]; // 计算第 2 个 pattern 红色区域像素平均值和紫色区域像素平均值的差值 float comp2 = red2[0] * 1.0f / _pd[pidx2].rCount - avgs[pidx2]; // 若两个 pattern 都满足同样的不等关系则将该 pattern 对序号标记为 0 if ((comp1 > 0 && comp2 > 0) || (comp1 < 0 && comp2 < 0)) { patterns[threadIdx.z] = 0; } } } // Host 方法:doDiffPattern(检出图像特异的 pattern 信息) __host__ int DiffPattern::doDiffPattern(Image *inimg, int *counter, float *result) { // 数据指针为空时返回空指针异常 if (inimg == NULL || counter == NULL || result == NULL || indice == NULL) return NULL_POINTER; // 差分 pattern 对数为 0 返回数据异常 if (patCount == 0 ) return INVALID_DATA; int errcode; // 局部变量,错误码 errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // 提取输入图像的 ROI 子图像。 ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // 差分 pattern 对的序号数组,设备端使用的指针 int *patterns; errcode = cudaMalloc(&patterns, patCount * sizeof (int)); if (errcode != cudaSuccess) return errcode; // 初始数据与 indice 数组中的相同 errcode = cudaMemcpy(patterns, indice, patCount * sizeof (int), cudaMemcpyHostToDevice); if (errcode != cudaSuccess) return errcode; // 所有 19 个 pattern 的紫色区域像素平均值 float *avgs = new float[19], *dev_avgs; // 数组置 0 memset(avgs, 0, 19 * sizeof(float)); errcode = cudaMalloc(&dev_avgs, 19 * sizeof (float)); if (errcode != cudaSuccess) return errcode; errcode = cudaMemcpy(dev_avgs, avgs, 19 * sizeof (float), cudaMemcpyHostToDevice); if (errcode != cudaSuccess) return errcode; // 计算调用 Kernel 函数的线程块的尺寸和线程块的数量。 dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; blocksize.z = patCount; gridsize.x = 1; gridsize.y = 1; int sharedSize = 256 * patCount * sizeof (unsigned short); // 调用核函数 _diffPatternKer<<<gridsize, 
blocksize, sharedSize>>> (insubimgCud, 10, 10, patCount, patterns, dev_avgs); // 保存运算后的 pattern 对数组 int *comp = new int[patCount]; errcode = cudaMemcpy(comp, patterns, patCount * sizeof (int), cudaMemcpyDeviceToHost); if (errcode != cudaSuccess) return errcode; errcode = cudaMemcpy(avgs, dev_avgs, 19 * sizeof (float), cudaMemcpyDeviceToHost); if (errcode != cudaSuccess) return errcode; // 差异 pattern 对的数目的计数器 *counter = 0; for (int i = 0; i < patCount; i++) { if (comp[i] != indice[i]) { int idx = 2 * indice[i] - 3; if (idx < 0) continue; // 把有差异的 pattern 信息保存至数据指针 result[6 * (*counter)] = _patData[idx].csX; result[6 * (*counter) + 1] = _patData[idx].csY; result[6 * (*counter) + 2] = _patData[idx].angel; result[6 * (*counter) + 3] = _patData[idx].ss; result[6 * (*counter) + 4] = _patData[idx].ls; result[6 * (*counter) + 5] = avgs[idx]; (*counter)++; idx = 2 * indice[i] - 1; if (idx < 0) continue; result[6 * (*counter)] = _patData[idx].csX; result[6 * (*counter) + 1] = _patData[idx].csY; result[6 * (*counter) + 2] = _patData[idx].angel; result[6 * (*counter) + 3] = _patData[idx].ss; result[6 * (*counter) + 4] = _patData[idx].ls; result[6 * (*counter) + 5] = avgs[idx]; (*counter)++; } } // 释放 Host 端内存空间 delete []comp; delete []avgs; // 释放 Device 端内存空间 cudaFree(patterns); cudaFree(dev_avgs); return NO_ERROR; }
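// Illustrative sketch (not part of the original class): how the r[]/p[]
// bitmaps above are decoded. Each byte encodes one 8-pixel row of the
// 8 x 8 pattern window; bit x of the byte says whether column x of that row
// belongs to the region. For example, row 0x1B == 0b00011011 marks columns
// 0, 1, 3 and 4. The hypothetical helper below is equivalent to GET_BIT for
// a single row byte.
static inline int patternBitSet(unsigned char row, int x)
{
    // Test bit x of the row bitmap: 1 if the pixel belongs to the region.
    return (row >> x) & 1;
}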
__device__ double* t3_s_d; __device__ double* t3_d; #include "header.h" extern "C" void set_dev_mem_d(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d,size_t p6d) { size_t size_t3; size_t3 = h1d*h2d*h3d*p4d*p5d*p6d; t3_d = (double *) getGpuMem(size_t3*sizeof(double)); cudaMemset(t3_d,0,size_t3*sizeof(double)); } extern "C" void dev_mem_d_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d) { set_dev_mem_d((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d); } extern "C" void dev_release() { freeGpuMem(t3_d); freeGpuMem(t3_s_d); } extern "C" void dev_release_() { dev_release(); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p6,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_1_kernel(size_t h1d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t p6d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t p6ld_v2sub,size_t h7ld_v2sub,size_t h3ld_triplesx,size_t h1ld_triplesx,size_t p6ld_triplesx,size_t p5ld_triplesx,size_t p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; p6_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = 
v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3; triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p6_0*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p6_0*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p6_0*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p6_0*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; 
triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7; triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p6_1*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p6_1*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p6_1*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p6_1*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11; triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p6_2*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p6_2*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p6_2*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p6_2*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15; triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p6_3*p6ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p6_3*p6ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; 
triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p6_3*p6ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p6_3*p6ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_1_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { h3d=h3d*h2d; size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h3d*h1d*p6d*p5d*p4d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*p6d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_1_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; p6ld_v2sub=h3d; h7ld_v2sub=p6d*h3d; h3ld_triplesx=1; h1ld_triplesx=h3d; p6ld_triplesx=h1d*h3d; p5ld_triplesx=p6d*h1d*h3d; p4ld_triplesx=p5d*p6d*h1d*h3d; size_t total_x = h3d*p6d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p6ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_1_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_2_kernel(size_t h1d,size_t h2d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t h2ld_v2sub,size_t h7ld_v2sub,size_t h3ld_triplesx,size_t h1ld_triplesx,size_t h2ld_triplesx,size_t p5ld_triplesx,size_t p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t 
in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; h2_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; h2_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; h2_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; h2_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; 
b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3; triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7; triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11; triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12; } else 
if(thread_y+T2*2<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15; triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_2_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { h2d=h2d*p6d; size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h3d*h1d*h2d*p5d*p4d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h2d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_2_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } //CUDA_SAFE( cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice); //); //CUDA_SAFE( cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice); //); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; h2ld_v2sub=h3d; h7ld_v2sub=h2d*h3d; h3ld_triplesx=1; h1ld_triplesx=h3d; h2ld_triplesx=h1d*h3d; 
p5ld_triplesx=h2d*h1d*h3d; p4ld_triplesx=p5d*h2d*h1d*h3d; size_t total_x = h3d*h2d*1; size_t total_y = p4d*p5d*h1d; //printf("Blocks %d %d\n", total_x, total_y); //fflush(stdout); dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_2_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_2_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_3_kernel(size_t h1d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t h7ld_v2sub,size_t h1ld_triplesx,size_t h3ld_triplesx,size_t p5ld_triplesx,size_t p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; h3_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; h3_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; h3_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; h3_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = 
t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3; triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7; 
triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11; triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15; triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_3_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { h3d=h3d*h2d; h3d=h3d*p6d; size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; 
size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h1d*h3d*p5d*p4d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_3_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; h7ld_v2sub=h3d; h1ld_triplesx=1; h3ld_triplesx=h1d; p5ld_triplesx=h3d*h1d; p4ld_triplesx=p5d*h3d*h1d; size_t total_x = h3d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_3_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_3_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_4_kernel(size_t h1d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t p6d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t p6ld_v2sub,size_t h7ld_v2sub,size_t h3ld_triplesx,size_t h1ld_triplesx,size_t p5ld_triplesx,size_t p4ld_triplesx,size_t p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 
*4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; p6_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3; 
triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7; triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11; triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10; } else if(thread_y+T2*0<total_y) { 
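/* Partial tile: only the first of the four row offsets is inside total_y, so only tlocal9 is written back for this column block. */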
triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15; triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_4_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { h3d=h3d*h2d; size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h3d*h1d*p5d*p4d*p6d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*p6d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_4_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; p6ld_v2sub=h3d; h7ld_v2sub=p6d*h3d; h3ld_triplesx=1; h1ld_triplesx=h3d; p5ld_triplesx=h1d*h3d; p4ld_triplesx=p5d*h1d*h3d; p6ld_triplesx=p4d*p5d*h1d*h3d; size_t total_x = h3d*p6d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm 
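/*----------------------------------------------------------------------*
 * Reference sketch (NOT part of the original NWChem/TCE source): a naive
 * host-side version of the d1_4 contraction above, added only to document
 * the index and layout conventions the tiled kernels assume.  All arrays
 * are dense and column-major (leftmost index fastest), matching the
 * leading dimensions computed in sd_t_d1_4_cuda:
 *   t2sub   [h7,p4,p5,h1]
 *   v2sub   [h3,p6,h7]
 *   triplesx[h3,h1,p5,p4,p6]
 * Note that the real driver first fuses h2 into h3 (h3d=h3d*h2d) and
 * accumulates into the persistent device buffer t3_d rather than the
 * triplesx host argument; the function name and the host-side execution
 * below are assumptions made purely for illustration.
 *----------------------------------------------------------------------*/
void sd_t_d1_4_reference(size_t h1d, size_t h3d, size_t h7d, size_t p4d,
                         size_t p5d, size_t p6d, double *triplesx,
                         const double *t2sub, const double *v2sub)
{
  size_t h1, h3, h7, p4, p5, p6;
  for (p6 = 0; p6 < p6d; ++p6)
    for (p4 = 0; p4 < p4d; ++p4)
      for (p5 = 0; p5 < p5d; ++p5)
        for (h1 = 0; h1 < h1d; ++h1)
          for (h3 = 0; h3 < h3d; ++h3) {
            double sum = 0.0;
            /* Contract over h7; the GPU kernel tiles this loop in chunks
             * of Tcomm and stages both operands in shared memory.       */
            for (h7 = 0; h7 < h7d; ++h7)
              sum += t2sub[h7 + h7d*(p4 + p4d*(p5 + p5d*h1))]
                   * v2sub[h3 + h3d*(p6 + p6d*h7)];
            /* d1_4 subtracts its contribution from the output block.    */
            triplesx[h3 + h3d*(h1 + h1d*(p5 + p5d*(p4 + p4d*p6)))] -= sum;
          }
}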
extern "C" void sd_t_d1_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_4_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p4,p6] += t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_5_kernel(size_t h1d,size_t h2d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t p6d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t h2ld_v2sub,size_t p6ld_v2sub,size_t h7ld_v2sub,size_t h3ld_triplesx,size_t h1ld_triplesx,size_t h2ld_triplesx,size_t p5ld_triplesx,size_t p4ld_triplesx,size_t p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; h2_0=rest_x%h2d; rest_x=rest_x/h2d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; h2_1=rest_x%h2d; rest_x=rest_x/h2d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; h2_2=rest_x%h2d; rest_x=rest_x/h2d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; h2_3=rest_x%h2d; rest_x=rest_x/h2d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; p6_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; 
v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3; triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]+=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { 
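/* Write-back for the second column sub-tile when all four row offsets are in range: tlocal5..tlocal8 are accumulated (d1_5 adds its contribution). */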
triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7; triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11; triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15; 
triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_5_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h3d*h1d*h2d*p5d*p4d*p6d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h2d*p6d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_5_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; h2ld_v2sub=h3d; p6ld_v2sub=h2d*h3d; h7ld_v2sub=p6d*h2d*h3d; h3ld_triplesx=1; h1ld_triplesx=h3d; h2ld_triplesx=h1d*h3d; p5ld_triplesx=h2d*h1d*h3d; p4ld_triplesx=p5d*h2d*h1d*h3d; p6ld_triplesx=p4d*p5d*h2d*h1d*h3d; size_t total_x = h3d*h2d*p6d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { 
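/* Fortran-callable entry point: the Integer dimensions arrive by reference, so they are dereferenced and cast to size_t before forwarding to the C driver above. */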
sd_t_d1_5_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p4,p6] -= t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_6_kernel(size_t h1d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t p6d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t p6ld_v2sub,size_t h7ld_v2sub,size_t h1ld_triplesx,size_t h3ld_triplesx,size_t p5ld_triplesx,size_t p4ld_triplesx,size_t p6ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h1_0=rest_y%h1d; rest_y=rest_y/h1d; h3_0=rest_x%h3d; rest_x=rest_x/h3d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h1_1=rest_y%h1d; rest_y=rest_y/h1d; h3_1=rest_x%h3d; rest_x=rest_x/h3d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h1_2=rest_y%h1d; rest_y=rest_y/h1d; h3_2=rest_x%h3d; rest_x=rest_x/h3d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h1_3=rest_y%h1d; rest_y=rest_y/h1d; h3_3=rest_x%h3d; rest_x=rest_x/h3d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; p6_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; 
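/* Staging for the third (T1*2) sub-tile. d1_6 follows the same scheme as d1_4, but with h1 and h3 swapped in the output layout (triplesx[h1,h3,p5,p4,p6]). */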
v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2; triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3; triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2; triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_0*p6ld_triplesx]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6; triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7; triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6; 
triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_1*p6ld_triplesx]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10; triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11; triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10; triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_2*p6ld_triplesx]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14; triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15; triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p4_3*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14; triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p4_2*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p4_1*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p4_0*p4ld_triplesx+p6_3*p6ld_triplesx]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_6_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { h3d=h3d*h2d; size_t 
h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h1d*h3d*p5d*p4d*p6d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*p6d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_6_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; p6ld_v2sub=h3d; h7ld_v2sub=p6d*h3d; h1ld_triplesx=1; h3ld_triplesx=h1d; p5ld_triplesx=h3d*h1d; p4ld_triplesx=p5d*h3d*h1d; p6ld_triplesx=p4d*p5d*h3d*h1d; size_t total_x = h3d*p6d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p4ld_triplesx,p6ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_6_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_7_kernel(size_t h1d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t p6d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t p6ld_v2sub,size_t h7ld_v2sub,size_t h3ld_triplesx,size_t h1ld_triplesx,size_t p5ld_triplesx,size_t p6ld_triplesx,size_t p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; 
double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; p6_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { 
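/* Write-back for the first column sub-tile with all four row offsets in range. In d1_7 the output layout is triplesx[h3,h1,p5,p6,p4] (p4 outermost) and the contribution is added. */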
triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3; triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7; triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11; triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11; } else 
if(thread_y+T2*1<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15; triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_7_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { h3d=h3d*h2d; size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h3d*h1d*p5d*p6d*p4d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*p6d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_7_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; p6ld_v2sub=h3d; h7ld_v2sub=p6d*h3d; h3ld_triplesx=1; h1ld_triplesx=h3d; p5ld_triplesx=h1d*h3d; p6ld_triplesx=p5d*h1d*h3d; p4ld_triplesx=p6d*p5d*h1d*h3d; size_t total_x = h3d*p6d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ 
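/* Launch the kernel on each stream (nstreams is fixed to 1 here). The result is accumulated directly into the persistent device buffer t3_d; the triplesx host argument is not copied by this routine. */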
sd_t_d1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_7_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h3,h1,h2,p5,p6,p4] -= t2sub[h7,p4,p5,h1] * v2sub[h3,h2,p6,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_8_kernel(size_t h1d,size_t h2d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t p6d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t h2ld_v2sub,size_t p6ld_v2sub,size_t h7ld_v2sub,size_t h3ld_triplesx,size_t h1ld_triplesx,size_t h2ld_triplesx,size_t p5ld_triplesx,size_t p6ld_triplesx,size_t p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; h2_0=rest_x%h2d; rest_x=rest_x/h2d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; h2_1=rest_x%h2d; rest_x=rest_x/h2d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; h2_2=rest_x%h2d; rest_x=rest_x/h2d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; h2_3=rest_x%h2d; rest_x=rest_x/h2d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; p6_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; 
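/* h7l_hi is the extent of the current contraction chunk (the final chunk may be shorter than Tcomm). The four sub-tiles of t2sub and v2sub needed for this chunk are now staged into shared memory before the multiply-accumulate loop. */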
t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+h2_0*h2ld_v2sub+p6_0*p6ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub+h2_1*h2ld_v2sub+p6_1*p6ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub+h2_2*h2ld_v2sub+p6_2*p6ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+h2_3*h2ld_v2sub+p6_3*p6ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3; triplesx_d[h3_0*h3ld_triplesx+h1_3*h1ld_triplesx+h2_0*h2ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; triplesx_d[h3_0*h3ld_triplesx+h1_2*h1ld_triplesx+h2_0*h2ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; 
triplesx_d[h3_0*h3ld_triplesx+h1_1*h1ld_triplesx+h2_0*h2ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_0*h3ld_triplesx+h1_0*h1ld_triplesx+h2_0*h2ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7; triplesx_d[h3_1*h3ld_triplesx+h1_3*h1ld_triplesx+h2_1*h2ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal8; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; triplesx_d[h3_1*h3ld_triplesx+h1_2*h1ld_triplesx+h2_1*h2ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; triplesx_d[h3_1*h3ld_triplesx+h1_1*h1ld_triplesx+h2_1*h2ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_1*h3ld_triplesx+h1_0*h1ld_triplesx+h2_1*h2ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11; triplesx_d[h3_2*h3ld_triplesx+h1_3*h1ld_triplesx+h2_2*h2ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; triplesx_d[h3_2*h3ld_triplesx+h1_2*h1ld_triplesx+h2_2*h2ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; triplesx_d[h3_2*h3ld_triplesx+h1_1*h1ld_triplesx+h2_2*h2ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_2*h3ld_triplesx+h1_0*h1ld_triplesx+h2_2*h2ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { 
triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15; triplesx_d[h3_3*h3ld_triplesx+h1_3*h1ld_triplesx+h2_3*h2ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]-=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; triplesx_d[h3_3*h3ld_triplesx+h1_2*h1ld_triplesx+h2_3*h2ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]-=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; triplesx_d[h3_3*h3ld_triplesx+h1_1*h1ld_triplesx+h2_3*h2ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]-=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h3_3*h3ld_triplesx+h1_0*h1ld_triplesx+h2_3*h2ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_8_cuda(size_t h1d, size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h3d*h1d*h2d*p5d*p6d*p4d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*h2d*p6d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_8_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; h2ld_v2sub=h3d; p6ld_v2sub=h2d*h3d; h7ld_v2sub=p6d*h2d*h3d; h3ld_triplesx=1; h1ld_triplesx=h3d; h2ld_triplesx=h1d*h3d; p5ld_triplesx=h2d*h1d*h3d; p6ld_triplesx=p5d*h2d*h1d*h3d; p4ld_triplesx=p6d*p5d*h2d*h1d*h3d; size_t total_x = h3d*h2d*p6d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,h2ld_v2sub,p6ld_v2sub,h7ld_v2sub,h3ld_triplesx,h1ld_triplesx,h2ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); 
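/* The device has been synchronized; release per-call resources: destroy the streams and free the staged t2sub/v2sub device buffers (t3_d is not freed here). */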
for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_8_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *triplesx[h1,h3,p5,p6,p4] += t2sub[h7,p4,p5,h1] * v2sub[h3,p6,h7] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d1_9_kernel(size_t h1d,size_t h3d,size_t h7d,size_t p4d,size_t p5d,size_t p6d,size_t h7ld_t2sub,size_t p4ld_t2sub,size_t p5ld_t2sub,size_t h1ld_t2sub,size_t h3ld_v2sub,size_t p6ld_v2sub,size_t h7ld_v2sub,size_t h1ld_triplesx,size_t h3ld_triplesx,size_t p5ld_triplesx,size_t p6ld_triplesx,size_t p4ld_triplesx,double *triplesx_d, double *t2sub_d, double *v2sub_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h3_0,h3_1,h3_2,h3_3,h7,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,h7l,h7T; __shared__ double t2sub_shm[4*T1][Tcomm]; __shared__ double v2sub_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h1_0=rest_y%h1d; rest_y=rest_y/h1d; h3_0=rest_x%h3d; rest_x=rest_x/h3d; p5_0=rest_y%p5d; rest_y=rest_y/p5d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h1_1=rest_y%h1d; rest_y=rest_y/h1d; h3_1=rest_x%h3d; rest_x=rest_x/h3d; p5_1=rest_y%p5d; rest_y=rest_y/p5d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h1_2=rest_y%h1d; rest_y=rest_y/h1d; h3_2=rest_x%h3d; rest_x=rest_x/h3d; p5_2=rest_y%p5d; rest_y=rest_y/p5d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h1_3=rest_y%h1d; rest_y=rest_y/h1d; h3_3=rest_x%h3d; rest_x=rest_x/h3d; p5_3=rest_y%p5d; rest_y=rest_y/p5d; p4_3=rest_y; p6_3=rest_x; size_t t2sub_d_off, v2sub_d_off;for(h7T=0;h7T<h7d;h7T+=Tcomm){size_t h7l_hi; h7l_hi = MIN(Tcomm+h7T,h7d)-h7T; t2sub_d_off=p4_0*p4ld_t2sub+p5_0*p5ld_t2sub+h1_0*h1ld_t2sub; v2sub_d_off=h3_0*h3ld_v2sub+p6_0*p6ld_v2sub; if(thread_y+T1*0<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*0][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*0<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*0] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_1*p4ld_t2sub+p5_1*p5ld_t2sub+h1_1*h1ld_t2sub; v2sub_d_off=h3_1*h3ld_v2sub+p6_1*p6ld_v2sub; if(thread_y+T1*1<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ 
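/* Cooperative staging: threads stride across the current h7 tile and copy
   the t2sub slice for this output row into shared memory. */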
h7=h7l+h7T; t2sub_shm[in1_idxl+T1*1][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*1<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*1] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_2*p4ld_t2sub+p5_2*p5ld_t2sub+h1_2*h1ld_t2sub; v2sub_d_off=h3_2*h3ld_v2sub+p6_2*p6ld_v2sub; if(thread_y+T1*2<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*2][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*2<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*2] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } t2sub_d_off=p4_3*p4ld_t2sub+p5_3*p5ld_t2sub+h1_3*h1ld_t2sub; v2sub_d_off=h3_3*h3ld_v2sub+p6_3*p6ld_v2sub; if(thread_y+T1*3<total_y)for(h7l=threadIdx.x;h7l<h7l_hi;h7l+=blockDim.x){ h7=h7l+h7T; t2sub_shm[in1_idxl+T1*3][h7l] = t2sub_d[t2sub_d_off+h7*h7ld_t2sub]; } if(thread_x+T1*3<total_x)for(h7l=threadIdx.y;h7l<h7l_hi;h7l+=blockDim.y){ h7=h7l+h7T; v2sub_shm[h7l][in2_idxl+T1*3] = v2sub_d[v2sub_d_off+h7*h7ld_v2sub]; } __syncthreads(); for(h7l=0;h7l<h7l_hi;++h7l){ a1=t2sub_shm[in1_idxl+T1*0][h7l]; a2=t2sub_shm[in1_idxl+T1*1][h7l]; a3=t2sub_shm[in1_idxl+T1*2][h7l]; a4=t2sub_shm[in1_idxl+T1*3][h7l]; b1=v2sub_shm[h7l][in2_idxl+T2*0]; b2=v2sub_shm[h7l][in2_idxl+T2*1]; b3=v2sub_shm[h7l][in2_idxl+T2*2]; b4=v2sub_shm[h7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3; triplesx_d[h1_3*h1ld_triplesx+h3_0*h3ld_triplesx+p5_3*p5ld_triplesx+p6_0*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal4; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; triplesx_d[h1_2*h1ld_triplesx+h3_0*h3ld_triplesx+p5_2*p5ld_triplesx+p6_0*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal3; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; triplesx_d[h1_1*h1ld_triplesx+h3_0*h3ld_triplesx+p5_1*p5ld_triplesx+p6_0*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal2; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_0*h3ld_triplesx+p5_0*p5ld_triplesx+p6_0*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7; triplesx_d[h1_3*h1ld_triplesx+h3_1*h3ld_triplesx+p5_3*p5ld_triplesx+p6_1*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal8; } else 
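/* Boundary tiles: in the branches below, fewer than four rows of the
   register tile are valid. */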
if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; triplesx_d[h1_2*h1ld_triplesx+h3_1*h3ld_triplesx+p5_2*p5ld_triplesx+p6_1*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal7; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; triplesx_d[h1_1*h1ld_triplesx+h3_1*h3ld_triplesx+p5_1*p5ld_triplesx+p6_1*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal6; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_1*h3ld_triplesx+p5_0*p5ld_triplesx+p6_1*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11; triplesx_d[h1_3*h1ld_triplesx+h3_2*h3ld_triplesx+p5_3*p5ld_triplesx+p6_2*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal12; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; triplesx_d[h1_2*h1ld_triplesx+h3_2*h3ld_triplesx+p5_2*p5ld_triplesx+p6_2*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal11; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; triplesx_d[h1_1*h1ld_triplesx+h3_2*h3ld_triplesx+p5_1*p5ld_triplesx+p6_2*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal10; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_2*h3ld_triplesx+p5_0*p5ld_triplesx+p6_2*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15; triplesx_d[h1_3*h1ld_triplesx+h3_3*h3ld_triplesx+p5_3*p5ld_triplesx+p6_3*p6ld_triplesx+p4_3*p4ld_triplesx]+=tlocal16; } else if(thread_y+T2*2<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; triplesx_d[h1_2*h1ld_triplesx+h3_3*h3ld_triplesx+p5_2*p5ld_triplesx+p6_3*p6ld_triplesx+p4_2*p4ld_triplesx]+=tlocal15; } else if(thread_y+T2*1<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; triplesx_d[h1_1*h1ld_triplesx+h3_3*h3ld_triplesx+p5_1*p5ld_triplesx+p6_3*p6ld_triplesx+p4_1*p4ld_triplesx]+=tlocal14; } else if(thread_y+T2*0<total_y) { triplesx_d[h1_0*h1ld_triplesx+h3_3*h3ld_triplesx+p5_0*p5ld_triplesx+p6_3*p6ld_triplesx+p4_0*p4ld_triplesx]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d1_9_cuda(size_t h1d, 
size_t h2d, size_t h3d, size_t h7d, size_t p4d, size_t p5d, size_t p6d, double *triplesx, double *t2sub, double *v2sub) { h3d=h3d*h2d; size_t h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx; size_t size_triplesx,size_block_triplesx,size_el_block_triplesx,size_t2sub,size_v2sub; cudaStream_t *streams; size_t nstreams,i; double *t2sub_d,*v2sub_d; size_triplesx=h1d*h3d*p5d*p6d*p4d*sizeof(double); size_t2sub=h7d*p4d*p5d*h1d*sizeof(double); size_v2sub=h3d*p6d*h7d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d1_9_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_triplesx=size_triplesx/nstreams; size_el_block_triplesx=size_block_triplesx/sizeof(double); t2sub_d=(double*)getGpuMem(size_t2sub); v2sub_d=(double*)getGpuMem(size_v2sub); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2sub_d,t2sub,size_t2sub,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2sub_d,v2sub,size_v2sub,cudaMemcpyHostToDevice)); h7ld_t2sub=1; p4ld_t2sub=h7d; p5ld_t2sub=p4d*h7d; h1ld_t2sub=p5d*p4d*h7d; h3ld_v2sub=1; p6ld_v2sub=h3d; h7ld_v2sub=p6d*h3d; h1ld_triplesx=1; h3ld_triplesx=h1d; p5ld_triplesx=h3d*h1d; p6ld_triplesx=p5d*h3d*h1d; p4ld_triplesx=p6d*p5d*h3d*h1d; size_t total_x = h3d*p6d*1; size_t total_y = p4d*p5d*h1d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d1_9_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h3d,h7d,p4d,p5d,p6d,h7ld_t2sub,p4ld_t2sub,p5ld_t2sub,h1ld_t2sub,h3ld_v2sub,p6ld_v2sub,h7ld_v2sub,h1ld_triplesx,h3ld_triplesx,p5ld_triplesx,p6ld_triplesx,p4ld_triplesx,t3_d,t2sub_d,v2sub_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} freeGpuMem(t2sub_d); freeGpuMem(v2sub_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d1_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* h7d, Integer* p4d, Integer* p5d, Integer* p6d, double *triplesx, double *t2sub, double *v2sub) { sd_t_d1_9_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*h7d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,triplesx,t2sub,v2sub); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_1_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; 
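/* tlocal1..tlocal16 are the per-thread accumulators holding a 4x4 block
   of t3 contributions. */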
double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h2_0=rest_y%h2d; rest_y=rest_y/h2d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p4_3=rest_y; p6_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2; t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3; 
t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]-=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2; t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]-=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]-=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6; t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7; t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]-=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6; t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]-=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]-=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10; t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11; t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]-=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10; t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]-=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]-=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14; t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15; t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]-=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14; t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]-=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]-=tlocal14; } else if(thread_y+T2*0<total_y) { 
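/* Only a single valid row remains in this boundary tile. */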
t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_1_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { p6d=p6d*p5d; size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h3d*h2d*h1d*p6d*p4d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p6d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_1_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p6ld_v2=h3d*p7d; h3ld_t3=1; h2ld_t3=h3d; h1ld_t3=h2d*h3d; p6ld_t3=h1d*h2d*h3d; p4ld_t3=p6d*h1d*h2d*h3d; size_t total_x = h3d*p6d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_1_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_1_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4] -= t2[p7,p4,h1,h2] * v2[p7,h3] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_2_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t h2ld_t3,size_t h1ld_t3,size_t h3ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double 
tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h2_0=rest_y%h2d; rest_y=rest_y/h2d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p4_0=rest_y; h3_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p4_1=rest_y; h3_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p4_2=rest_y; h3_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p4_3=rest_y; h3_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3]-=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3]-=tlocal3; } else if(thread_y+T2*1<total_y) { 
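/* Partial tile: only two rows of this thread's 4x4 block are in range. */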
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3]-=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3]-=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3]-=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3]-=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3]-=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3]-=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3]-=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3]-=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3]-=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3]-=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_2_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { h3d=h3d*p6d; h3d=h3d*p5d; size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h2d*h1d*h3d*p4d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_2_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); 
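/* t3 stays resident on the device: the commented-out getGpuMem/freeGpuMem
   calls above suggest the t3_d buffer is allocated and managed elsewhere in
   this file, while t2 and v2 are copied in fresh for each call. */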
streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; h2ld_t3=1; h1ld_t3=h2d; h3ld_t3=h1d*h2d; p4ld_t3=h3d*h1d*h2d; size_t total_x = h3d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_2_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_2_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_2_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p6,p4] += t2[p7,p4,h1,h2] * v2[p7,h3,p6] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_3_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p6ld_v2,size_t h2ld_t3,size_t h3ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p6_0,p6_1,p6_2,p6_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h2_0=rest_y%h2d; rest_y=rest_y/h2d; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p4_0=rest_y; p6_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p4_1=rest_y; p6_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p4_2=rest_y; p6_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; 
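/* The flattened x/y offsets are peeled apart with successive mod/div to
   recover the tensor indices (h2,h3,h1,p6,p4) for each tile position. */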
rest_y=rest_y/h1d; p4_3=rest_y; p6_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2; t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3; t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3]+=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2; t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3]+=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3]+=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3]+=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6; t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7; 
t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3]+=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6; t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3]+=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3]+=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10; t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11; t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3]+=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10; t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3]+=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3]+=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14; t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15; t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3]+=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14; t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3]+=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3]+=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_3_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { p6d=p6d*p5d; size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h2d*h3d*h1d*p6d*p4d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p6d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_3_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { 
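/* nstreams is hard-wired to 1 above, so a single stream is created here. */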
CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p6ld_v2=h3d*p7d; h2ld_t3=1; h3ld_t3=h2d; h1ld_t3=h3d*h2d; p6ld_t3=h1d*h3d*h2d; p4ld_t3=p6d*h1d*h3d*h2d; size_t total_x = h3d*p6d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_3_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_3_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_3_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_4_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,size_t p5ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h2_0=rest_y%h2d; rest_y=rest_y/h2d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p6_0=rest_x%p6d; rest_x=rest_x/p6d; p4_0=rest_y; p5_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p6_1=rest_x%p6d; rest_x=rest_x/p6d; p4_1=rest_y; p5_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p6_2=rest_x%p6d; rest_x=rest_x/p6d; p4_2=rest_y; p5_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* 
blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p6_3=rest_x%p6d; rest_x=rest_x/p6d; p4_3=rest_y; p5_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2; t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3; t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2; t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; } } if(thread_x+T1*1<total_x){ 
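/* Second column of the output tile (h3_1/p6_1/p5_1 indices, tlocal5..tlocal8). */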
if(thread_y+T2*3<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6; t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7; t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6; t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10; t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11; t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10; t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14; t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15; t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14; t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_4_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { size_t 
p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h3d*h2d*h1d*p6d*p4d*p5d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p6d*p5d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_4_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p6ld_v2=h3d*p7d; p5ld_v2=p6d*h3d*p7d; h3ld_t3=1; h2ld_t3=h3d; h1ld_t3=h2d*h3d; p6ld_t3=h1d*h2d*h3d; p4ld_t3=p6d*h1d*h2d*h3d; p5ld_t3=p4d*p6d*h1d*h2d*h3d; size_t total_x = h3d*p6d*p5d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_4_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_4_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_5_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p5ld_v2,size_t h2ld_t3,size_t h1ld_t3,size_t h3ld_t3,size_t p4ld_t3,size_t p5ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; 
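/* Decompose the four row/column offsets of this thread's tile into the
   (h2,h1,h3,p4,p5) indices of t3[h2,h1,h3,p4,p5]. */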
h2_0=rest_y%h2d; rest_y=rest_y/h2d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; h3_0=rest_x%h3d; rest_x=rest_x/h3d; p4_0=rest_y; p5_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; h3_1=rest_x%h3d; rest_x=rest_x/h3d; p4_1=rest_y; p5_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; h3_2=rest_x%h3d; rest_x=rest_x/h3d; p4_2=rest_y; p5_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; h3_3=rest_x%h3d; rest_x=rest_x/h3d; p4_3=rest_y; p5_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p5_0*p5ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p5_1*p5ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2+p5_2*p5ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p5_3*p5ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]+=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; 
t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]+=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]+=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]+=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]+=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]+=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]+=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]+=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]+=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]+=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]+=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]+=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]+=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_5_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { 
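/* This driver realizes t3[h2,h1,h3,p4,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p5],
   where "h3" is the fused h3*p6 range (h3d = h3d*p6d below) and every array
   stores its leftmost listed index fastest, exactly as the leading-dimension
   variables set below encode.  As an illustrative sketch only (the helper
   name ref_sd_t_d2_5 is hypothetical, not part of this file), the same
   update on the host would read:

     void ref_sd_t_d2_5(size_t h1d, size_t h2d, size_t h3d, size_t p4d,
                        size_t p5d, size_t p7d,
                        double *t3, const double *t2, const double *v2) {
       for (size_t p5 = 0; p5 < p5d; ++p5)
        for (size_t p4 = 0; p4 < p4d; ++p4)
         for (size_t h3 = 0; h3 < h3d; ++h3)
          for (size_t h1 = 0; h1 < h1d; ++h1)
           for (size_t h2 = 0; h2 < h2d; ++h2)
            for (size_t p7 = 0; p7 < p7d; ++p7)
              t3[h2 + h2d*(h1 + h1d*(h3 + h3d*(p4 + p4d*p5)))] +=
                t2[p7 + p7d*(p4 + p4d*(h1 + h1d*h2))] *
                v2[p7 + p7d*(h3 + h3d*p5)];
     }
*/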
h3d=h3d*p6d; size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h2d*h1d*h3d*p4d*p5d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p5d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_5_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p5ld_v2=h3d*p7d; h2ld_t3=1; h1ld_t3=h2d; h3ld_t3=h1d*h2d; p4ld_t3=h3d*h1d*h2d; p5ld_t3=p4d*h3d*h1d*h2d; size_t total_x = h3d*p5d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_5_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_5_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p6,p4,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_6_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t h2ld_t3,size_t h3ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,size_t p5ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h2_0=rest_y%h2d; 
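/* sd_t_d2_6 uses the same tiling as the kernels above; only the output
   permutation and the sign differ: t3 is laid out as [h2,h3,h1,p6,p4,p5]
   (h2 fastest) and the contribution t2[p7,p4,h1,h2]*v2[p7,h3,p6,p5] is
   subtracted.  The x index decodes to (h3,p6,p5) and the y index to
   (h2,h1,p4). */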
rest_y=rest_y/h2d; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p6_0=rest_x%p6d; rest_x=rest_x/p6d; p4_0=rest_y; p5_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p6_1=rest_x%p6d; rest_x=rest_x/p6d; p4_1=rest_y; p5_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p6_2=rest_x%p6d; rest_x=rest_x/p6d; p4_2=rest_y; p5_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p6_3=rest_x%p6d; rest_x=rest_x/p6d; p4_3=rest_y; p5_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2; t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3; 
t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p6_0*p6ld_t3+p4_3*p4ld_t3+p5_0*p5ld_t3]-=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2; t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p6_0*p6ld_t3+p4_2*p4ld_t3+p5_0*p5ld_t3]-=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p6_0*p6ld_t3+p4_1*p4ld_t3+p5_0*p5ld_t3]-=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p6_0*p6ld_t3+p4_0*p4ld_t3+p5_0*p5ld_t3]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6; t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7; t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p6_1*p6ld_t3+p4_3*p4ld_t3+p5_1*p5ld_t3]-=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6; t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p6_1*p6ld_t3+p4_2*p4ld_t3+p5_1*p5ld_t3]-=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p6_1*p6ld_t3+p4_1*p4ld_t3+p5_1*p5ld_t3]-=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p6_1*p6ld_t3+p4_0*p4ld_t3+p5_1*p5ld_t3]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10; t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11; t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p6_2*p6ld_t3+p4_3*p4ld_t3+p5_2*p5ld_t3]-=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10; t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p6_2*p6ld_t3+p4_2*p4ld_t3+p5_2*p5ld_t3]-=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p6_2*p6ld_t3+p4_1*p4ld_t3+p5_2*p5ld_t3]-=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p6_2*p6ld_t3+p4_0*p4ld_t3+p5_2*p5ld_t3]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14; t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15; t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p6_3*p6ld_t3+p4_3*p4ld_t3+p5_3*p5ld_t3]-=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13; 
t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14; t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p6_3*p6ld_t3+p4_2*p4ld_t3+p5_3*p5ld_t3]-=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p6_3*p6ld_t3+p4_1*p4ld_t3+p5_3*p5ld_t3]-=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p6_3*p6ld_t3+p4_0*p4ld_t3+p5_3*p5ld_t3]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_6_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h2d*h3d*h1d*p6d*p4d*p5d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p6d*p5d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_6_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p6ld_v2=h3d*p7d; p5ld_v2=p6d*h3d*p7d; h2ld_t3=1; h3ld_t3=h2d; h1ld_t3=h3d*h2d; p6ld_t3=h1d*h3d*h2d; p4ld_t3=p6d*h1d*h3d*h2d; p5ld_t3=p4d*p6d*h1d*h3d*h2d; size_t total_x = h3d*p6d*p5d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR(); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_6_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_6_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_7_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p4ld_t3,size_t p6ld_t3,size_t p5ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t 
h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h2_0=rest_y%h2d; rest_y=rest_y/h2d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p6_0=rest_x%p6d; rest_x=rest_x/p6d; p4_0=rest_y; p5_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p6_1=rest_x%p6d; rest_x=rest_x/p6d; p4_1=rest_y; p5_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p6_2=rest_x%p6d; rest_x=rest_x/p6d; p4_2=rest_y; p5_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p6_3=rest_x%p6d; rest_x=rest_x/p6d; p4_3=rest_y; p5_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ 
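/* Register-tile accumulation: a1..a4 are four staged t2 values and b1..b4
   four staged v2 values for this p7l, and the sixteen running sums
   tlocal1..tlocal16 collect their 4x4 outer product, summed over the
   contracted index p7. */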
a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2; t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3; t3d[h3_0*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2; t3d[h3_0*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h3_0*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h3_0*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6; t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7; t3d[h3_1*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6; t3d[h3_1*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h3_1*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h3_1*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10; t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11; t3d[h3_2*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10; t3d[h3_2*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11; } else if(thread_y+T2*1<total_y) { 
t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h3_2*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h3_2*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14; t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15; t3d[h3_3*h3ld_t3+h2_3*h2ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14; t3d[h3_3*h3ld_t3+h2_2*h2ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h3_3*h3ld_t3+h2_1*h2ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h3_3*h3ld_t3+h2_0*h2ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_7_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h3d*h2d*h1d*p4d*p6d*p5d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p6d*p5d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_7_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p6ld_v2=h3d*p7d; p5ld_v2=p6d*h3d*p7d; h3ld_t3=1; h2ld_t3=h3d; h1ld_t3=h2d*h3d; p4ld_t3=h1d*h2d*h3d; p6ld_t3=p4d*h1d*h2d*h3d; p5ld_t3=p6d*p4d*h1d*h2d*h3d; size_t total_x = h3d*p6d*p5d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_7_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { 
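/* Fortran binding: the trailing-underscore entry point takes every dimension
   by reference as Integer, dereferences it and forwards it as size_t to the
   C driver defined above, so Fortran code can call sd_t_d2_7_cuda with its
   usual pass-by-reference arguments and the compiler-appended underscore. */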
sd_t_d2_7_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h1,h3,p4,p6,p5] -= t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_8_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t h2ld_t3,size_t h1ld_t3,size_t h3ld_t3,size_t p4ld_t3,size_t p6ld_t3,size_t p5ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h2_0=rest_y%h2d; rest_y=rest_y/h2d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; h3_0=rest_x%h3d; rest_x=rest_x/h3d; p6_0=rest_x%p6d; rest_x=rest_x/p6d; p4_0=rest_y; p5_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; h3_1=rest_x%h3d; rest_x=rest_x/h3d; p6_1=rest_x%p6d; rest_x=rest_x/p6d; p4_1=rest_y; p5_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; h3_2=rest_x%h3d; rest_x=rest_x/h3d; p6_2=rest_x%p6d; rest_x=rest_x/p6d; p4_2=rest_y; p5_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; h3_3=rest_x%h3d; rest_x=rest_x/h3d; p6_3=rest_x%p6d; rest_x=rest_x/p6d; p4_3=rest_y; p5_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2; if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; 
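/* Cooperative staging, third of the four sub-tiles: threads stride over p7l
   starting at threadIdx.x to fill one row of t2_shm and starting at
   threadIdx.y to fill one column of v2_shm, each load guarded by the
   total_y / total_x bounds so threads mapped past the end of the flattened
   index space skip it. */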
v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_0*h3ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_0*h3ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_0*h3ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_0*h3ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]-=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_1*h3ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal8; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_1*h3ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_1*h3ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_1*h3ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]-=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { 
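/* Boundary handling for the stores: when the full 4-row tile fits below
   total_y all four partial sums are written; otherwise the else-if ladder
   below writes only 3, 2 or 1 rows, and the enclosing thread_x checks do the
   same for the columns, so threads past the edge never touch t3d. */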
t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_2*h3ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_2*h3ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_2*h3ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_2*h3ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]-=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15; t3d[h2_3*h2ld_t3+h1_3*h1ld_t3+h3_3*h3ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14; t3d[h2_2*h2ld_t3+h1_2*h1ld_t3+h3_3*h3ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; t3d[h2_1*h2ld_t3+h1_1*h1ld_t3+h3_3*h3ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h1_0*h1ld_t3+h3_3*h3ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]-=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_8_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h2d*h1d*h3d*p4d*p6d*p5d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p6d*p5d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_8_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p6ld_v2=h3d*p7d; p5ld_v2=p6d*h3d*p7d; h2ld_t3=1; h1ld_t3=h2d; h3ld_t3=h1d*h2d; p4ld_t3=h3d*h1d*h2d; p6ld_t3=p4d*h3d*h1d*h2d; p5ld_t3=p6d*p4d*h3d*h1d*h2d; size_t total_x = h3d*p6d*p5d; size_t total_y = p4d*h1d*h2d; dim3 
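/* Launch shape: a block is T2 x T1 = 16 x 16 threads and each thread covers
   a 4x4 tile, so the grid is DIV_UB(total_x,64) x DIV_UB(total_y,64) blocks,
   with DIV_UB rounding up.  For example h3d = p6d = p5d = 16 gives
   total_x = 4096 and gridDim.x = DIV_UB(4096,64) = 64. */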
dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h1ld_t3,h3ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_d2_8_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_8_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } /*----------------------------------------------------------------------* *t3[h2,h3,h1,p4,p6,p5] += t2[p7,p4,h1,h2] * v2[p7,h3,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_d2_9_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p7d,size_t p7ld_t2,size_t p4ld_t2,size_t h1ld_t2,size_t h2ld_t2,size_t p7ld_v2,size_t h3ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t h2ld_t3,size_t h3ld_t3,size_t h1ld_t3,size_t p4ld_t3,size_t p6ld_t3,size_t p5ld_t3,double *t3d, double *t2_d, double *v2_d,size_t unused_idx, size_t total_x, size_t total_y) { size_t h1_0,h1_1,h1_2,h1_3,h2_0,h2_1,h2_2,h2_3,h3_0,h3_1,h3_2,h3_3,p4_0,p4_1,p4_2,p4_3,p5_0,p5_1,p5_2,p5_3,p6_0,p6_1,p6_2,p6_3,p7; double a1,b1; double a2,b2; double a3,b3; double a4,b4; size_t in1_idxl,in2_idxl,p7l,p7T; __shared__ double t2_shm[4*T1][Tcomm]; __shared__ double v2_shm[Tcomm][4*T2]; size_t rest_x=blockIdx.x; size_t rest_y=blockIdx.y; size_t thread_x = T2*4 * rest_x + threadIdx.x; size_t thread_y = T1*4 * rest_y + threadIdx.y; in1_idxl=threadIdx.y; in2_idxl=threadIdx.x ; double tlocal1=0; double tlocal2=0; double tlocal3=0; double tlocal4=0; double tlocal5=0; double tlocal6=0; double tlocal7=0; double tlocal8=0; double tlocal9=0; double tlocal10=0; double tlocal11=0; double tlocal12=0; double tlocal13=0; double tlocal14=0; double tlocal15=0; double tlocal16=0; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*0; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*0; h2_0=rest_y%h2d; rest_y=rest_y/h2d; h3_0=rest_x%h3d; rest_x=rest_x/h3d; h1_0=rest_y%h1d; rest_y=rest_y/h1d; p6_0=rest_x%p6d; rest_x=rest_x/p6d; p4_0=rest_y; p5_0=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*1; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*1; h2_1=rest_y%h2d; rest_y=rest_y/h2d; h3_1=rest_x%h3d; rest_x=rest_x/h3d; h1_1=rest_y%h1d; rest_y=rest_y/h1d; p6_1=rest_x%p6d; rest_x=rest_x/p6d; p4_1=rest_y; p5_1=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*2; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*2; h2_2=rest_y%h2d; rest_y=rest_y/h2d; h3_2=rest_x%h3d; rest_x=rest_x/h3d; h1_2=rest_y%h1d; rest_y=rest_y/h1d; p6_2=rest_x%p6d; rest_x=rest_x/p6d; p4_2=rest_y; p5_2=rest_x; rest_x = T2 *4* blockIdx.x + threadIdx.x+T1*3; rest_y = T1 *4* blockIdx.y + threadIdx.y+T1*3; h2_3=rest_y%h2d; rest_y=rest_y/h2d; h3_3=rest_x%h3d; rest_x=rest_x/h3d; h1_3=rest_y%h1d; rest_y=rest_y/h1d; p6_3=rest_x%p6d; rest_x=rest_x/p6d; p4_3=rest_y; p5_3=rest_x; size_t t2_d_off, v2_d_off;for(p7T=0;p7T<p7d;p7T+=Tcomm){size_t p7l_hi; p7l_hi = MIN(Tcomm+p7T,p7d)-p7T; t2_d_off=p4_0*p4ld_t2+h1_0*h1ld_t2+h2_0*h2ld_t2; v2_d_off=h3_0*h3ld_v2+p6_0*p6ld_v2+p5_0*p5ld_v2; 
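/* sd_t_d2_9 mirrors sd_t_d2_6 but accumulates with a plus sign and orders
   the output as t3[h2,h3,h1,p4,p6,p5] (p4 before p6, h2 fastest); x again
   decodes to (h3,p6,p5) and y to (h2,h1,p4). */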
if(thread_y+T1*0<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*0][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*0<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*0] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_1*p4ld_t2+h1_1*h1ld_t2+h2_1*h2ld_t2; v2_d_off=h3_1*h3ld_v2+p6_1*p6ld_v2+p5_1*p5ld_v2; if(thread_y+T1*1<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*1][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*1<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*1] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_2*p4ld_t2+h1_2*h1ld_t2+h2_2*h2ld_t2; v2_d_off=h3_2*h3ld_v2+p6_2*p6ld_v2+p5_2*p5ld_v2; if(thread_y+T1*2<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*2][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*2<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*2] = v2_d[v2_d_off+p7*p7ld_v2]; } t2_d_off=p4_3*p4ld_t2+h1_3*h1ld_t2+h2_3*h2ld_t2; v2_d_off=h3_3*h3ld_v2+p6_3*p6ld_v2+p5_3*p5ld_v2; if(thread_y+T1*3<total_y)for(p7l=threadIdx.x;p7l<p7l_hi;p7l+=blockDim.x){ p7=p7l+p7T; t2_shm[in1_idxl+T1*3][p7l] = t2_d[t2_d_off+p7*p7ld_t2]; } if(thread_x+T1*3<total_x)for(p7l=threadIdx.y;p7l<p7l_hi;p7l+=blockDim.y){ p7=p7l+p7T; v2_shm[p7l][in2_idxl+T1*3] = v2_d[v2_d_off+p7*p7ld_v2]; } __syncthreads(); for(p7l=0;p7l<p7l_hi;++p7l){ a1=t2_shm[in1_idxl+T1*0][p7l]; a2=t2_shm[in1_idxl+T1*1][p7l]; a3=t2_shm[in1_idxl+T1*2][p7l]; a4=t2_shm[in1_idxl+T1*3][p7l]; b1=v2_shm[p7l][in2_idxl+T2*0]; b2=v2_shm[p7l][in2_idxl+T2*1]; b3=v2_shm[p7l][in2_idxl+T2*2]; b4=v2_shm[p7l][in2_idxl+T2*3]; tlocal1+=a1*b1; tlocal2+=a2*b1; tlocal3+=a3*b1; tlocal4+=a4*b1; tlocal5+=a1*b2; tlocal6+=a2*b2; tlocal7+=a3*b2; tlocal8+=a4*b2; tlocal9+=a1*b3; tlocal10+=a2*b3; tlocal11+=a3*b3; tlocal12+=a4*b3; tlocal13+=a1*b4; tlocal14+=a2*b4; tlocal15+=a3*b4; tlocal16+=a4*b4; } __syncthreads(); } if(thread_x+T1*0<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2; t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3; t3d[h2_3*h2ld_t3+h3_0*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal4; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2; t3d[h2_2*h2ld_t3+h3_0*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal3; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1; t3d[h2_1*h2ld_t3+h3_0*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal2; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_0*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_0*p6ld_t3+p5_0*p5ld_t3]+=tlocal1; } } if(thread_x+T1*1<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6; t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7; t3d[h2_3*h2ld_t3+h3_1*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal8; } else 
if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6; t3d[h2_2*h2ld_t3+h3_1*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal7; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5; t3d[h2_1*h2ld_t3+h3_1*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal6; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_1*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_1*p6ld_t3+p5_1*p5ld_t3]+=tlocal5; } } if(thread_x+T1*2<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10; t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11; t3d[h2_3*h2ld_t3+h3_2*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal12; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10; t3d[h2_2*h2ld_t3+h3_2*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal11; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9; t3d[h2_1*h2ld_t3+h3_2*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal10; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_2*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_2*p6ld_t3+p5_2*p5ld_t3]+=tlocal9; } } if(thread_x+T1*3<total_x){ if(thread_y+T2*3<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14; t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15; t3d[h2_3*h2ld_t3+h3_3*h3ld_t3+h1_3*h1ld_t3+p4_3*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal16; } else if(thread_y+T2*2<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14; t3d[h2_2*h2ld_t3+h3_3*h3ld_t3+h1_2*h1ld_t3+p4_2*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal15; } else if(thread_y+T2*1<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13; t3d[h2_1*h2ld_t3+h3_3*h3ld_t3+h1_1*h1ld_t3+p4_1*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal14; } else if(thread_y+T2*0<total_y) { t3d[h2_0*h2ld_t3+h3_3*h3ld_t3+h1_0*h1ld_t3+p4_0*p4ld_t3+p6_3*p6ld_t3+p5_3*p5ld_t3]+=tlocal13; } } __syncthreads(); } extern "C" void sd_t_d2_9_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, size_t p7d, double *t3, double *t2, double *v2) { size_t p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3; size_t size_t3,size_block_t3,size_el_block_t3,size_t2,size_v2; cudaStream_t *streams; size_t nstreams,i; double *t2_d,*v2_d; size_t3=h2d*h3d*h1d*p4d*p6d*p5d*sizeof(double); size_t2=p7d*p4d*h1d*h2d*sizeof(double); size_v2=p7d*h3d*p6d*p5d*sizeof(double); cudaFuncSetCacheConfig(sd_t_d2_9_kernel, cudaFuncCachePreferShared); nstreams=1; size_block_t3=size_t3/nstreams; 
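/* nstreams is fixed at 1, so the per-stream partition sizes computed here and
   on the next line describe a single block and are not consumed anywhere else
   in this routine; the one kernel launch below is followed by
   cudaThreadSynchronize(), the legacy spelling of cudaDeviceSynchronize(). */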
size_el_block_t3=size_block_t3/sizeof(double); //t3d=(double*)getGpuMem(size_t3); t2_d=(double*)getGpuMem(size_t2); v2_d=(double*)getGpuMem(size_v2); streams=(cudaStream_t*) malloc(nstreams*sizeof(cudaStream_t)); assert(streams!= NULL); for(i=0;i<nstreams;++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])) ; } CUDA_SAFE(cudaMemcpy(t2_d,t2,size_t2,cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d,v2,size_v2,cudaMemcpyHostToDevice)); p7ld_t2=1; p4ld_t2=p7d; h1ld_t2=p4d*p7d; h2ld_t2=h1d*p4d*p7d; p7ld_v2=1; h3ld_v2=p7d; p6ld_v2=h3d*p7d; p5ld_v2=p6d*h3d*p7d; h2ld_t3=1; h3ld_t3=h2d; h1ld_t3=h3d*h2d; p4ld_t3=h1d*h3d*h2d; p6ld_t3=p4d*h1d*h3d*h2d; p5ld_t3=p6d*p4d*h1d*h3d*h2d; size_t total_x = h3d*p6d*p5d; size_t total_y = p4d*h1d*h2d; dim3 dimBlock(T2,T1);dim3 dimGrid(DIV_UB(total_x,(4*T2)), DIV_UB(total_y,(4*T1))); for(i=0;i<nstreams;++i){ sd_t_d2_9_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p7d,p7ld_t2,p4ld_t2,h1ld_t2,h2ld_t2,p7ld_v2,h3ld_v2,p6ld_v2,p5ld_v2,h2ld_t3,h3ld_t3,h1ld_t3,p4ld_t3,p6ld_t3,p5ld_t3,t3_d,t2_d,v2_d,i,total_x,total_y); CHECK_ERR("Kernel execution failed"); } cudaThreadSynchronize(); for(i=0;i<nstreams;++i){ cudaStreamDestroy(streams[i]);} //freeGpuMem(t3d); freeGpuMem(t2_d); freeGpuMem(v2_d); free(streams); } extern "C" void sd_t_d2_9_cuda_(Integer *h1d, Integer* h2d, Integer* h3d, Integer* p4d, Integer* p5d, Integer* p6d, Integer* p7d, double *t3, double *t2, double *v2) { sd_t_d2_9_cuda((size_t)*h1d,(size_t)*h2d,(size_t)*h3d,(size_t)*p4d,(size_t)*p5d,(size_t)*p6d,(size_t)*p7d,t3,t2,v2); } #define MAX_h3 64 /* IMPORTANT!!!! t3_d must be passed as parameter to kernel function. A __global__ function can't access the global variable directly*/ __global__ void compute_energy_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,double* eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, double* energy, double factor, size_t total_size, double* t3d, double* t3_sd) { size_t h1,h2,p6,p4,p5, h3,i=0; double e1,e2,e4,e5,e6; // __shared__ double t2_shm[MAX_h3]; __shared__ double energy_s[T1]; __shared__ double energy2_s[T1]; double inner_fac; size_t limit; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; if(threadIdx.x==0) { energy[blockIdx.x]=0; energy[blockIdx.x+gridDim.x]=0; energy_s[threadIdx.x] = 0.0; energy2_s[threadIdx.x] = 0.0; } for(size_t j =0; j<T2*T1;j++) { thread_x = T2*T1*blockIdx.x + j; rest_x = thread_x; __syncthreads(); h2=rest_x%h2d; rest_x=rest_x/h2d; h1=rest_x%h1d; rest_x=rest_x/h1d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; rest_x=rest_x/p5d; p4=rest_x%p4d; e1 = eval1[h1]; e2 = eval2[h2]; e4 = eval4[p4]; e5 = eval5[p5]; e6 = eval6[p6]; /* for(p4=0;p4<p4d;p4++) for(p5 = 0;p5<p5d;p5++) for(p6=0;p6<p6d;p6++) for(h1= 0;h1<h1d;h1++) for(h2=0;h2<h2d;h2++) for(h3=0;h3<h3d;h3++) { inner_fac = -eval4[p4]-eval5[p5]-eval6[p6]+eval1[h1] +eval2[h2]+eval3[h3]; energy_s[0]+=factor*t3d[i]*t3d[i]/inner_fac; energy2_s[0]+=factor*t3d[i]*(t3_sd[i]+t3d[i])/inner_fac; i++; } */ if(thread_x<total_size) for(size_t i=0;i<h3d;i++) { inner_fac = -e4-e5-e6+e1+e2+eval3[i]; //t2_shm[i]; //ckbn avoid e1 in case we need just (T) energy_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*t3d[thread_x*h3d+i]/inner_fac; energy2_s[threadIdx.x] += factor* t3d[thread_x*h3d+i]*(t3_sd[thread_x*h3d+i]+t3d[thread_x*h3d+i])/inner_fac; } __syncthreads(); } if(threadIdx.x==0) { /* limit = blockDim.x; if (blockIdx.x == (gridDim.x-1)) limit = total_size%blockDim.x; for(size_t i=0;i<limit;i++) { 
energy[blockIdx.x]+=energy_s[i]; energy[blockIdx.x+gridDim.x]+=energy2_s[i]; } */ energy[blockIdx.x] = energy_s[0]; energy[blockIdx.x+gridDim.x] = energy2_s[0]; } __syncthreads(); } extern "C" void compute_energy(double factor, double* energy, double* eval1, double* eval2,double* eval3,double* eval4,double* eval5,double* eval6,size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d,size_t p6d, double* host1, double* host2) //ckbn en_comment, double* total_d, double* total_s) { double* energy_d, *energy_h; double* eval_d1,*eval_d2,*eval_d3,*eval_d4,*eval_d5,*eval_d6; size_t size_energy = 2*sizeof(double); size_t total_block = DIV_UB((h1d*h2d*p4d*p5d*p6d), (T2*T1)); // size_t total_block = 1; size_t total_elements = h1d*h2d*p4d*p5d*p6d; energy_d = (double*)getGpuMem(size_energy*total_block*2); size_t i=0,in; double* t3 = (double*)malloc(sizeof(double)*h3d*total_elements); double* ts3 = (double*)malloc(sizeof(double)*h3d*total_elements); energy_h = (double*)getHostMem(size_energy*2*total_block); eval_d1 = (double*)getGpuMem(h1d*sizeof(double)); eval_d2 = (double*)getGpuMem(h2d*sizeof(double)); eval_d3 = (double*)getGpuMem(h3d*sizeof(double)); eval_d4 = (double*)getGpuMem(p4d*sizeof(double)); eval_d5 = (double*)getGpuMem(p5d*sizeof(double)); eval_d6 = (double*)getGpuMem(p6d*sizeof(double)); CUDA_SAFE(cudaMemcpy(eval_d1, eval1, h1d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d2, eval2, h2d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d3, eval3, h3d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d4, eval4, p4d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d5, eval5, p5d*sizeof(double), cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(eval_d6, eval6, p6d*sizeof(double), cudaMemcpyHostToDevice)); /* for test only */ //printf("host 2 is %f %f\n", host2[0], host2[1]); // CUDA_SAFE(cudaMemcpy(t3_s_d, host2, total_elements*h3d*sizeof(double), cudaMemcpyHostToDevice)); dim3 dimBlock(1); //T2*T1); dim3 dimGrid(total_block); compute_energy_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d, eval_d1,eval_d2,eval_d3,eval_d4,eval_d5,eval_d6,energy_d, factor, h1d*h2d*p4d*p5d*p6d, t3_d, t3_s_d); cudaThreadSynchronize(); //CHECK_ERR("Kernel execution failed"); CUDA_SAFE(cudaMemcpy(((char *) energy_h) , ((char *) energy_d) , size_energy*total_block*2, cudaMemcpyDeviceToHost)); for(size_t i=1;i<dimGrid.x;i++) { energy_h[0]+=energy_h[i]; energy_h[dimGrid.x]+=energy_h[i+dimGrid.x]; } // printf("CUDA energy_h is %f %f %d %d %d %d %d %d\n", energy_h[0], energy_h[dimGrid.x]); //, total_size, h1d, h2d, p4d, p5d,p6d); /* CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_d) , sizeof(double)*h3d*total_elements, cudaMemcpyDeviceToHost)); CUDA_SAFE(cudaMemcpy(((char *) ts3) , ((char *) t3_s_d) , sizeof(double)*h3d*total_elements, cudaMemcpyDeviceToHost)); total_s[0]=0.0, total_d[0]=0.0; for(size_t i=0;i<h3d*total_elements;i++) { total_s[0] += ts3[i]; total_d[0] += t3[i]; } */ // printf("Total doubles and singles %f, %f\n", total_d, total_s); energy[0] = energy_h[0]; energy[1] = energy_h[dimGrid.x]; freeGpuMem(energy_d); freeGpuMem(eval_d1); freeGpuMem(eval_d2); freeGpuMem(eval_d3); freeGpuMem(eval_d4); freeGpuMem(eval_d5); freeGpuMem(eval_d6); freeHostMem(energy_h); } extern "C" void compute_en_(double * factor, double * energy, double * eval1,double* eval2,double* eval3,double* eval4,double* eval5,double* eval6, Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double* host1, double* 
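// compute_energy_kernel above accumulates, per (h1,h2,p4,p5,p6) and summed
// over h3, the two quantities
//   energy_s  += factor * t3d[i]*t3d[i] / D
//   energy2_s += factor * t3d[i]*(t3_sd[i]+t3d[i]) / D
// with D = eval1[h1]+eval2[h2]+eval3[h3]-eval4[p4]-eval5[p5]-eval6[p6];
// compute_energy then reduces the per-block results so that energy[0] holds
// the first sum and energy[1] the second.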
host2) //ckbn en_comment,double* total_d, double* total_s) { compute_energy((double) *factor, energy, eval1,eval2, eval3, eval4, eval5, eval6,(size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, host1, host2); //ckbn en_comment ,total_d, total_s); } //__device__ double* t3_d; extern "C" void set_dev_mem_s(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d,size_t p6d) { size_t size_t3; size_t3 = h1d*h2d*h3d*p4d*p5d*p6d; t3_s_d = (double *) getGpuMem(size_t3*sizeof(double)); cudaMemset(t3_s_d,0,size_t3*sizeof(double)); } extern "C" void dev_mem_s_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d) { set_dev_mem_s((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_1_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3, double *t2_d, double *v2_d,size_t p4, size_t total_x, double* t3d) { size_t h1,h2,h3,p6; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_1_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); //CUDA_SAFE(cudaMalloc((void**) &t3_d, size_t3)); //CUDA_SAFE(cudaMalloc((void**) &t2_d, size_t2)); //CUDA_SAFE(cudaMalloc((void**) &v2_d, size_v2)); // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d 
* h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); } /* st = timer(); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ cudaThreadSynchronize(); // CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); // cudaFree(t2_d); // cudaFree(v2_d); //freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_1_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_1_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p6,p5,p4] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_2_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,double *t2_d, double *v2_d,size_t p4, size_t total_x, double* t3d) { size_t h1,h2,h3,p6; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_2_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; }*/ 
//CUDA_SAFE(cudaMalloc((void**) &t2_d, size_t2)); //CUDA_SAFE(cudaMalloc((void**) &v2_d, size_v2)); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); /* assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); }*/ CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); // for(i=0;i<nstreams;++i){ sd_t_s1_2_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); // } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); // CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); /* for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); }*/ freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_2_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_2_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } extern "C" void sd_t_s1_3_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; // 
p5ld_t3 = p6d * h1d * h2d * h3d; p4ld_t3 = p5d * p6d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_1_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t2_d,v2_d,i,total_x, t3_s_d); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_3_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_3_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_4_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p5ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t p4, size_t total_x) { size_t h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_4_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) 
getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); /* assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); }*/ CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); i=0; // for(i=0;i<nstreams;++i){ sd_t_s1_4_kernel<<<dimGrid,dimBlock,0>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); //sd_t_s1_4_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); // } cudaThreadSynchronize(); /* CUDA_SAFE(cudaMemcpy(((char *) t3_p) , ((char *) t3_d) , size_block_t3, cudaMemcpyDeviceToHost)); printf("Time for Async DeviceToHost %f\n", et-st); stream = 0; // while (stream < nstreams) { // while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = t3_p; //[stream * size_el_block_t3]; double *dst = t3; //[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] -= src[i]; } // stream++; // } */ // cudaThreadSynchronize(); /* for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); }*/ // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_4_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_4_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_5_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p5ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t p4, size_t total_x) { size_t h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { 
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_5_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d ; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_5_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; } */ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_5_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_5_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h1,h3,h2,p6,p4,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_6_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p5d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t p5ld_v2,size_t 
h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p5ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t p4, size_t total_x) { size_t h1,h2,h3,p6,p5; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; rest_x=rest_x/p6d; p5=rest_x%p5d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_6_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; p5ld_v2 = p6d * h3d * h2d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p6ld_t3 = h1d * h2d * h3d; p4ld_t3 = p6d * h1d * h2d * h3d; p5ld_t3 = p4d * p6d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_6_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d,p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,p5ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p5ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm 
extern "C" void sd_t_s1_6_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_6_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h3,h2,h1,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_7_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t p4, size_t total_x) { size_t h1,h2,h3,p6; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]+=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } extern "C" void sd_t_s1_7_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h3ld_t3 = 1; h2ld_t3 = h3d; h1ld_t3 = h2d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, 
cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } #undef T1 #undef T2 #undef Tcomm extern "C" void sd_t_s1_7_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_7_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } #define T1 16 #define T2 16 #define Tcomm 16 __global__ void sd_t_s1_8_kernel(size_t h1d,size_t h2d,size_t h3d,size_t p4d,size_t p6d,size_t p4ld_t2,size_t h1ld_t2,size_t h3ld_v2,size_t h2ld_v2,size_t p6ld_v2,size_t h3ld_t3,size_t h2ld_t3,size_t h1ld_t3,size_t p6ld_t3,size_t p4ld_t3,double *t3d, double *t2_d, double *v2_d,size_t p4, size_t total_x) { size_t h1,h2,h3,p6; __shared__ double t2_shm[T1*4*Tcomm]; for(size_t i=threadIdx.x;i<h1d*p4d;i+=blockDim.x) if(i<h1d*p4d) t2_shm[i] = t2_d[i]; size_t rest_x=blockIdx.x; size_t thread_x = T2*T1 * rest_x + threadIdx.x; rest_x = thread_x; __syncthreads(); /* the following computation may need to happen inside the loop */ for(size_t i=0;i<total_x;i+=gridDim.x*blockDim.x) { rest_x += i; h3=rest_x%h3d; rest_x=rest_x/h3d; h2=rest_x%h2d; rest_x=rest_x/h2d; p6=rest_x%p6d; if((thread_x+i)<total_x) for(h1=0;h1<h1d;h1++) for(p4=0;p4<p4d;p4++) { t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2]; } } __syncthreads(); } /*----------------------------------------------------------------------* *t3[h3,h1,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ #define T1 16 #define T2 16 #define Tcomm 16 extern "C" void sd_t_s1_8_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d 
* p7d; h3ld_t3 = 1; h1ld_t3 = h3d; h2ld_t3 = h1d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_8_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); // CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } // freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_8_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_8_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); } /*----------------------------------------------------------------------* *t3[h1,h3,h2,p4,p6,p5] -= t2[p4,h1] * v2[h3,h2,p6,p5] *----------------------------------------------------------------------*/ extern "C" void sd_t_s1_9_cuda(size_t h1d, size_t h2d, size_t h3d, size_t p4d, size_t p5d, size_t p6d, double *t3, double *t2, double *v2) { double st, et; //ckbn st = timer(); size_t p7ld_t2, p4ld_t2, h1ld_t2, h2ld_v2, p7ld_v2, h3ld_v2, p6ld_v2, p5ld_v2, h3ld_t3, h2ld_t3, h1ld_t3, p6ld_t3, p5ld_t3, p4ld_t3; size_t size_t3, size_block_t3, size_el_block_t3, size_t2, size_v2; cudaStream_t *streams; size_t nstreams, i; double *t2_d, *v2_d, *t3_p; //size_t3 = h3d * h2d * h1d * p6d * p5d * p4d * sizeof(double); size_t2 = p4d * h1d * sizeof(double); size_v2 = h3d * h2d * p6d * p5d * sizeof(double); nstreams = 1; size_block_t3 = size_t3 / nstreams; size_el_block_t3 = size_block_t3 / sizeof(double); /* if(first==1) { t3_d = (double *) getGpuMem(size_t3); cudaMemset(t3_d,0,size_t3*sizeof(double)); first = 0; } */ // t3_d = (double *) getGpuMem(size_t3); t2_d = (double *) getGpuMem(size_t2); v2_d = (double *) getGpuMem(size_v2); //t3_p = (double *) getHostMem(size_t3); streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); assert(streams != NULL); for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaStreamCreate(&streams[i])); } CUDA_SAFE(cudaMemcpy(t2_d, t2, size_t2, cudaMemcpyHostToDevice)); CUDA_SAFE(cudaMemcpy(v2_d, v2, size_v2, cudaMemcpyHostToDevice)); p4ld_t2 = 1; h1ld_t2 = p4d; h3ld_v2 = 1; h2ld_v2 = h3d; p6ld_v2 = h3d * h2d; // p5ld_v2 = p6d * h3d * p7d; h1ld_t3 = 1; h3ld_t3 = h1d; h2ld_t3 = h1d * h3d; p4ld_t3 = h1d * h2d * h3d; // p5ld_t3 = p6d * h1d * h2d * h3d; p6ld_t3 = p4d * h1d * h2d * h3d; size_t total_x = h3d*h2d*p6d*p5d; dim3 dimBlock(T2*T1);dim3 dimGrid(DIV_UB(total_x,T2*T1), 1); for(i=0;i<nstreams;++i){ sd_t_s1_7_kernel<<<dimGrid,dimBlock,0,streams[i]>>>(h1d,h2d,h3d,p4d,p5d*p6d,p4ld_t2,h1ld_t2,h3ld_v2,h2ld_v2,p6ld_v2,h3ld_t3,h2ld_t3,h1ld_t3,p6ld_t3,p4ld_t3,t3_s_d,t2_d,v2_d,i,total_x); 
CHECK_ERR("Kernel execution failed"); } /* for (i = 0; i < nstreams; ++i) { CUDA_SAFE(cudaMemcpyAsync(((char *) t3_p) + i * size_block_t3, ((char *) t3_s_d) + i * size_block_t3, size_block_t3, cudaMemcpyDeviceToHost, streams[i])); } stream = 0; while (stream < nstreams) { while (cudaStreamQuery(streams[stream]) != cudaSuccess); double *src = &t3_p[stream * size_el_block_t3]; double *dst = &t3[stream * size_el_block_t3]; for (i = 0; i < size_el_block_t3; ++i) { dst[i] = src[i]; } stream++; }*/ cudaThreadSynchronize(); //CUDA_SAFE(cudaMemcpy(((char *) t3) , ((char *) t3_s_d) , size_t3, cudaMemcpyDeviceToHost)); // printf("out is %lf\n", t3_p[0]); for (i = 0; i < nstreams; ++i) { cudaStreamDestroy(streams[i]); } //freeGpuMem(t3_d); freeGpuMem(t2_d); freeGpuMem(v2_d); //freeHostMem(t3_p); free(streams); } extern "C" void sd_t_s1_9_cuda_(Integer * h1d, Integer * h2d, Integer * h3d, Integer * p4d, Integer * p5d, Integer * p6d, double *t3, double *t2, double *v2) { sd_t_s1_9_cuda((size_t) *h1d, (size_t) *h2d, (size_t) *h3d, (size_t) *p4d, (size_t) *p5d, (size_t) *p6d, t3, t2, v2); }
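/*
 * Hedged illustration (not part of the original source): a plain CPU
 * reference for the contribution that sd_t_s1_1_kernel accumulates. The
 * banner comments above read "-=", but sd_t_s1_1_kernel itself uses "+=",
 * so this reference follows the kernel. Index strides mirror the leading
 * dimensions set up in sd_t_s1_1_cuda (h3 fastest in t3 and v2, p4 fastest
 * in t2); the function name is hypothetical.
 */
#include <cstddef>

void sd_t_s1_1_reference(std::size_t h1d, std::size_t h2d, std::size_t h3d,
                         std::size_t p4d, std::size_t p5d, std::size_t p6d,
                         double *t3, const double *t2, const double *v2)
{
  for (std::size_t p4 = 0; p4 < p4d; ++p4)
  for (std::size_t p5 = 0; p5 < p5d; ++p5)
  for (std::size_t p6 = 0; p6 < p6d; ++p6)
  for (std::size_t h1 = 0; h1 < h1d; ++h1)
  for (std::size_t h2 = 0; h2 < h2d; ++h2)
  for (std::size_t h3 = 0; h3 < h3d; ++h3) {
    // t3[h3,h2,h1,p6,p5,p4] += t2[p4,h1] * v2[h3,h2,p6,p5]
    std::size_t it3 = h3 + h2 * h3d + h1 * h2d * h3d
                    + p6 * h1d * h2d * h3d
                    + p5 * p6d * h1d * h2d * h3d
                    + p4 * p5d * p6d * h1d * h2d * h3d;
    std::size_t iv2 = h3 + h2 * h3d + p6 * h2d * h3d + p5 * p6d * h2d * h3d;
    t3[it3] += t2[p4 + h1 * p4d] * v2[iv2];
  }
}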
#include <cuspatial/error.hpp> #include <cuspatial/spatial_join.hpp> #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/zip_iterator.h> #include <tuple> namespace cuspatial { namespace detail { namespace { template <typename UnaryFunction> inline auto make_counting_transform_iterator(cudf::size_type start, UnaryFunction f) { return thrust::make_transform_iterator(thrust::make_counting_iterator(start), f); } template <typename T> inline std::unique_ptr<cudf::table> join_quadtree_and_bboxes(cudf::table_view const& quadtree, cudf::table_view const& poly_bbox, T x_min, T x_max, T y_min, T y_max, T scale, int8_t max_depth, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const node_levels = quadtree.column(1); // uint8_t auto const node_counts = quadtree.column(3); // uint32_t auto const node_offsets = quadtree.column(4); // uint32_t // Count the number of top-level nodes to start. // This could be provided explicitly, but count_if should be fast enough. auto num_top_level_leaves = thrust::count_if(rmm::exec_policy(stream), node_levels.begin<uint8_t>(), node_levels.end<uint8_t>(), thrust::placeholders::_1 == 0); auto num_pairs = num_top_level_leaves * poly_bbox.num_rows(); // The found poly-quad pairs are dynamic and can not be pre-allocated. // Relevant arrays are resized accordingly for memory efficiency. // Vectors for intermediate poly and node indices at each level rmm::device_uvector<uint8_t> cur_types(num_pairs, stream); rmm::device_uvector<uint8_t> cur_levels(num_pairs, stream); rmm::device_uvector<uint32_t> cur_node_idxs(num_pairs, stream); rmm::device_uvector<uint32_t> cur_poly_idxs(num_pairs, stream); // Vectors for found pairs of poly and leaf node indices rmm::device_uvector<uint8_t> out_types(num_pairs, stream); rmm::device_uvector<uint8_t> out_levels(num_pairs, stream); rmm::device_uvector<uint32_t> out_node_idxs(num_pairs, stream); rmm::device_uvector<uint32_t> out_poly_idxs(num_pairs, stream); cudf::size_type num_leaves{0}; cudf::size_type num_results{0}; cudf::size_type num_parents{0}; auto make_current_level_iter = [&]() { return thrust::make_zip_iterator( cur_types.begin(), cur_levels.begin(), cur_node_idxs.begin(), cur_poly_idxs.begin()); }; auto make_output_values_iter = [&]() { return num_results + thrust::make_zip_iterator( out_types.begin(), out_levels.begin(), out_node_idxs.begin(), out_poly_idxs.begin()); }; // Find intersections for all the top level quadrants and polygons std::tie(num_parents, num_leaves) = find_intersections(quadtree, poly_bbox, // The top-level node indices make_counting_transform_iterator( 0, [=] __device__(auto i) { return i % num_top_level_leaves; }), // The top-level poly indices make_counting_transform_iterator( 0, [=] __device__(auto i) { return i / num_top_level_leaves; }), make_current_level_iter(), // intermediate intersections or parent quadrants // found during traversal // found intersecting quadrant and polygon indices for output make_output_values_iter(), num_pairs, x_min, y_min, scale, max_depth, stream); num_results += num_leaves; // Traverse the quadtree descending to `max_depth`, or until no more parent quadrants are found for (uint8_t level{1}; level < max_depth && num_parents > 0; ++level) { // Shrink the current level's vectors to overwrite elements removed by 
`find_intersections()` cur_types.shrink_to_fit(stream); cur_levels.shrink_to_fit(stream); cur_node_idxs.shrink_to_fit(stream); cur_poly_idxs.shrink_to_fit(stream); // Grow preallocated output vectors. The next level will expand out to no more // than `num_parents * 4` pairs, since each parent quadrant has up to 4 children. size_t max_num_results = num_results + num_parents * 4; if (max_num_results > out_types.capacity()) { // grow preallocated output sizes in multiples of the current capacity // auto new_size = out_types.capacity() * ((max_num_results / out_types.capacity()) + 1); out_types.resize(max_num_results, stream); out_levels.resize(max_num_results, stream); out_node_idxs.resize(max_num_results, stream); out_poly_idxs.resize(max_num_results, stream); } // Walk one level down and fill the current level's vectors with // the next level's quadrant info and polygon indices. std::tie(num_pairs, cur_types, cur_levels, cur_node_idxs, cur_poly_idxs) = descend_quadtree(node_counts.begin<uint32_t>(), node_offsets.begin<uint32_t>(), num_parents, cur_types, cur_levels, cur_node_idxs, cur_poly_idxs, stream); // Find intersections for the the next level's quadrants and polygons std::tie(num_parents, num_leaves) = find_intersections(quadtree, poly_bbox, cur_node_idxs.begin(), cur_poly_idxs.begin(), make_current_level_iter(), // intermediate intersections or parent // quadrants found during traversal // found intersecting quadrant and polygon indices for output make_output_values_iter(), num_pairs, x_min, y_min, scale, max_depth, stream); num_results += num_leaves; } // Sort the output poly/quad indices by quadrant [&]() { // Copy the relevant `node_offsets` into a tmp vec so we don't modify the quadtree column rmm::device_uvector<uint32_t> tmp_node_offsets(num_results, stream); auto const iter = thrust::make_permutation_iterator(node_offsets.begin<uint32_t>(), out_node_idxs.begin()); thrust::copy(rmm::exec_policy(stream), iter, iter + num_results, tmp_node_offsets.begin()); thrust::stable_sort_by_key( rmm::exec_policy(stream), tmp_node_offsets.begin(), tmp_node_offsets.end(), thrust::make_zip_iterator(out_poly_idxs.begin(), out_node_idxs.begin())); }(); std::vector<std::unique_ptr<cudf::column>> cols{}; cols.reserve(2); cols.push_back(make_fixed_width_column<uint32_t>(num_results, stream, mr)); cols.push_back(make_fixed_width_column<uint32_t>(num_results, stream, mr)); thrust::copy(rmm::exec_policy(stream), out_poly_idxs.begin(), out_poly_idxs.begin() + num_results, cols.at(0)->mutable_view().begin<uint32_t>()); thrust::copy(rmm::exec_policy(stream), out_node_idxs.begin(), out_node_idxs.begin() + num_results, cols.at(1)->mutable_view().begin<uint32_t>()); return std::make_unique<cudf::table>(std::move(cols)); } struct dispatch_quadtree_bounding_box_join { template <typename T, std::enable_if_t<std::is_floating_point<T>::value>* = nullptr> inline std::unique_ptr<cudf::table> operator()(cudf::table_view const& quadtree, cudf::table_view const& poly_bbox, double x_min, double x_max, double y_min, double y_max, double scale, int8_t max_depth, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return join_quadtree_and_bboxes<T>(quadtree, poly_bbox, static_cast<T>(x_min), static_cast<T>(x_max), static_cast<T>(y_min), static_cast<T>(y_max), static_cast<T>(scale), max_depth, stream, mr); } template <typename T, std::enable_if_t<!std::is_floating_point<T>::value>* = nullptr, typename... Args> inline std::unique_ptr<cudf::table> operator()(Args&&...) 
{ CUSPATIAL_FAIL("Only floating-point types are supported"); } }; } // namespace std::unique_ptr<cudf::table> join_quadtree_and_bounding_boxes(cudf::table_view const& quadtree, cudf::table_view const& poly_bbox, double x_min, double x_max, double y_min, double y_max, double scale, int8_t max_depth, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return cudf::type_dispatcher(poly_bbox.column(0).type(), dispatch_quadtree_bounding_box_join{}, quadtree, poly_bbox, x_min, x_max, y_min, y_max, scale, max_depth, stream, mr); } } // namespace detail std::unique_ptr<cudf::table> join_quadtree_and_bounding_boxes(cudf::table_view const& quadtree, cudf::table_view const& poly_bbox, double x_min, double x_max, double y_min, double y_max, double scale, int8_t max_depth, rmm::mr::device_memory_resource* mr) { CUSPATIAL_EXPECTS(quadtree.num_columns() == 5, "quadtree table must have 5 columns"); CUSPATIAL_EXPECTS(poly_bbox.num_columns() == 4, "polygon bbox table must have 4 columns"); CUSPATIAL_EXPECTS(scale > 0, "scale must be positive"); CUSPATIAL_EXPECTS(x_min < x_max && y_min < y_max, "invalid bounding box (x_min, x_max, y_min, y_max)"); CUSPATIAL_EXPECTS(max_depth > 0 && max_depth < 16, "maximum depth must be positive and less than 16"); if (quadtree.num_rows() == 0 || poly_bbox.num_rows() == 0) { std::vector<std::unique_ptr<cudf::column>> cols{}; cols.reserve(2); cols.push_back(cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32})); cols.push_back(cudf::make_empty_column(cudf::data_type{cudf::type_id::UINT32})); return std::make_unique<cudf::table>(std::move(cols)); } return detail::join_quadtree_and_bounding_boxes(quadtree, poly_bbox, x_min, x_max, y_min, y_max, scale, max_depth, rmm::cuda_stream_default, mr); } } // namespace cuspatial
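/*
 * Standalone sketch (not part of cuspatial) of the seeding pattern used by
 * join_quadtree_and_bboxes above: every (top-level quadrant, polygon) pair
 * is enumerated by decoding a flat counting iterator with % and /, which is
 * exactly what make_counting_transform_iterator produces there. Sizes below
 * are hypothetical.
 */
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <cstdio>

struct mod_by { int n; __host__ __device__ int operator()(int i) const { return i % n; } };
struct div_by { int n; __host__ __device__ int operator()(int i) const { return i / n; } };

int main()
{
  int num_top_level_leaves = 4;  // quadrants at level 0
  int num_polys            = 3;  // polygon bounding boxes
  int num_pairs            = num_top_level_leaves * num_polys;

  // node index cycles fastest; poly index advances once per full cycle
  auto node_idx = thrust::make_transform_iterator(thrust::make_counting_iterator(0),
                                                  mod_by{num_top_level_leaves});
  auto poly_idx = thrust::make_transform_iterator(thrust::make_counting_iterator(0),
                                                  div_by{num_top_level_leaves});

  thrust::device_vector<int> d_nodes(num_pairs), d_polys(num_pairs);
  thrust::copy_n(node_idx, num_pairs, d_nodes.begin());
  thrust::copy_n(poly_idx, num_pairs, d_polys.begin());

  for (int i = 0; i < num_pairs; ++i)
    std::printf("pair %d: node %d, poly %d\n", i, (int)d_nodes[i], (int)d_polys[i]);
  return 0;
}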
/*
Parallel reduction This sample shows how to perform a reduction operation on an array of values to produce a single value. Reductions are a very common computation in parallel algorithms. Any time an array of values needs to be reduced to a single value using a binary associative operator, a reduction can be used. Example applications include statistics computaions such as mean and standard deviation, and image processing applications such as finding the total luminance of an image. This code performs sum reductions, but any associative operator such as min() or max() could also be used. It assumes the input size is a power of 2. COMMAND LINE ARGUMENTS "--shmoo": Test performance for 1 to 32M elements with each of the 7 different kernels "--n=<N>": Specify the number of elements to reduce (default 1048576) "--threads=<N>": Specify the number of threads per block (default 128) "--kernel=<N>": Specify which kernel to run (0-6, default 6) "--maxblocks=<N>": Specify the maximum number of thread blocks to launch (kernel 6 only, default 64) "--cpufinal": Read back the per-block results and do final sum of block sums on CPU (default false) "--cputhresh=<N>": The threshold of number of blocks sums below which to perform a CPU final reduction (default 1) */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil_inline.h> #include <tests/Reduction/reduction.h> enum ReduceType { REDUCE_INT, REDUCE_FLOAT, REDUCE_DOUBLE }; //////////////////////////////////////////////////////////////////////////////// // declaration, forward template <class T> void runTest( int argc, char** argv, ReduceType datatype); #ifdef WIN32 #define strcasecmp strcmpi #endif extern "C" bool isPow2(unsigned int x) { return ((x&(x-1))==0); } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { char *typeChoice; cutGetCmdLineArgumentstr( argc, (const char**) argv, "type", &typeChoice); if (0 == typeChoice) { typeChoice = (char*)malloc(4 * sizeof(char)); strcpy(typeChoice, "int"); } ReduceType datatype = REDUCE_INT; if (!strcasecmp(typeChoice, "float")) datatype = REDUCE_FLOAT; else if (!strcasecmp(typeChoice, "double")) datatype = REDUCE_DOUBLE; else datatype = REDUCE_INT; printf("Reducing array of type %s.\n", typeChoice); cudaDeviceProp deviceProp; deviceProp.major = 1; deviceProp.minor = 0; int desiredMinorRevision = 0; if (datatype == REDUCE_DOUBLE) { deviceProp.minor = 3; desiredMinorRevision = 3; } int dev; cutilSafeCallNoSync(cudaChooseDevice(&dev, &deviceProp)); cutilSafeCallNoSync(cudaGetDeviceProperties(&deviceProp, dev)); if(deviceProp.major > 1 || deviceProp.minor >= desiredMinorRevision) { printf("Using Device %d: \"%s\"\n", dev, deviceProp.name); cutilSafeCallNoSync(cudaSetDevice(dev)); } else if (desiredMinorRevision == 3) { printf("There is no device supporting compute capability %d.%d.\n\n", 1, desiredMinorRevision); printf("TEST PASSED"); cudaThreadExit(); cutilExit(argc, argv); } switch (datatype) { default: case REDUCE_INT: runTest<int>( argc, argv, datatype); break; case REDUCE_FLOAT: runTest<float>( argc, argv, datatype); break; case REDUCE_DOUBLE: runTest<double>( argc, argv, datatype); break; } cudaThreadExit(); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Compute sum reduction on CPU //! 
We use Kahan summation for an accurate sum of large arrays. //! http://en.wikipedia.org/wiki/Kahan_summation_algorithm //! //! @param data pointer to input data //! @param size number of input data elements //////////////////////////////////////////////////////////////////////////////// template<class T> T reduceCPU(T *data, int size) { T sum = data[0]; T c = (T)0.0; for (int i = 1; i < size; i++) { T y = data[i] - c; T t = sum + y; c = (t - sum) - y; sum = t; } return sum; } unsigned int nextPow2( unsigned int x ) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the given reduction kernel // For the kernels >= 3, we set threads / block to the minimum of maxThreads and // n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel // 6, we observe the maximum specified number of blocks, because each thread in // that kernel can process a variable number of elements. //////////////////////////////////////////////////////////////////////////////// void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (whichKernel < 3) { threads = (n < maxThreads) ? nextPow2(n) : maxThreads; blocks = (n + threads - 1) / threads; } else { threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + (threads * 2 - 1)) / (threads * 2); } if (whichKernel == 6) blocks = min(maxBlocks, blocks); } //////////////////////////////////////////////////////////////////////////////// // This function performs a reduction of the input data multiple times and // measures the average reduction time. //////////////////////////////////////////////////////////////////////////////// template <class T> T benchmarkReduce(int n, int numThreads, int numBlocks, int maxThreads, int maxBlocks, int whichKernel, int testIterations, bool cpuFinalReduction, int cpuFinalThreshold, unsigned int timer, bool useSM13, T* h_odata, T* d_idata, T* d_odata) { T gpu_result = 0; bool needReadBack = true; for (int i = 0; i < testIterations; ++i) { gpu_result = 0; cudaThreadSynchronize(); cutilCheckError( cutStartTimer( timer)); // execute the kernel if (useSM13) reduce_sm13<T>(n, numThreads, numBlocks, whichKernel, d_idata, d_odata); else reduce_sm10<T>(n, numThreads, numBlocks, whichKernel, d_idata, d_odata); // check if kernel execution generated an error cutilCheckMsg("Kernel execution failed"); if (cpuFinalReduction) { // sum partial sums from each block on CPU // copy result from device to host cutilSafeCallNoSync( cudaMemcpy( h_odata, d_odata, numBlocks*sizeof(T), cudaMemcpyDeviceToHost) ); for(int i=0; i<numBlocks; i++) { gpu_result += h_odata[i]; } needReadBack = false; } else { // sum partial block sums on GPU int s=numBlocks; int kernel = (whichKernel == 6) ? 
5 : whichKernel; while(s > cpuFinalThreshold) { int threads = 0, blocks = 0; getNumBlocksAndThreads(kernel, s, maxBlocks, maxThreads, blocks, threads); if (useSM13) reduce_sm13<T>(s, threads, blocks, kernel, d_odata, d_odata); else reduce_sm10<T>(s, threads, blocks, kernel, d_odata, d_odata); if (kernel < 3) s = (s + threads - 1) / threads; else s = (s + (threads*2-1)) / (threads*2); } if (s > 1) { // copy result from device to host cutilSafeCallNoSync( cudaMemcpy( h_odata, d_odata, s * sizeof(T), cudaMemcpyDeviceToHost) ); for(int i=0; i < s; i++) { gpu_result += h_odata[i]; } needReadBack = false; } } cudaThreadSynchronize(); cutilCheckError( cutStopTimer(timer) ); } if (needReadBack) { // copy final sum from device to host cutilSafeCallNoSync( cudaMemcpy( &gpu_result, d_odata, sizeof(T), cudaMemcpyDeviceToHost) ); } return gpu_result; } //////////////////////////////////////////////////////////////////////////////// // This function calls benchmarkReduce multple times for a range of array sizes // and prints a report in CSV (comma-separated value) format that can be used for // generating a "shmoo" plot showing the performance for each kernel variation // over a wide range of input sizes. //////////////////////////////////////////////////////////////////////////////// template <class T> void shmoo(int minN, int maxN, int maxThreads, int maxBlocks, ReduceType datatype) { bool useSM13 = (datatype == REDUCE_DOUBLE); // create random input data on CPU unsigned int bytes = maxN * sizeof(T); T *h_idata = (T*) malloc(bytes); for(int i = 0; i < maxN; i++) { // Keep the numbers small so we don't get truncation error in the sum if (datatype == REDUCE_INT) h_idata[i] = (T)(rand() & 0xFF); else h_idata[i] = (rand() & 0xFF) / (T)RAND_MAX; } int maxNumBlocks = maxN / maxThreads; // allocate mem for the result on host side T* h_odata = (T*) malloc(maxNumBlocks*sizeof(T)); // allocate device memory and data T* d_idata = NULL; T* d_odata = NULL; cutilSafeCallNoSync( cudaMalloc((void**) &d_idata, bytes) ); cutilSafeCallNoSync( cudaMalloc((void**) &d_odata, maxNumBlocks*sizeof(T)) ); // copy data directly to device memory cutilSafeCallNoSync( cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice) ); cutilSafeCallNoSync( cudaMemcpy(d_odata, h_idata, maxNumBlocks*sizeof(T), cudaMemcpyHostToDevice) ); // warm-up #if 0 for (int kernel = 0; kernel < 7; kernel++) { if (useSM13) reduce_sm13<T>(maxN, maxThreads, maxNumBlocks, kernel, d_idata, d_odata); else reduce_sm10<T>(maxN, maxThreads, maxNumBlocks, kernel, d_idata, d_odata); } int testIterations = 100; #else int testIterations = 1; #endif unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); // print headers printf("Time in milliseconds for various numbers of elements for each kernel\n"); printf("\n\n"); printf("Kernel"); for (int i = minN; i <= maxN; i *= 2) { printf(", %d", i); } for (int kernel = 0; kernel < 7; kernel++) { printf("\n"); printf("%d", kernel); for (int i = minN; i <= maxN; i *= 2) { cutResetTimer(timer); int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(kernel, i, maxBlocks, maxThreads, numBlocks, numThreads); benchmarkReduce(i, numThreads, numBlocks, maxThreads, maxBlocks, kernel, testIterations, false, 1, timer, useSM13, h_odata, d_idata, d_odata); float reduceTime = cutGetAverageTimerValue(timer); printf(", %f", reduceTime); } } // cleanup cutilCheckError(cutDeleteTimer(timer)); free(h_idata); free(h_odata); cutilSafeCallNoSync(cudaFree(d_idata)); cutilSafeCallNoSync(cudaFree(d_odata)); } 
//////////////////////////////////////////////////////////////////////////////// // The main function whihc runs the reduction test. //////////////////////////////////////////////////////////////////////////////// template <class T> void runTest( int argc, char** argv, ReduceType datatype) { int size = 1<<20; // number of elements to reduce int maxThreads = 128; // number of threads per block int whichKernel = 6; int maxBlocks = 64; bool cpuFinalReduction = false; int cpuFinalThreshold = 1; cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &size); cutGetCmdLineArgumenti( argc, (const char**) argv, "threads", &maxThreads); cutGetCmdLineArgumenti( argc, (const char**) argv, "kernel", &whichKernel); cutGetCmdLineArgumenti( argc, (const char**) argv, "maxblocks", &maxBlocks); printf("%d elements\n", size); printf("%d threads (max)\n", maxThreads); cpuFinalReduction = (cutCheckCmdLineFlag( argc, (const char**) argv, "cpufinal") == CUTTrue); cutGetCmdLineArgumenti( argc, (const char**) argv, "cputhresh", &cpuFinalThreshold); bool runShmoo = (cutCheckCmdLineFlag(argc, (const char**) argv, "shmoo") == CUTTrue); if (runShmoo) { shmoo<T>(1, 33554432, maxThreads, maxBlocks, datatype); } else { // create random input data on CPU unsigned int bytes = size * sizeof(T); T *h_idata = (T *) malloc(bytes); for(int i=0; i<size; i++) { // Keep the numbers small so we don't get truncation error in the sum if (datatype == REDUCE_INT) h_idata[i] = (T)(rand() & 0xFF); else h_idata[i] = (rand() & 0xFF) / (T)RAND_MAX; } int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(whichKernel, size, maxBlocks, maxThreads, numBlocks, numThreads); if (numBlocks == 1) cpuFinalThreshold = 1; // allocate mem for the result on host side T* h_odata = (T*) malloc(numBlocks*sizeof(T)); printf("%d blocks\n", numBlocks); // allocate device memory and data T* d_idata = NULL; T* d_odata = NULL; cutilSafeCallNoSync( cudaMalloc((void**) &d_idata, bytes) ); cutilSafeCallNoSync( cudaMalloc((void**) &d_odata, numBlocks*sizeof(T)) ); // copy data directly to device memory cutilSafeCallNoSync( cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice) ); cutilSafeCallNoSync( cudaMemcpy(d_odata, h_idata, numBlocks*sizeof(T), cudaMemcpyHostToDevice) ); #if 0 // warm-up if (datatype == REDUCE_DOUBLE) reduce_sm13<T>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata); else reduce_sm10<T>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata); int testIterations = 100; #else int testIterations = 1; #endif unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); T gpu_result = 0; gpu_result = benchmarkReduce<T>(size, numThreads, numBlocks, maxThreads, maxBlocks, whichKernel, testIterations, cpuFinalReduction, cpuFinalThreshold, timer, datatype == REDUCE_DOUBLE, h_odata, d_idata, d_odata); float reduceTime = cutGetAverageTimerValue(timer); printf("Average time: %f ms\n", reduceTime); printf("Bandwidth: %f GB/s\n\n", (size * sizeof(int)) / (reduceTime * 1.0e6)); // compute reference solution T cpu_result = reduceCPU<T>(h_idata, size); if (datatype == REDUCE_INT) { printf("GPU result = %d\n", gpu_result); printf("CPU result = %d\n", cpu_result); printf("TEST %s\n", (gpu_result == cpu_result) ? "PASSED" : "FAILED"); } else { printf("GPU result = %0.12f\n", gpu_result); printf("CPU result = %0.12f\n", cpu_result); double threshold = (datatype == REDUCE_FLOAT) ? 1e-8 * size : 1e-12; double diff = abs((double)gpu_result - (double)cpu_result); printf("TEST %s\n", (diff < threshold) ? 
"PASSED" : "FAILED"); } // cleanup cutilCheckError( cutDeleteTimer(timer) ); free(h_idata); free(h_odata); cutilSafeCallNoSync(cudaFree(d_idata)); cutilSafeCallNoSync(cudaFree(d_odata)); } }
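/*
 * Hedged sketch (not one of the sample's kernels 0-6, which live in the
 * reduction kernel sources included via reduction.h): a minimal tree-based
 * shared-memory sum reduction illustrating the idea described in the header
 * comment above. It assumes blockDim.x is a power of two, as the sample does.
 */
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

__global__ void reduce_sum_sketch(const int *in, int *block_sums, int n)
{
  extern __shared__ int sdata[];
  unsigned tid = threadIdx.x;
  unsigned i   = blockIdx.x * blockDim.x + threadIdx.x;

  sdata[tid] = (i < (unsigned)n) ? in[i] : 0;  // one element per thread
  __syncthreads();

  // Halve the number of active threads each step; thread 0 ends with the block sum.
  for (unsigned s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) block_sums[blockIdx.x] = sdata[0];
}

int main()
{
  const int n = 1 << 20, threads = 128, blocks = (n + threads - 1) / threads;
  int *d_in = 0, *d_out = 0;
  cudaMalloc((void**)&d_in, n * sizeof(int));
  cudaMalloc((void**)&d_out, blocks * sizeof(int));
  cudaMemset(d_in, 0, n * sizeof(int));        // all zeros, so the expected sum is 0

  reduce_sum_sketch<<<blocks, threads, threads * sizeof(int)>>>(d_in, d_out, n);

  // Sum the per-block partial results on the host to keep the sketch short.
  std::vector<int> h_out(blocks);
  cudaMemcpy(h_out.data(), d_out, blocks * sizeof(int), cudaMemcpyDeviceToHost);
  long long total = 0;
  for (int b = 0; b < blocks; ++b) total += h_out[b];
  std::printf("sum = %lld\n", total);

  cudaFree(d_in); cudaFree(d_out);
  return 0;
}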
const int WS = 32; #define ORDER 1 #include "alg5.cu" #undef ORDER #define ORDER 2 #include "alg5.cu" namespace rod { struct recfilter5_plan { recfilter5_plan() // at least these should be initialized to make // upload_plan work when plan is empty : a_in(NULL) , width(0) , height(0) , border(0) { } virtual ~recfilter5_plan() { if(a_in != NULL) cudaFreeArray(a_in); } int width, height; int rowstride; float inv_width, inv_height; int m_size, n_size, last_m, last_n; BorderType border_type; int border; cudaArray *a_in; }; template <int R> struct recfilter5_plan_R : recfilter5_plan { recfilter5_plan_R() { // this should be initialized for upload_plan for(int i=0; i<R; ++i) weights[i] = 0; } dvector<Matrix<float,R,WS> > d_pybar, d_ezhat, d_ptucheck, d_etvtilde; Vector<float, R+1> weights; Matrix<float,R,WS> AFP_T, ARE_T; Matrix<float,WS,WS> AFB_T, ARB_T; Matrix<float,R,R> AbF_T, AbR_T, AbF, AbR, HARB_AFP_T, HARB_AFP; Matrix<float,R,WS> ARB_AFP_T, TAFB, HARB_AFB; }; namespace { const recfilter5_plan *g_loaded_plan_in_gpu = NULL; template<int R> void load_plan(const recfilter5_plan_R<R> &plan) { const recfilter5_plan_R<R> *gpu_plan = dynamic_cast<const recfilter5_plan_R<R> *>(g_loaded_plan_in_gpu); const_data<R> &cdata = get_cdata<R>::get(); if(!gpu_plan || gpu_plan->weights != plan.weights) { copy_to_symbol(cdata.weights, plan.weights); copy_to_symbol(cdata.AbF_T, plan.AbF_T); copy_to_symbol(cdata.AbR_T, plan.AbR_T); copy_to_symbol(cdata.HARB_AFP_T, plan.HARB_AFP_T); copy_to_symbol(cdata.AbF, plan.AbF); copy_to_symbol(cdata.AbR, plan.AbR); copy_to_symbol(cdata.HARB_AFP, plan.HARB_AFP); copy_to_symbol(cdata.ARE_T, plan.ARE_T); copy_to_symbol(cdata.ARB_AFP_T, plan.ARB_AFP_T); copy_to_symbol(cdata.TAFB, plan.TAFB); copy_to_symbol(cdata.HARB_AFB, plan.HARB_AFB); } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->border != plan.border) copy_to_symbol(cdata.border,plan.border); if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->rowstride!=plan.rowstride) copy_to_symbol(cdata.rowstride, plan.rowstride); if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->width != plan.width || g_loaded_plan_in_gpu->border != plan.border) { copy_to_symbol(cdata.width, plan.width); copy_to_symbol(cdata.inv_width, plan.inv_width); copy_to_symbol(cdata.m_size, plan.m_size); copy_to_symbol(cdata.last_m, plan.last_m); } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->height != plan.height || g_loaded_plan_in_gpu->border != plan.border) { copy_to_symbol(cdata.inv_height, plan.inv_height); copy_to_symbol(cdata.height, plan.height); copy_to_symbol(cdata.n_size, plan.n_size); copy_to_symbol(cdata.last_n, plan.last_n); } if(!g_loaded_plan_in_gpu) { t_in.normalized = true; t_in.filterMode = cudaFilterModePoint; } if(!g_loaded_plan_in_gpu || g_loaded_plan_in_gpu->border_type != plan.border_type) { switch(plan.border_type) { case CLAMP_TO_ZERO: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeBorder; break; case CLAMP_TO_EDGE: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeClamp; break; case REPEAT: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeWrap; break; case REFLECT: t_in.addressMode[0] = t_in.addressMode[1] = cudaAddressModeMirror; break; } } g_loaded_plan_in_gpu = &plan; } } // local namespace template <int R> void recfilter5(recfilter5_plan_R<R> &plan,float *d_output,const float *d_input) { load_plan(plan); cudaMemcpy2DToArray(plan.a_in, 0, 0, d_input, plan.rowstride*sizeof(float), plan.width*sizeof(float), plan.height, cudaMemcpyDeviceToDevice); cudaBindTextureToArray(t_in, plan.a_in); 
collect_carries<<< #if CUDA_SM >= 20 dim3((plan.m_size+2-1)/2, plan.n_size), #else dim3(plan.m_size, plan.n_size), #endif dim3(WS, W1) >>> (&plan.d_pybar, &plan.d_ezhat, &plan.d_ptucheck, &plan.d_etvtilde); adjust_carries<<< dim3(1,plan.n_size), dim3(WS, std::min<int>(plan.m_size, W23)) >>> (&plan.d_pybar, &plan.d_ezhat, plan.m_size, plan.n_size ); adjust_carries<<< dim3(plan.m_size,1), dim3(WS, std::min<int>(plan.n_size, W45)) >>> (&plan.d_ptucheck, &plan.d_etvtilde, &plan.d_pybar, &plan.d_ezhat, plan.m_size, plan.n_size ); write_result<<< #if CUDA_SM >= 20 dim3((plan.m_size+2-1)/2,plan.n_size), #else dim3(plan.m_size,plan.n_size), #endif dim3(WS, W6)>>> (d_output, &plan.d_pybar, &plan.d_ezhat, &plan.d_ptucheck, &plan.d_etvtilde); cudaUnbindTexture(t_in); } void recfilter5(recfilter5_plan *plan, float *d_output, const float *d_input) { assert(plan); if(recfilter5_plan_R<1> *plan_R = dynamic_cast<recfilter5_plan_R<1>*>(plan)) recfilter5(*plan_R, d_output, d_input); else if(recfilter5_plan_R<2> *plan_R = dynamic_cast<recfilter5_plan_R<2>*>(plan)) recfilter5(*plan_R, d_output, d_input); else throw std::runtime_error("Bad plan for recfilter5"); } void recfilter5(recfilter5_plan *plan, float *d_inout) { recfilter5(plan, d_inout, d_inout); } template <int R> recfilter5_plan * recfilter5_create_plan(int width, int height, int rowstride, const Vector<float, R+1> &w, BorderType border_type, int border) { recfilter5_plan_R<R> *plan = new recfilter5_plan_R<R>; try { update_plan<R>(plan, width, height, rowstride, w, border_type, border); load_plan(*plan); } catch(...) { delete plan; throw; } return plan; } void calc_borders(int *left, int *top, int *right, int *bottom, int w, int h, int border) { if(border > 0) { *left = border*32; *top = border*32; *right = (border+1)*32-(w%32); *bottom = (border+1)*32-(h%32); } else { *left = *top = 0; *right = 32-(w%32); if(*right == 32) *right = 0; *bottom = 32-(h%32); if(*bottom == 32) *bottom = 0; } } template <int R> void update_plan(recfilter5_plan *_plan, int width, int height, int rowstride, const Vector<float, R+1> &w, BorderType border_type, int border) { assert(_plan); recfilter5_plan_R<R> *plan = dynamic_cast<recfilter5_plan_R<R> *>(_plan); if(plan == NULL) throw std::invalid_argument("Can't change recfilter's plan order"); const int B = 32; int old_border = plan->border, old_width = plan->width, old_height = plan->height; if(old_width!=width || old_height!=height) { // let's do this first to at least have a passable strong // exception guarantee (this has more chance to blow up) cudaArray *a_in = NULL; cudaChannelFormatDesc ccd = cudaCreateChannelDesc<float>(); cudaMallocArray(&a_in, &ccd, width, height); check_cuda_error("cudaMallocArray"); try { if(plan->a_in) { cudaFreeArray(plan->a_in); check_cuda_error("cudaFreeArray"); plan->a_in = NULL; } } catch(...) 
{ cudaFreeArray(a_in); throw; } plan->a_in = a_in; } if(plan->weights != w) { Matrix<float,R,R> Ir = identity<float,R,R>(); Matrix<float,B,R> Zbr = zeros<float,B,R>(); Matrix<float,R,B> Zrb = zeros<float,R,B>(); Matrix<float,B,B> Ib = identity<float,B,B>(); // depends on weight plan->weights = w; plan->AFP_T = fwd(Ir, Zrb, w); plan->ARE_T = rev(Zrb, Ir, w); plan->AFB_T = fwd(Zbr, Ib, w); plan->ARB_T = rev(Ib, Zbr, w); plan->AbF_T = tail<R>(plan->AFP_T); plan->AbR_T = head<R>(plan->ARE_T); plan->AbF = transp(plan->AbF_T); plan->AbR = transp(plan->AbR_T); plan->HARB_AFP_T = plan->AFP_T*head<R>(plan->ARB_T); plan->HARB_AFP = transp(plan->HARB_AFP_T); plan->ARB_AFP_T = plan->AFP_T*plan->ARB_T; plan->TAFB = transp(tail<R>(plan->AFB_T)); plan->HARB_AFB = transp(plan->AFB_T*head<R>(plan->ARB_T)); } int bleft, bright, btop, bbottom; calc_borders(&bleft, &btop, &bright, &bbottom, width, height, border); // depends on width and border if(old_border != border || old_width != width) { plan->m_size = (width+bleft+bright+WS-1)/WS, plan->last_m = (bleft+width-1)/WS; plan->width = width; plan->inv_width = 1.f/width; } // depends on height and border if(old_border != border || old_height != height) { plan->n_size = (height+btop+bbottom+WS-1)/WS; plan->last_n = (btop+height-1)/WS; plan->height = height; plan->inv_height = 1.f/height; } // depends on width, height and border if(old_border!=border || old_width!=width || old_height!=height) { // TODO: provide strong exception guarantee of previous data // in case of any of these blowing up. plan->d_pybar.resize(plan->n_size*plan->m_size); plan->d_ezhat.resize(plan->n_size*plan->m_size); plan->d_ptucheck.resize(plan->n_size*plan->m_size); plan->d_etvtilde.resize(plan->n_size*plan->m_size); } // depends on rowstride plan->rowstride = rowstride; // depends on border plan->border_type = border_type; plan->border = border; } template recfilter5_plan * recfilter5_create_plan<1>(int width, int height, int rowstride, const Vector<float, 1+1> &w, BorderType border_type, int border); template recfilter5_plan * recfilter5_create_plan<2>(int width, int height, int rowstride, const Vector<float, 2+1> &w, BorderType border_type, int border); void free(recfilter5_plan *plan) { if(g_loaded_plan_in_gpu == plan) g_loaded_plan_in_gpu = NULL; delete plan; } } // namespace rod
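// ---------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the original source). It shows
// how the plan API above is intended to be driven: build a plan once for a
// given image size, reuse it for every buffer of that size, then release it.
// It assumes the project's own headers are in scope, that recfilter5_plan,
// recfilter5_create_plan, recfilter5 and free are the declarations given
// above in namespace rod, and that Vector<float,2> is element-addressable via
// operator[]; the function name, the weight values and the buffer d_img are
// illustrative only.
void run_first_order_recfilter(float *d_img, int width, int height)
{
    using namespace rod;

    Vector<float, 2> w;        // one feedforward + one feedback coefficient
    w[0] = 0.5f;               // example weights only, not tuned for a real filter
    w[1] = 0.5f;

    recfilter5_plan *plan =
        recfilter5_create_plan<1>(width, height, /*rowstride=*/width,
                                  w, CLAMP_TO_EDGE, /*border=*/1);

    recfilter5(plan, d_img);   // filter the device buffer in place
    rod::free(plan);           // also clears the cached GPU plan
}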
the_stack
// Moments.cu
// Computation of geometric moments

#include "Moments.h"

#include <iostream>
#include <stdio.h>
#include <cmath>
using namespace std;

#include "ErrorCode.h"

// Macro: M_PI
// The value of pi. On some operating systems M_PI may not be defined, so it is
// defined here as a fallback.
#ifndef M_PI
#define M_PI 3.14159265359
#endif

// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default thread-block dimensions.
#define DEF_BLOCK_X 256
#define DEF_BLOCK_Y 1

// Kernel: _accumulateImageKer (running sums of the source image)
// Computes geometric moments with the difference-moment-factor algorithm. For
// every row of the source image five successive running sums are formed and
// the values g1(j, 1), g2(j, 2), g3(j, 1) + g3(j, 2),
// g4(j, 1) + g4(j, 2) * 4 + g4(j, 3),
// g5(j, 1) + g5(j, 2) * 11 + g5(j, 3) * 11 + g5(j, 4) are kept, where j runs
// over the image height, so the output array accimg has size 5 * height. Each
// thread processes one row.
static __global__ void      // Kernel functions have no return value
_accumulateImageKer(
        ImageCuda img,      // input image
        double *accimg      // the five running sums of the source image
);

// Kernel: _accumulateConstantOneKer (running sums with the product term fixed to 1)
// Same difference-moment-factor scheme as above: five running sums per row,
// keeping g1(j, 1), g2(j, 2), g3(j, 1) + g3(j, 2),
// g4(j, 1) + g4(j, 2) * 4 + g4(j, 3),
// g5(j, 1) + g5(j, 2) * 11 + g5(j, 3) * 11 + g5(j, 4), with j running over the
// image height, so accimg has size 5 * height and each thread processes one
// row. Note that the product term (the pixel value) is fixed to 1 here.
static __global__ void      // Kernel functions have no return value
_accumulateConstantOneKer(
        ImageCuda img,      // input image
        double *accimg      // the five running sums of the source image
);

// Host function: complexMultiply (complex multiplication)
// Takes the real and imaginary parts of two complex numbers and returns the
// real and imaginary parts of their product.
__host__ int complexMultiply(double real1, double imag1, double real2,
                             double imag2, double *realout, double *imagout);

// Kernel: _accumulateImageKer (running sums of the source image)
static __global__ void _accumulateImageKer(ImageCuda img, double *accimg)
{
    // Row handled by this thread (one thread per row).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= img.imgMeta.height)
        return;

    // The five running sums for this row.
    double g11, g21, g31, g41, g51;

    // Accumulation runs from right to left, so initialize with the rightmost
    // element of the row.
    int index = img.pitchBytes * idx + (img.imgMeta.width - 1);
    g11 = g21 = g31 = g41 = g51 = img.imgMeta.imgData[index];

    // Accumulate along the row. This loop only runs down to element width - 3
    // for now, because the value of g54 has to be saved first.
    int tempidx = img.pitchBytes * idx;
    //int tempidx = img.imgMeta.width * idx;
    for (int i = img.imgMeta.width - 2; i >= 3; i--) {
        // The five running sums over the source image.
        g11 += img.imgMeta.imgData[tempidx + i];
        g21 += g11;
        g31 += g21;
        g41 += g31;
        g51 += g41;
    }

    // Save the value of g54.
    double g54 = g51;

    // Accumulate one more pixel, i.e. up to element width - 2 overall.
    g11 += img.imgMeta.imgData[tempidx + 2];
    g21 += g11;
    g31 += g21;
    g41 += g31;
    g51 += g41;

    // Save the values of g43 and g53.
    double g43 = g41;
    double g53 = g51;

    // Accumulate one more pixel, i.e. up to element width - 1 overall.
    g11 += img.imgMeta.imgData[tempidx + 1];
    g21 += g11;
    g31 += g21;
    g41 += g31;
    g51 += g41;

    // Save the values of g32, g42 and g52.
    double g32 = g31;
    double g42 = g41;
    double g52 = g51;

    // Accumulate the last pixel, i.e. the full row width; the running sums are
    // now complete.
    g11 += img.imgMeta.imgData[tempidx];
    g21 += g11;
    g31 += g21;
    g41 += g31;
    g51 += g41;

    // Store the accumulated results in the output array.
    accimg[idx] = g11;
    accimg[idx += img.imgMeta.height] = g21;
    accimg[idx += img.imgMeta.height] = g31 + g32;
    accimg[idx += img.imgMeta.height] = g41 + g42 * 4.0f + g43;
    accimg[idx += img.imgMeta.height] = g51 + g52 * 11.0f + g53 * 11.0f + g54;
}

// Kernel: _accumulateConstantOneKer (running sums with the product term fixed to 1)
static __global__ void _accumulateConstantOneKer(ImageCuda img, double *accimg)
{
    // Row handled by this thread (one thread per row).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= img.imgMeta.height)
        return;

    // The five running sums for this row.
    double g11, g21, g31, g41, g51;

    // Accumulation runs from right to left; since the product term is the
    // constant 1, initialize with 1.
    g11 = g21 = g31 = g41 = g51 = 1.0f;

    // Accumulate along the row. This loop only runs down to element width - 3
    // for now, because the value of g54 has to be saved first.
    for (int i = img.imgMeta.width - 2; i >= 3; i--) {
        // The five running sums with a constant product term of 1.
        g11 += 1.0f;
        g21 += g11;
        g31 += g21;
        g41 += g31;
        g51 += g41;
    }
    // Save the value of g54.
    double g54 = g51;

    // Accumulate one more element, i.e. up to element width - 2 overall (the
    // product term stays 1).
    g11 += 1.0f;
    g21 += g11;
    g31 += g21;
    g41 += g31;
    g51 += g41;

    // Save the values of g43 and g53.
    double g43 = g41;
    double g53 = g51;

    // Accumulate one more element, i.e. up to element width - 1 overall.
    g11 += 1.0f;
    g21 += g11;
    g31 += g21;
    g41 += g31;
    g51 += g41;

    // Save the values of g32, g42 and g52.
    double g32 = g31;
    double g42 = g41;
    double g52 = g51;

    // Accumulate the last element, i.e. the full row width; the running sums
    // are now complete.
    g11 += 1.0f;
    g21 += g11;
    g31 += g21;
    g41 += g31;
    g51 += g41;

    // Store the accumulated results in the output array.
    accimg[idx] = g11;
    accimg[idx += img.imgMeta.height] = g21;
    accimg[idx += img.imgMeta.height] = g31 + g32;
    accimg[idx += img.imgMeta.height] = g41 + g42 * 4.0f + g43;
    accimg[idx += img.imgMeta.height] = g51 + g52 * 11.0f + g53 * 11.0f + g54;
}

// Host member function: spatialMoments (compute the spatial moments)
__host__ int Moments::spatialMoments(Image *img, MomentSet *momset)
{
    // Check whether the input image is NULL; if so, return an error directly.
    if (img == NULL)
        return NULL_POINTER;

    // Check whether momset is NULL.
    if (momset == NULL)
        return NULL_POINTER;

    // This block performs the image preprocessing, which mainly prepares
    // device memory for the input image so it can hold the data.
    int errcode;  // local variable, error code

    // Copy the input image to device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(img);
    if (errcode != NO_ERROR)
        return errcode;

    // Extract the ROI sub-image of the input image.
    ImageCuda subimgCud;
    errcode = ImageBasicOp::roiSubImage(img, &subimgCud);
    if (errcode != NO_ERROR)
        return errcode;

    // Allocate space on the device and initialize it to 0.
    cudaError_t cudaerrcode;
    double *accimgdev;
    int datasize = 5 * subimgCud.imgMeta.height * sizeof (double);
    cudaerrcode = cudaMalloc((void**)&accimgdev, datasize);
    if (cudaerrcode != cudaSuccess)
        return cudaerrcode;

    // Initialize the device memory.
    cudaerrcode = cudaMemset(accimgdev, 0, datasize);
    if (cudaerrcode != cudaSuccess)
        return cudaerrcode;

    // Compute the thread-block size and the number of blocks for the kernel
    // launch.
    dim3 blocksize, gridsize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (subimgCud.imgMeta.height + DEF_BLOCK_X - 1) / DEF_BLOCK_X;
    gridsize.y = DEF_BLOCK_Y;

    // Normal case: the moments are weighted by the gray values of the image.
    if (!this->isconst) {
        // Launch the kernel that forms the five running sums per row, one
        // thread per row.
        _accumulateImageKer<<<gridsize, blocksize>>>(subimgCud, accimgdev);

    // Otherwise the gray-value factor is treated as the constant 1.
    } else {
        // Launch the kernel that forms the five running sums per row, one
        // thread per row.
        _accumulateConstantOneKer<<<gridsize, blocksize>>>(subimgCud,
                                                           accimgdev);
    }

    // If the CUDA launch failed, return an error code.
    if (cudaGetLastError() != cudaSuccess) {
        cudaFree(accimgdev);
        return CUDA_ERROR;
    }

    // Copy the device-side results back to the host.
    double *accimg = new double[5 * subimgCud.imgMeta.height];
    cudaerrcode = cudaMemcpy(accimg, accimgdev, datasize,
                             cudaMemcpyDeviceToHost);
    if (cudaerrcode != cudaSuccess)
        return cudaerrcode;

    // Five iterated running sums over the first column of g1(i, j).
    int tempidx;
    double r11, r21, r31, r41, r51, r32 = 0.0f, r42 = 0.0f, r43 = 0.0f,
           r52 = 0.0f, r53 = 0.0f, r54 = 0.0f;
    tempidx = subimgCud.imgMeta.height - 1;
    r11 = r21 = r31 = r41 = r51 = accimg[tempidx];

    // Four iterated running sums over the first column of g2(i, j).
    double s11, s21, s31, s41, s32 = 0.0f, s42 = 0.0f, s43 = 0.0f;
    tempidx += subimgCud.imgMeta.height;
    s11 = s21 = s31 = s41 = accimg[tempidx];

    // Three running sums over t0(j) = g3(j, 1) + g3(j, 2).
    double t11, t21, t31, t32 = 0.0f;
    tempidx += subimgCud.imgMeta.height;
    t11 = t21 = t31 = accimg[tempidx];

    // Two running sums over u0(j) = g4(j, 1) + g4(j, 2) * 4 + g4(j, 3).
    double u11, u21;
    tempidx += subimgCud.imgMeta.height;
    u11 = u21 = accimg[tempidx];

    // One running sum over
    // p0(j) = g5(j, 1) + g5(j, 2) * 11 + g5(j, 3) * 11 + g5(j, 4).
    tempidx += subimgCud.imgMeta.height;
    double p11 = accimg[tempidx];

    // Accumulate over the rows. Along the way some extra values have to be
    // captured, namely r54, r53, r52, r43, r42, s43, s42, s32, t32.
    for
(int i = subimgCud.imgMeta.height - 2; i >= 0; i--) { // 对 g1(j) 进行 5 次累进求和。 r11 += accimg[i]; r21 += r11; r31 += r21; r41 += r31; r51 += r41; // 对 g2(j) 进行 4 次累进求和。 tempidx = i + subimgCud.imgMeta.height; s11 += accimg[tempidx]; s21 += s11; s31 += s21; s41 += s31; // 对 t0(j) 进行 3 次累进求和。 tempidx += subimgCud.imgMeta.height; t11 += accimg[tempidx]; t21 += t11; t31 += t21; // 对 u0(j) 进行 2 次累进求和。 tempidx += subimgCud.imgMeta.height; u11 += accimg[tempidx]; u21 += u11; // 对 p0(j) 进行 1 次累进求和。 tempidx += subimgCud.imgMeta.height; p11 += accimg[tempidx]; // 计算 r54。 if (i == 3) { r54 = r51; } // 计算 r53, r43, s43。 if (i == 2) { r43 = r41; r53 = r51; s43 = s41; } // 计算 r32, r42, r52, s42, s32, t32。 if (i == 1) { r32 = r31; r42 = r41; r52 = r51; s32 = s31; s42 = s41; t32 = t31; } } // 根据上面计算的变量,对 MomentSet 中的 15 个空间矩进行赋值。 momset->m00 = r11; momset->m10 = s11; momset->m01 = r21; momset->m20 = t11; momset->m11 = s21; momset->m02 = r31 + r32; momset->m30 = u11; momset->m21 = t21; momset->m12 = s31 + s32; momset->m03 = r41 + r42 * 4.0f + r43; momset->m22 = t31 + t32; momset->m31 = u21; momset->m13 = s41 + s42 * 4.0f + s43; momset->m40 = p11; momset->m04 = r51 + r52 * 11.0f + r53 * 11.0f +r54; // 释放 Device 端空间。 cudaFree(accimgdev); // 释放 Host 端空间。 delete [] accimg; // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:centralMoments(计算中心矩) __host__ int Moments::centralMoments(Image *img, MomentSet *momset) { // 局部变量,错误码 int errcode; // 首先计算空间矩。 errcode = spatialMoments(img, momset); if (errcode != NO_ERROR) return errcode; // 对 MomentSet 中的中心距 mu00, mu10, mu01 进行赋值。 momset->mu00 = momset->m00; momset->mu10 = 0.0f; momset->mu01 = 0.0f; // 如果 mu00 不为 0 的话,继续计算其他中心矩的数值。 if (!(fabs(momset->m00) < 0.000001)) { // meanX 和 meanY 是形状的分布重心。 double meanX = momset->m10 / momset->m00; double meanY = momset->m01 / momset->m00; // 判断是否需要调整中心坐标。 if (this->adjustcenter == true) { momset->mu20 = momset->m20 - meanX * momset->m10; momset->mu02 = momset->m02 - meanY * momset->m01; // 计算偏差。 double xs = sqrt(momset->mu20 / momset->mu00); double ys = sqrt(momset->mu02 / momset->mu00); // 重新定义中心。 meanX = meanX - xs; meanY = meanY - ys; } // 定义中间变量。 double meanX2 = meanX * meanX; double meanY2 = meanY * meanY; // 计算其余的中心矩的数值。 momset->mu20 = momset->m20 - meanX * momset->m10; momset->mu11 = momset->m11 - meanY * momset->m10; momset->mu02 = momset->m02 - meanY * momset->m01; momset->mu30 = momset->m30 - 3.0f * meanX * momset->m20 + 2.0f * meanX2 * momset->m10; momset->mu21 = momset->m21 - 2.0f * meanX * momset->m11 + 2.0f * meanX2 * momset->m01- meanY * momset->m20; momset->mu12 = momset->m12 - 2.0f * meanY * momset->m11 + 2.0f * meanY2 * momset->m10 - meanX * momset->m02; momset->mu03 = momset->m03 - 3.0f * meanY * momset->m02 + 2.0f * meanY2 * momset->m01; momset->mu22 = momset->m22 - 2.0f * meanY * momset->m21 + meanY2 * momset->m20 - 2.0f * meanX * momset->m12 + 4.0f * meanX * meanY * momset->m11 - 2.0f * meanX * meanY * momset->m10 + meanX2 * momset->m02 - 3.0f * meanX2 * meanY * momset->m01; momset->mu31 = momset->m31 - meanY * momset->m30 - 3.0f * meanX * momset->m21 + 3.0f * meanX * meanY * momset->m20 + 3.0f * meanX2 * momset->m11 - 3.0f * meanX2 * meanY * momset->m10; momset->mu13 = momset->m13 - 3.0f * meanY * momset->m12 + 3.0f * meanY2 * momset->m11 - meanX * momset->m03 + 3.0f * meanX * meanY * momset->m02 - 3.0f * meanX * meanY2 * momset->m01; momset->mu40 = momset->m40 - 4.0f * meanX * momset->m30 + 6.0f * meanX2 * momset->m20 - 3.0f * meanX2 * meanX * momset->m10; momset->mu04 = momset->m04 - 4.0f * meanY * momset->m03 + 
6.0f * meanY2 * momset->m02 - 3.0f * meanY2 * meanY * momset->m01; // 否则为了避免产生除 0 的错误,直接退出。 } else { return OP_OVERFLOW; } // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:centralMoments(计算形状的分布重心和方向) __host__ int Moments::centralMoments(Image *img, double centers[2], double *angle) { // 检查输入参数是否为空。 if (centers == NULL || angle == NULL) return NULL_POINTER; // 局部变量,错误码 int errcode; // 声明 MomentSet 结构体变量。 MomentSet momset; // 计算中心矩的各项值。 errcode = centralMoments(img, &momset); if (errcode != NO_ERROR) return errcode; // 计算几何分布重心。 centers[0] = momset.m10 / momset.m00; centers[1] = momset.m01 / momset.m00; // 计算几何分布方向。 double u_20 = momset.mu20 / momset.mu00; double u_02 = momset.mu02 / momset.mu00; double u_11 = momset.mu11 / momset.mu00; // 如果 u_11 不等于 0,并且 u_20 不等于 u_02的话,计算方向角度。 if (!(fabs(u_11) < 0.000001) && !(fabs(u_20 - u_02) < 0.000001)) { // 计算角度大小。 *angle = (atan(2.0f * u_11 / (u_20 - u_02)) / 2.0f); // 否则为了避免产生除 0 的错误,直接退出。 } else { // 特殊标识。 *angle = -2 * M_PI; } // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:huMoments(计算 Hu 矩) __host__ int Moments::huMoments(Image *img, MomentSet *momset) { // 局部变量,错误码 int errcode; // 首先计算中心矩。 errcode = centralMoments(img, momset); if (errcode != NO_ERROR) return errcode; // 标准化中心距。 double p1 = pow(momset->mu00, 2.0f); double p2 = pow(momset->mu00, 2.5f); double n11 = momset->mu11 / p1; double n02 = momset->mu02 / p1; double n20 = momset->mu20 / p1; double n12 = momset->mu12 / p2; double n21 = momset->mu21 / p2; double n03 = momset->mu03 / p2; double n30 = momset->mu30 / p2; // 声明中间变量,方便简化后面的语句。 double temp1 = n20 - n02; double temp2 = n30 - 3.0f * n12; double temp3 = 3.0f * n21 - n03; double temp4 = n30 + n12; double temp5 = n21 + n03; // 计算 Hu moments 的 8 个值。 momset->hu1 = n20 + n02; momset->hu2 = temp1 * temp1 + 4.0f * n11 * n11; momset->hu3 = temp2 * temp2 + temp3 * temp3; momset->hu4 = temp4 * temp4 + temp5 * temp5; momset->hu5 = temp2 * temp4 * (temp4 * temp4 - 3.0f * temp5 * temp5) + temp3 * temp5 * (3.0f * temp4 * temp4 - temp5 * temp5); momset->hu6 = temp1 * (temp4 * temp4 - temp5 * temp5) + 4.0f * n11 * temp4 * temp5; momset->hu7 = temp3 * temp4 * (temp4 * temp4 - 3.0f * temp5 * temp5) - temp2 * temp5 * (3.0f * temp4 * temp4 - temp5 * temp5); // Hu 矩的扩展,增加了一个不变量。 momset->hu8 = n11 * (temp4 * temp4 - temp5 * temp5) - temp1 * temp4 * temp5; // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:affineMoments(计算 affine 矩) __host__ int Moments::affineMoments(Image *img, MomentSet *momset) { // 局部变量,错误码 int errcode; // 首先计算中心矩。 errcode = centralMoments(img, momset); if (errcode != NO_ERROR) return errcode; // 获得中心矩。 double u11 = momset->mu11; double u20 = momset->mu20; double u02 = momset->mu02; double u12 = momset->mu12; double u21 = momset->mu21; double u30 = momset->mu30; double u03 = momset->mu03; double u13 = momset->mu13; double u31 = momset->mu31; double u22 = momset->mu22; double u40 = momset->mu40; double u04 = momset->mu04; /* // 计算 9 个 affine moment invariants double s = momset->mu00; momset->ami1 = (u20 * u02 − u11 * u11) / pow(s, 4); momset->ami2 = (-u30 * u30 * u03 * u03 + 6 * u30 * u21 * u12 * u03 − 4 * u30 * u12 * u12 * u12 − 4 * u21 * u21 * u21 * u03 + 3 * u21 * u21 * u12 * u12) / pow(s, 10); momset->ami3 = (u20 * u21 * u03 − u20 * u12 * u12− u11 * u30 * u03 + u11 * u21 * u12 + u02 * u30 * u12− u02 * u21 * u21) / pow(s, 7); momset->ami4 = (−u20 * u20 * u20 * u03 * u03 + 6 * u20 * u20 * u11 * u12 * u03 – 3 * u20 * u20 * u02 * u12 * u12 − 6 * u20 * u11 * u11 * u21 * u03 – 6 * u20 * u11 * u11 * u12 * u12 + 12 * u20 * u11 * u02 * u21 * u12 – 3 * 
u20 * u02 * u02 * u21 * u21 + 2 * u11 * u11 * u11 * u30 * u03 + 6 * u11 * u11 * u11 * u21 * u12 – 6 * u11 * u11 * u02 * u30 * u12 – 6 * u11 * u11 * u02 * u21 * u21 + 6 * u11 * u02 * u02 * u30 * u21 − u02 * u02 * u02 * u30 * u30) / pow(s, 11); momset->ami6 = (u40 * u04 – 4 * u31 * u13 + 3 * u22 * u22 ) / pow(s, 6); momset->ami7 = (u40 * u22 * u04 − u40 * u13 * u13 − u31 * u31 * u04 + 2 * u31 * u22 * u13 − u22 * u22 * u22) / pow(s, 9); momset->ami8 = (u20 * u20 * u04 – 4 * u20 * u11 * u13 + 2 * u20 * u02 * u22 + 4 * u11 * u11 * u22 − 4 * u11 * u02 * u31 + u02 * u02 * u40) / pow(s, 7); momset->ami9 = (u20 * u20 * u22 * u04 − u20 * u20 * u13 * u13− 2 * u20 * u11 * u31 * u04 + 2 * u20 * u11 * u22 * u13 + u20 * u02 * u40 * u04 – 2 * u20 * u02 * u31 * u13 + u20 * u02 * u22 * u22 + 4 * u11 * u11 * u31 * u13 – 4 * u11 * u11 * u22 * u22 − 2 * u11 * u02 * u40 * u13 + 2 * u11 * u02 * u31 * u22 + u02 * u02 * u40 * u22 − u02 * u02 * u31 * u31) / pow(s, 10); momset->ami19 = (u20 * u30 * u12 * u04 − u20 * u30 * u03 * u13 − u20 * u21 * u21 * u04 + u20 * u21 * u12 * u13 + u20 * u21 * u03 * u22 − u20 * u12 * u12 * u22 – 2 * u11 * u30 * u12 * u13 + 2 * u11 * u30 * u03 * u22 + 2 * u11 * u21 * u21 * u13 – 2 * u11 * u21 * u12 * u22 – 2 * u11 * u21 * u03 * u31 + 2 * u11 * u12 * u12 * u31 + u02 * u30 * u12 * u22 − u02 * u30 * u03 * u31 – u02 * u21 * u21 * u22 + u02 * u21 * u12 * u31 + u02 * u21 * u03 * u40 − u02 * u12 * u12 * u40) / pow(s, 10); */ // 定义一些中间变量。 double temp1 = u20 * u02; double temp2 = u11 * u11; double temp3 = u30 * u30; double temp4 = u03 * u03; double temp5 = u03 * u30; double temp6 = u21 * u12; double temp7 = u12 * u12 * u12; double temp8 = u21 * u21 * u21; double temp9 = u21 * u21; double temp10 = u12 * u12; double temp11 = u20 * u20 * u20; double temp12 = u20 * u20; double temp13 = u11 * u11 * u11; double temp14 = u02 * u02; double temp15 = u02 * u02 * u02; double temp16 = u40 * u04; double temp17 = u31 * u13; double temp18 = u22 * u22; double temp19 = u13 * u13; double temp20 = u31 * u31; // 计算 9 个 affine moment invariants double s4 = pow(momset->mu00, 4); double s6 = pow(momset->mu00, 6); double s7 = s6 * momset->mu00; double s9 = pow(momset->mu00, 9); double s10 = s9 * momset->mu00; double s11 = s10 * momset->mu00; momset->ami1 = (temp1 - temp2) / s4; momset->ami2 = (-temp3 * temp4 + 6 * temp5 * temp6 - 4 * u30 * temp7 - 4 * temp8 * u03 + 3 * temp9 * temp10) / s10; momset->ami3 = (u20 * u21 * u03 - u20 * temp10 - u11 * temp5 + u11 * temp6 + u02 * u30 * u12 - u02 * temp9) / s7; momset->ami4 = (-temp11 * temp4 + 6 * temp12 * u11 * u12 * u03 - 3 * temp12 * u02 * temp10 - 6 * u20 * temp2 * u21 * u03 - 6 * u20 * temp2 * temp10 + 12 * temp1 * u11 * temp6 - 3 * temp1 * u02 * temp9 + 2 * temp13 * temp5 + 6 * temp13 * temp6 - 6 * temp2 * u02 * u30 * u12 - 6 * temp2 * u02 * temp9 + 6 * u11 * temp14 * u30 * u21 - temp15 * temp3) / s11; momset->ami6 = (temp16 - 4 * temp17 + 3 * temp18) / s6; momset->ami7 = (temp16 * u22 - u40 * temp19 - temp20 * u04 + 2 * u22 * temp17 - temp18 * u22) / s9; momset->ami8 = (temp12 * u04 - 4 * u20 * u11 * u13 + 2 * temp1 * u22 + 4 * temp2 * u22 - 4 * u11 * u02 * u31 + temp14 * u40) / s7; momset->ami9 = (temp12 * u22 * u04 - temp12 * temp19 - 2 * u20 * u11 * u31 * u04 + 2 * u20 * u11 * u22 * u13 + temp1 * temp16 - 2 * temp1 * temp17 + temp1 * temp18 + 4 * temp2 * temp17 - 4 * temp2 * temp18 - 2 * u11 * u02 * u40 * u13 + 2 * u11 * u02 * u31 * u22 + temp14 * u40 * u22 - temp14 * temp20) / s10; momset->ami19 = (u20 * u30 * u12 * u04 - u20 * temp5 * u13 - u20 * temp9 * u04 + 
u20 * temp6 * u04 + u20 * temp6 * u13 + u20 * u21 * u03 * u22 - u20 * temp10 * u22 - 2 * u11 * u30 * u12 * u13 + 2 * u11 * temp5 * u22 + 2 * u11 * temp9 * u13 - 2 * u11 * temp6 * u22 - 2 * u11 * u21 * u03 * u31 + 2 * u11 * temp10 * u31 + u02 * u30 * u12 * u22 - u20 * temp5 * u31 - u02 * temp9 * u22 + u02 * temp6 * u31 + u02 * u21 * u03 * u40 - u02 * temp10 * u40) / s10; // 处理完毕,退出。 return NO_ERROR; } // Host 方法:complexMultiply(复数乘法运算) __host__ int complexMultiply(double real1, double imag1, double real2, double imag2, double *realout, double *imagout) { // 两复数的乘法运算。 *realout = real1 * real2 - imag1 * imag2; *imagout = imag1 * real2 + real1 * imag2; // 处理完毕,退出。 return NO_ERROR; } // Host 成员方法:flusserMoments(计算 flusser 矩) __host__ int Moments::flusserMoments(Image *img, MomentSet *momset) { // 局部变量,错误码。 int errcode; // 使用调整的中心距。 errcode = setAdjustcenter(true); if (errcode != NO_ERROR) return errcode; // 首先计算中心矩。 errcode = centralMoments(img, momset); if (errcode != NO_ERROR) return errcode; // 获得中心矩。 double u11 = momset->mu11; double u20 = momset->mu20; double u02 = momset->mu02; double u12 = momset->mu12; double u21 = momset->mu21; double u30 = momset->mu30; double u03 = momset->mu03; double u13 = momset->mu13; double u31 = momset->mu31; double u22 = momset->mu22; double u40 = momset->mu40; double u04 = momset->mu04; // 归一化中心距。 double temp1 = pow(momset->mu00, 2); double temp2 = pow(momset->mu00, 2.5f); double temp3 = pow(momset->mu00, 3); double n11 = u11 / temp1; double n20 = u20 / temp1; double n02 = u02 / temp1; double n12 = u12 / temp2; double n21 = u21 / temp2; double n30 = u30 / temp2; double n03 = u03 / temp2; double n13 = u13 / temp3; double n31 = u31 / temp3; double n22 = u22 / temp3; double n40 = u40 / temp3; double n04 = u04 / temp3; // 计算 11 个 flusser moments。 // 计算 flu1。 momset->flu1 = n20 + n02; // 计算 flu2。 double c21[2]; c21[0] = n12 + n30; c21[1] = n03 + n21; double c12[2]; c12[0] = n12 + n30; c12[1] = -n03 - n21; // c21 乘以 c12。 double c21c12[2]; complexMultiply(c21[0], c21[1], c21[0], c21[1], &c21c12[0], &c21c12[1]); momset->flu2 = c21c12[0]; // 计算 flu3, flu4。 double c20[2]; c20[0] = n20 - n02; c20[1] = 2 * n11; // c12 的平方。 double c12p2[2]; complexMultiply(c12[0], c12[1], c12[0], c12[1], &c12p2[0], &c12p2[1]); // c20 乘以 c12 的平方。 double c20c12p2[2]; complexMultiply(c20[0], c20[1], c12p2[0], c12p2[1], &c20c12p2[0], &c20c12p2[1]); momset->flu3 = c20c12p2[0]; momset->flu4 = c20c12p2[1]; // 计算 flu5, flu6。 double c30[2]; c30[0] = n30 - 3 * n12; c30[1] = 3 * n21 - n03; // c12 的 3 次方。 double c12p3[2]; complexMultiply(c12[0], c12[1], c12p2[0], c12p2[1], &c12p3[0], &c12p3[1]); // c30 乘以 c12 的 3 次方。 double c30c12p3[2]; complexMultiply(c30[0], c30[1], c12p3[0], c12p3[1], &c30c12p3[0], &c30c12p3[1]); momset->flu5 = c30c12p3[0]; momset->flu6 = c30c12p3[1]; // 计算 flu7。 momset->flu7 = n04 + 2 * n22 + n40; // 计算 flu8, flu9。 double c31[2]; c31[0] = n40 - n04; c31[1] = 2 * n13 + 2 * n31; // c31 乘以 c12 的平方。 double c31c12p2[2]; complexMultiply(c31[0], c31[1], c12p2[0], c12p2[1], &c31c12p2[0], &c31c12p2[1]); momset->flu8 = c31c12p2[0]; momset->flu9 = c31c12p2[1]; // 计算 flu10, flu11。 double c40[2]; c40[0] = n04 + n40 - 6 * n22; c40[1] = 4 * n13 + 4 * n31; // c12 的 4 次方。 double c12p4[2]; complexMultiply(c12[0], c12[1], c12p3[0], c12p3[1], &c12p4[0], &c12p4[1]); // c40 乘以 c12 的 4 次方。 double c40c12p4[2]; complexMultiply(c40[0], c40[1], c12p4[0], c12p4[1], &c40c12p4[0], &c40c12p4[1]); momset->flu10 = c40c12p4[0]; momset->flu11 = c40c12p4[1]; // 处理完毕,退出。 return NO_ERROR; }
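// ---------------------------------------------------------------------------
// Verification sketch (editor's addition, not part of the original source).
// The accumulation kernels above evaluate the geometric moments
//     m_pq = sum_y sum_x x^p * y^q * f(x, y)
// through repeated running sums (the difference-moment-factor scheme described
// in the comments). The brute-force host routine below evaluates the same
// definition directly, which is convenient for cross-checking spatialMoments()
// on a small test image. Because the running sums accumulate from the right
// edge and from the bottom row, the scheme weights pixels by their 1-based
// column/row index, so the reference uses (x + 1) and (y + 1); adjust the
// origin if a different convention is wanted. The raw-buffer interface
// (unsigned char pixels, row pitch in bytes) mirrors ImageCuda but is only an
// illustrative stand-in.
static void referenceSpatialMoments(const unsigned char *img, int width,
                                    int height, int pitch, double m[5][5])
{
    for (int p = 0; p < 5; p++)
        for (int q = 0; q < 5; q++)
            m[p][q] = 0.0;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            double v = img[y * pitch + x];
            double xp = 1.0;
            for (int p = 0; p < 5; p++) {          // xp = (x + 1)^p
                double yq = 1.0;
                for (int q = 0; q < 5; q++) {      // yq = (y + 1)^q
                    m[p][q] += xp * yq * v;        // m_pq += x^p * y^q * f(x, y)
                    yq *= (y + 1);
                }
                xp *= (x + 1);
            }
        }
    }
}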
the_stack
#define ROUND_OFF 50000 #define CUDA_NUM_THREADS 1024 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define CUDA_KERNEL_LOOP(i, n) for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) #define GET_BLOCKS(n, t) (n+t-1) / t // == Dimension rearrangement Kernel __global__ void blob_rearrange_kernel2(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; float value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + padding); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } void blob_rearrange_ongpu(const float *in, float *out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight, cudaStream_t stream) { int threads_per_block=16; dim3 totalBlocksRearr((widthheight-1)/threads_per_block+1, channels, num); cudaError_t err; blob_rearrange_kernel2<<<totalBlocksRearr, threads_per_block, 0, stream>>> (in, out, num, channels, width, height, widthheight, padding, pwidthheight); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed: %s\n", cudaGetErrorString(err)); exit(-1); } } // == Correlation Kernel __global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const float *bottom0, const float *bottom1, float *top) { extern __shared__ char patch_data_char[]; float *patch_data = (float *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1 + max_displacement; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ float sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute correlation for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int y2 = y1 + s2p; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { float total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { 
total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } __global__ void CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const float *bottom0, const float *bottom1, float *top) { CUDA_KERNEL_LOOP(index, nthreads) { int x = index % topwidth; //w-pos int y = (index / topwidth) % topheight; //h-pos int c = (index / topwidth / topheight) % topchannels; //channels // Offset of patch in image 2 int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2; // First (upper left) position of kernel center in current neighborhood in image 1 int x1 = x*stride1 + kernel_radius + max_displacement; int y1 = y*stride1 + kernel_radius + max_displacement; // Iterate through 3D patch float sum = 0; for(int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT for(int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH for(int l = 0; l < bottomchannels; l++) { // CHANNELS // Calculate position in image 2 int x2 = x1 + s2o; int y2 = y1 + s2p; // Indices in bottom data: (CH=l,W=x2,H=y2,N) int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + l; int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + l; // Do the correlation: sum += fabsf(bottom0[idx1] - bottom1[idx2]); } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; top[index + item*topcount] = sum / (float)sumelems; } } void CorrelateData_ongpu(const float *rbot1, const float *rbot2, float *output, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int kernel_size, int stride1, int stride2, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int corr_type_multiply, cudaStream_t stream) { dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int shared_memory_per_block = (kernel_size*kernel_size)*nInputPlane; int outputCount = nOutputCols * nOutputRows * nOutputPlane; int outputThreadCount = outputCount; if (corr_type_multiply == 1) { dim3 totalBlocksCorr(nOutputCols, nOutputRows, batchSize); CorrelateData<<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(float), stream>>>( outputThreadCount, batchSize, nOutputCols, nOutputRows, nOutputPlane, outputCount, max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, kernel_size, stride1, stride2, paddedbottomwidth, paddedbottomheight, nInputPlane, rbot1, rbot2, output ); } else { for (int n = 0; n < batchSize; n++) { CorrelateDataSubtract<<<GET_BLOCKS(outputThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( outputThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, outputCount, max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1, stride2, paddedbottomwidth, paddedbottomheight, nInputPlane, rbot1, rbot2, output ); } } } // == Correlation Backward Pass Kernel (For Blob 0) __global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int 
topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, float *bottom0diff, const float *bottom1, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 float sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n; float bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) __global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const float *bottom0, float *bottom1diff, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; 
//w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; float sum = 0; for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n; float bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Kernel Subtraction // == Correlation Backward Pass Kernel (For Blob 0) __global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, float *bottom0diff, const float *bottom0, const float *bottom1, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 float sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n; float bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m+s2p,n] float bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m+s2p,n] float sign = (bot0tmp >= bot1tmp) ? float(1.0) : float(-1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom0diff[index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) __global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const float *bottom0, const float *bottom1, float *bottom1diff, const float *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. 
const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; float sum = 0; for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n; float bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m+s2p,n] float bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m+s2p,n] float sign = (bot0tmp >= bot1tmp) ? float(-1.0) : float(1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom1diff[index + item*bottomcount] = sum / (float)sumelems; } } void CorrelateDataBackward_ongpu(const float *rbot1, const float *rbot2, const float *gradOutput, float *gradInput1, float *gradInput2, int batchSize, int nOutputCols, int nOutputRows, int nOutputPlane, int max_displacement, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1, int stride2, int nInputCols, int nInputRows, int paddedbottomwidth, int paddedbottomheight, int nInputPlane, int pad_size, int corr_type_multiply, cudaStream_t stream) { int inputCount = nInputPlane * nInputRows * nInputCols; int botThreadCount = inputCount; if (corr_type_multiply == 1) { // == Run kernel Backward 0 for (int n = 0; n < batchSize; n++) { //Bottom0 CorrelateDataBackward0<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size, gradInput1, rbot2, gradOutput ); } // == Run kernel Backward 1 for (int n = 0; n < batchSize; n++) { CorrelateDataBackward1<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, 
paddedbottomheight, nInputPlane, inputCount, pad_size, rbot1, gradInput2, gradOutput ); } } else { for ( int n = 0; n < batchSize; n++ ) { //Bottom0 CorrelateDataBackward0Subtract<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>> ( botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size, gradInput1, rbot1, rbot2, gradOutput ); } for (int n = 0; n < batchSize; n++ ) { //Bottom0 CorrelateDataBackward1Subtract<<<GET_BLOCKS(botThreadCount, CUDA_NUM_THREADS), CUDA_NUM_THREADS, 0, stream>>>( botThreadCount, batchSize, n, nOutputCols, nOutputRows, nOutputPlane, max_displacement, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1, stride2, nInputCols, nInputRows, paddedbottomwidth, paddedbottomheight, nInputPlane, inputCount, pad_size, rbot1, rbot2, gradInput2, gradOutput ); } } }
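// ---------------------------------------------------------------------------
// Illustration (editor's addition, not part of the original source). The
// backward kernels above rely on the ROUND_OFF trick to obtain ceil()/floor()
// semantics from C integer division even when the numerator is negative
// (plain '/' truncates toward zero). The helpers below spell the trick out and
// check it against a straightforward signed ceiling; the function names and
// the self-test are illustrative only.
#include <assert.h>

static int ceil_div_round_off(int a, int stride)
{
    const int round_off   = ROUND_OFF;      // large positive shift, defined at the top of this file
    const int round_off_s = stride * round_off;
    // Shift a into the positive range, where (x - 1) / s + 1 == ceil(x / s),
    // then subtract the round_off whole quotients introduced by the shift.
    return (a + round_off_s - 1) / stride + 1 - round_off;
}

static int ceil_div_reference(int a, int stride)
{
    // Branchy signed ceiling for positive stride, used only for the check.
    return (a >= 0) ? (a + stride - 1) / stride : -((-a) / stride);
}

static void test_ceil_div_round_off()
{
    for (int stride = 1; stride <= 4; stride++)
        for (int a = -50; a <= 50; a++)
            assert(ceil_div_round_off(a, stride) == ceil_div_reference(a, stride));
}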
the_stack
/*
 * Quadro and Tesla GPUs with compute capability >= 2.0 can overlap two memcopies
 * with kernel execution. This sample illustrates the usage of CUDA streams to
 * achieve overlapping of kernel execution with copying data to and from the device.
 *
 * Additionally, this sample uses CUDA events to measure elapsed time for
 * CUDA calls. Events are a part of the CUDA API and provide a system-independent
 * way to measure execution times on CUDA devices with approximately 0.5
 * microsecond precision.
 *
 * Elapsed times are averaged over nreps repetitions (10 by default).
 *
 */

#include <shrUtils.h>

const char *sSDKname = "simpleMultiCopy";

// includes, system
#include <stdio.h>

// includes, project
#include <sdkHelper.h>  // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h>

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err)  __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
    if( cudaSuccess != err) {
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
                file, line, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg)  __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err) {
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
                file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}

// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
    int deviceCount;
    checkCudaErrors(cudaGetDeviceCount(&deviceCount));
    if (deviceCount == 0) {
        fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
        exit(-1);
    }
    if (devID < 0)
        devID = 0;
    if (devID > deviceCount-1) {
        fprintf(stderr, "\n");
        fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
        fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. 
<<\n", devID); fprintf(stderr, "\n"); return -devID; } cudaDeviceProp deviceProp; checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); if (deviceProp.major < 1) { fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n"); exit(-1); \ } checkCudaErrors( cudaSetDevice(devID) ); printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name); return devID; } // This function returns the best GPU (with maximum GFLOPS) int gpuGetMaxGflopsDeviceId() { int current_device = 0, sm_per_multiproc = 0; int max_compute_perf = 0, max_perf_device = 0; int device_count = 0, best_SM_arch = 0; cudaDeviceProp deviceProp; cudaGetDeviceCount( &device_count ); // Find the best major SM Architecture GPU device while ( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major > 0 && deviceProp.major < 9999) { best_SM_arch = MAX(best_SM_arch, deviceProp.major); } current_device++; } // Find the best CUDA capable GPU device current_device = 0; while( current_device < device_count ) { cudaGetDeviceProperties( &deviceProp, current_device ); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { sm_per_multiproc = 1; } else { sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor); } int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate; if( compute_perf > max_compute_perf ) { // If we find GPU with SM major > 2, search only these if ( best_SM_arch > 2 ) { // If our device==dest_SM_arch, choose this, or else pass if (deviceProp.major == best_SM_arch) { max_compute_perf = compute_perf; max_perf_device = current_device; } } else { max_compute_perf = compute_perf; max_perf_device = current_device; } } ++current_device; } return max_perf_device; } // Initialization code to find the best CUDA Device int findCudaDevice(int argc, const char **argv) { cudaDeviceProp deviceProp; int devID = 0; // If the command-line has a device number specified, use it if (checkCmdLineFlag(argc, argv, "device")) { devID = getCmdLineArgumentInt(argc, argv, "device="); if (devID < 0) { printf("Invalid command line parameters\n"); exit(-1); } else { devID = gpuDeviceInit(devID); if (devID < 0) { printf("exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_FAILED); exit(-1); } } } else { // Otherwise pick the device with highest Gflops/s devID = gpuGetMaxGflopsDeviceId(); checkCudaErrors( cudaSetDevice( devID ) ); checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) ); printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name); } return devID; } // end of CUDA Helper Functions // includes, kernels // Declare the CUDA kernels here and main() code that is needed to launch // Compute workload on the system __global__ void incKernel(int *g_out, int *g_in, int N, int inner_reps) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if( idx < N ) { for( int i=0; i<inner_reps; ++i ) { g_out[idx] = g_in[idx] + 1; } } } #define STREAM_COUNT 4 // Uncomment to simulate data source/sink IO times //#define SIMULATE_IO int *h_data_source; int *h_data_sink; int *h_data_in[STREAM_COUNT]; int *d_data_in[STREAM_COUNT]; int *h_data_out[STREAM_COUNT]; int *d_data_out[STREAM_COUNT]; cudaEvent_t cycleDone[STREAM_COUNT]; cudaStream_t stream[STREAM_COUNT]; cudaEvent_t start, stop; int N = 1 << 22; int nreps = 10; // number of times each experiment is repeated int inner_reps = 5; int memsize; dim3 block(512); dim3 grid; int thread_blocks; float processWithStreams(int streams_used); void init(); bool 
test(); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int cuda_device = 0; float scale_factor; cudaDeviceProp deviceProp; shrQAStart(argc, argv); if (checkCmdLineFlag(argc, (const char **)argv, "device")) { cuda_device = getCmdLineArgumentInt(argc, (const char **)argv, "device="); if (cuda_device < 0) { printf("Invalid command line parameters\n"); exit(-1); } else { printf("cuda_device = %d\n", cuda_device); cuda_device = gpuDeviceInit(cuda_device); if (cuda_device < 0) { printf("No CUDA Capable devices found, exiting...\n"); shrQAFinishExit(argc, (const char **)argv, QA_WAIVED); exit(-1); } } } else { // Otherwise pick the device with the highest Gflops/s cuda_device = gpuGetMaxGflopsDeviceId(); checkCudaErrors( cudaSetDevice( cuda_device ) ); checkCudaErrors( cudaGetDeviceProperties(&deviceProp, cuda_device) ); printf("> Using CUDA device [%d]: %s\n", cuda_device, deviceProp.name); } checkCudaErrors( cudaGetDeviceProperties(&deviceProp, cuda_device) ); printf("[%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name, deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); // Anything that is less than 32 Cores will have scaled down workload scale_factor = max((32.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount) ), 1.0f); N = (int)( (float)N / scale_factor ); printf("> Device name: %s\n", deviceProp.name); printf("> CUDA Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount); printf("> scale_factor = %.2f\n", 1.0f/scale_factor); printf("> array_size = %d\n\n", N); memsize = N * sizeof(int); thread_blocks = N / block.x; grid.x = thread_blocks % 65535; grid.y = (thread_blocks / 65535 + 1); // Allocate resources h_data_source = (int*) malloc(memsize); h_data_sink = (int*) malloc(memsize); for( int i =0; i<STREAM_COUNT; ++i ) { checkCudaErrors( cudaHostAlloc(&h_data_in[i], memsize, cudaHostAllocDefault) ); checkCudaErrors( cudaMalloc(&d_data_in[i], memsize) ); checkCudaErrors( cudaHostAlloc(&h_data_out[i], memsize, cudaHostAllocDefault) ); checkCudaErrors( cudaMalloc(&d_data_out[i], memsize) ); checkCudaErrors( cudaStreamCreate(&stream[i]) ); checkCudaErrors( cudaEventCreate(&cycleDone[i]) ); cudaEventRecord(cycleDone[i], stream[i]); } cudaEventCreate(&start); cudaEventCreate(&stop); init(); // Kernel warmup incKernel<<<grid, block>>>(d_data_out[0], d_data_in[0], N, inner_reps); // Time copies and kernel cudaEventRecord(start,0); checkCudaErrors( cudaMemcpyAsync(d_data_in[0], h_data_in[0], memsize, cudaMemcpyHostToDevice,0) ); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float memcpy_h2d_time; cudaEventElapsedTime(&memcpy_h2d_time, start, stop); cudaEventRecord(start,0); checkCudaErrors( cudaMemcpyAsync(h_data_out[0], d_data_out[0], memsize, cudaMemcpyDeviceToHost, 0) ); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float memcpy_d2h_time; cudaEventElapsedTime(&memcpy_d2h_time, start, stop); cudaEventRecord(start,0); incKernel<<<grid, block,0,0>>>(d_data_out[0], d_data_in[0], N, inner_reps); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float kernel_time; cudaEventElapsedTime(&kernel_time, start, stop); printf("\n"); printf("Relevant properties of this CUDA 
device\n"); printf("(%s) Can overlap one CPU<>GPU data transfer with GPU kernel execution (device property \"deviceOverlap\")\n", deviceProp.deviceOverlap ? "X" : " "); //printf("(%s) Can execute several GPU kernels simultaneously (compute capability >= 2.0)\n", deviceProp.major >= 2 ? "X": " "); printf("(%s) Can overlap two CPU<>GPU data transfers with GPU kernel execution\n" " (compute capability >= 2.0 AND (Tesla product OR Quadro 4000/5000)\n", (deviceProp.major == 2 && strstr(deviceProp.name, "GeForce") == 0 && strstr(deviceProp.name, "Quadro 2000") == 0 && strstr(deviceProp.name, "Quadro 600") == 0) ? "X" : " "); printf("\n"); printf("Measured timings (throughput):\n"); printf(" Memcpy host to device\t: %f ms (%f GB/s)\n", memcpy_h2d_time, (memsize * 1e-6)/ memcpy_h2d_time ); printf(" Memcpy device to host\t: %f ms (%f GB/s)\n", memcpy_d2h_time, (memsize * 1e-6)/ memcpy_d2h_time); printf(" Kernel\t\t\t: %f ms (%f GB/s)\n", kernel_time, (inner_reps * memsize * 2e-6)/ kernel_time); printf("\n"); printf("Theoretical limits for speedup gained from overlapped data transfers:\n"); printf("No overlap at all (transfer-kernel-transfer): %f ms \n", memcpy_h2d_time + memcpy_d2h_time + kernel_time); printf("Compute can overlap with one transfer: %f ms\n", max((memcpy_h2d_time + memcpy_d2h_time), kernel_time)); printf("Compute can overlap with both data transfers: %f ms\n", max(max(memcpy_h2d_time,memcpy_d2h_time), kernel_time)); // Process pipelined work float serial_time = processWithStreams(1); float overlap_time = processWithStreams(STREAM_COUNT); printf("\nAverage measured timings over %d repetitions:\n", nreps); printf(" Avg. time when execution fully serialized\t: %f ms\n", serial_time / nreps); printf(" Avg. time when overlapped using %d streams\t: %f ms\n", STREAM_COUNT, overlap_time / nreps); printf(" Avg. speedup gained (serialized - overlapped)\t: %f ms\n", (serial_time - overlap_time) / nreps); printf("\nMeasured throughput:\n"); printf(" Fully serialized execution\t\t: %f GB/s\n", (nreps * (memsize * 2e-6))/ serial_time); printf(" Overlapped using %d streams\t\t: %f GB/s\n", STREAM_COUNT, (nreps * (memsize * 2e-6))/ overlap_time); // Verify the results, we will use the results for final output bool bResults = test(); // Free resources free( h_data_source ); free( h_data_sink ); for( int i =0; i<STREAM_COUNT; ++i ) { cudaFreeHost(h_data_in[i]); cudaFree(d_data_in[i]); cudaFreeHost(h_data_out[i]); cudaFree(d_data_out[i]); cudaStreamDestroy(stream[i]); cudaEventDestroy(cycleDone[i]); } cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); // Test result shrQAFinishExit(argc, (const char **)argv, (bResults ? QA_PASSED : QA_FAILED)); } float processWithStreams(int streams_used) { int current_stream = 0; float time; // Do processing in a loop // // Note: All memory commands are processed in the order they are issued, // independent of the stream they are enqueued in. Hence the pattern by // which the copy and kernel commands are enqueued in the stream // has an influence on the achieved overlap. 
cudaEventRecord(start, 0); for( int i=0; i<nreps; ++i ) { int next_stream = (current_stream + 1 ) % streams_used; #ifdef SIMULATE_IO // Store the result memcpy(h_data_sink, h_data_out[current_stream],memsize); // Read new input memcpy(h_data_in[next_stream], h_data_source, memsize); #endif // Ensure that processing and copying of the last cycle has finished cudaEventSynchronize(cycleDone[next_stream]); // Process current frame incKernel<<<grid, block, 0, stream[current_stream]>>>( d_data_out[current_stream], d_data_in[current_stream], N, inner_reps); // Upload next frame checkCudaErrors( cudaMemcpyAsync( d_data_in[next_stream], h_data_in[next_stream], memsize, cudaMemcpyHostToDevice, stream[next_stream]) ); // Download current frame checkCudaErrors( cudaMemcpyAsync( h_data_out[current_stream], d_data_out[current_stream], memsize, cudaMemcpyDeviceToHost, stream[current_stream]) ); checkCudaErrors( cudaEventRecord( cycleDone[current_stream], stream[current_stream]) ); current_stream = next_stream; } cudaEventRecord(stop, 0); cudaDeviceSynchronize(); cudaEventElapsedTime(&time, start, stop); return time; } void init() { for( int i=0; i<N; ++i) { h_data_source[i] = 0; } for( int i =0; i<STREAM_COUNT; ++i ) { memcpy(h_data_in[i], h_data_source, memsize); } } bool test() { bool passed = true; for( int j =0; j<STREAM_COUNT; ++j ) { for( int i =0; i<N; ++i ) { passed &= (h_data_out[j][i] == 1); } } return passed; }
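/* Hedged sketch, not part of the sample above: a minimal, self-contained
 * illustration of the same copy/compute overlap pattern that
 * processWithStreams() implements, using two CUDA streams and pinned host
 * memory. All names here (kIncrement, kN, kReps, etc.) are invented for this
 * example; only stock CUDA runtime calls are used. In-stream ordering alone
 * keeps it race-free, so no events are needed in this reduced version. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void kIncrement(int* out, const int* in, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] + 1;
}

int main()
{
    const int kN = 1 << 20, kStreams = 2, kReps = 8;
    const size_t bytes = kN * sizeof(int);

    int *h_in[kStreams], *h_out[kStreams], *d_in[kStreams], *d_out[kStreams];
    cudaStream_t s[kStreams];
    for (int i = 0; i < kStreams; ++i) {
        cudaHostAlloc((void**)&h_in[i],  bytes, cudaHostAllocDefault); // pinned => async copies
        cudaHostAlloc((void**)&h_out[i], bytes, cudaHostAllocDefault);
        cudaMalloc((void**)&d_in[i],  bytes);
        cudaMalloc((void**)&d_out[i], bytes);
        cudaStreamCreate(&s[i]);
        for (int j = 0; j < kN; ++j) h_in[i][j] = 0;
        cudaMemcpy(d_in[i], h_in[i], bytes, cudaMemcpyHostToDevice); // prime the pipeline
    }

    // Ping-pong between the two streams: while stream `cur` computes and
    // downloads its frame, stream `nxt` already uploads the next frame.
    for (int r = 0, cur = 0; r < kReps; ++r, cur = 1 - cur) {
        const int nxt = 1 - cur;
        cudaMemcpyAsync(d_in[nxt], h_in[nxt], bytes, cudaMemcpyHostToDevice, s[nxt]);
        kIncrement<<<(kN + 255) / 256, 256, 0, s[cur]>>>(d_out[cur], d_in[cur], kN);
        cudaMemcpyAsync(h_out[cur], d_out[cur], bytes, cudaMemcpyDeviceToHost, s[cur]);
    }
    cudaDeviceSynchronize();
    printf("h_out[0][0] = %d (expected 1)\n", h_out[0][0]);

    for (int i = 0; i < kStreams; ++i) {
        cudaFreeHost(h_in[i]); cudaFreeHost(h_out[i]);
        cudaFree(d_in[i]);     cudaFree(d_out[i]);
        cudaStreamDestroy(s[i]);
    }
    return 0;
}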
the_stack
namespace xlib { namespace detail { template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ WarpQueueBase<T, SIZE, CM> ::WarpQueueBase(T (&queue)[SIZE], T* __restrict__ queue_ptr, int* __restrict__ size_ptr) : _queue(queue), _queue_ptr(queue_ptr), _size_ptr(size_ptr), _size(0) {} } // namespace detail //------------------------------------------------------------------------------ template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ WarpQueueSimple<T, SIZE, CM>::WarpQueueSimple(T (&queue)[SIZE], T* __restrict__ queue_ptr, int* __restrict__ size_ptr) : detail::WarpQueueBase<T, SIZE, CM>(queue, queue_ptr, size_ptr) {} template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ WarpQueueSimple<T, SIZE, CM>::~WarpQueueSimple() { _store(); } template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ void WarpQueueSimple<T, SIZE, CM>::insert(T item) { _queue[_size++] = item; } template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ void WarpQueueSimple<T, SIZE, CM>::store() { assert(__ballot(true) == static_cast<unsigned>(-1)); if (__any(_size >= SIZE)) { _store(); _size = 0; } } template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ void WarpQueueSimple<T, SIZE, CM>::_store() { int thread_offset = _size, total; int warp_offset = WarpExclusiveScan<>::AtomicAdd(thread_offset, _size_ptr, total); T* ptr = _queue_ptr + warp_offset + thread_offset; for (int i = 0; i < _size; i++) Store<CM>(ptr + i, _queue[i]); } //------------------------------------------------------------------------------ template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ WarpQueueUnroll<T, SIZE, CM>::~WarpQueueUnroll() { int thread_offset = _size, total; int warp_offset = WarpExclusiveScan<>::AtomicAdd(thread_offset, _size_ptr, total); T* ptr = _queue_ptr + warp_offset + thread_offset; for (int i = 0; i < _size; i++) Store<CM>(ptr + i, _queue[i]); } template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ void WarpQueueUnroll<T, SIZE, CM>::insert(T item) { Reg<>::insert(_queue, _size, item); } template<typename T, int SIZE, CacheModifier CM> __device__ __forceinline__ void WarpQueueUnroll<T, SIZE, CM>::store() { assert(__ballot(true) == static_cast<unsigned>(-1)); if (__any(_size >= SIZE)) { int thread_offset = _size, total; int warp_offset = WarpExclusiveScan<>::AtomicAdd(thread_offset, _size_ptr, total); T* ptr = _queue_ptr + warp_offset + thread_offset; #pragma unroll for (int i = 0; i < _size; i++) { if (i < _size) Store<CM>(ptr + i, _queue[i]); } _size = 0; } } //------------------------------------------------------------------------------ template<typename T, CacheModifier CM> __device__ __forceinline__ WarpQueueBallot<T, CM> ::WarpQueueBallot(T* __restrict__ queue_ptr, int* __restrict__ size_ptr) : _queue_ptr(queue_ptr), _size_ptr(size_ptr) {} template<typename T, CacheModifier CM> __device__ __forceinline__ void WarpQueueBallot<T, CM>::store(T item, int predicate) { unsigned ballot = __ballot(predicate); unsigned elected_lane = __msb(ballot); int warp_offset; if (lane_id() == elected_lane) warp_offset = atomicAdd(_size_ptr, __popc(ballot)); int offset = __popc(ballot & LaneMaskLT()) + __shfl(warp_offset, elected_lane); if (predicate) Store<CM>(_queue_ptr + offset) = item; } //------------------------------------------------------------------------------ template<typename T, int SIZE, unsigned ITEMS_PER_WARP, CacheModifier CM> __device__ __forceinline__ 
WarpQueueSharedMem<T, SIZE, ITEMS_PER_WARP, CM> ::WarpQueueSharedMem(T (&queue)[SIZE], T* __restrict__ queue_ptr, int* __restrict__ size_ptr, T* shared_mem) : detail::WarpQueueBase<T, SIZE, CM>(queue, queue_ptr, size_ptr), _shared_mem(shared_mem) { _lane_shared_mem = shared_mem + lane_id(); } template<typename T, int SIZE, unsigned ITEMS_PER_WARP, CacheModifier CM> __device__ __forceinline__ WarpQueueSharedMem<T, SIZE, ITEMS_PER_WARP, CM>::~WarpQueueSharedMem() { store(); } template<typename T, int SIZE, unsigned ITEMS_PER_WARP, CacheModifier CM> __device__ __forceinline__ void WarpQueueSharedMem<T, SIZE, ITEMS_PER_WARP, CM>::store() { assert(__ballot(true) == static_cast<unsigned>(-1)); if (__any(_size >= SIZE)) { int thread_offset = _size, total; int warp_offset = WarpExclusiveScan<>::AtomicAdd(thread_offset, _size_ptr, total); T* ptr = _queue_ptr + warp_offset + lane_id(); int j = 0; int loop_limit = total / ITEMS_PER_WARP; for (int loop = 0; loop < loop_limit; loop++) { int pos = thread_offset; while (j < _size && pos < ITEMS_PER_WARP) _shared_mem[pos++] = _queue[j++]; #pragma unroll for (int i = 0; i < ITEMS_PER_WARP; i += WARP_SIZE) Store<CM>(ptr + i, _lane_shared_mem[i]); total -= ITEMS_PER_WARP; thread_offset -= ITEMS_PER_WARP; ptr += ITEMS_PER_WARP; } int pos = thread_offset; while (j < _size && pos < ITEMS_PER_WARP) _shared_mem[pos++] = _queue[j++]; #pragma unroll for (int i = 0; i < ITEMS_PER_WARP; i += WARP_SIZE) { if (lane_id() + i < total) Store<CM>(ptr + i, _lane_shared_mem[i]); } _size = 0; } } //============================================================================== //============================================================================== template<cuQUEUE_MODE mode, CacheModifier CM, int Items_per_warp> template<typename T, typename R, int SIZE> __device__ __forceinline__ void QueueWarp<mode, CM, Items_per_warp> ::store(T (&Queue)[SIZE], int size, T* __restrict__ queue_ptr, R* __restrict__ queue_size_ptr, T* __restrict__ SMem) { int thread_offset = size, total; int warp_offset = WarpExclusiveScan<>::AtomicAdd(thread_offset, queue_size_ptr, total); warp_dyn<mode, CM, Items_per_warp> ::regToGlobal(Queue, size, thread_offset, queue_ptr + warp_offset, total, SMem + warp_id() * Items_per_warp); } template<cuQUEUE_MODE mode, CacheModifier CM, int Items_per_warp> template<typename T, typename R, int SIZE> __device__ __forceinline__ void QueueWarp<mode, CM, Items_per_warp> ::store(T (&Queue)[SIZE], int size, T* __restrict__ queue_ptr, R* __restrict__ queue_size_ptr) { static_assert(mode != cuQUEUE_MODE::SHAREDMEM && mode != cuQUEUE_MODE::SHAREDMEM_UNROLL, "SMem == nullptr not allowed with shared memory"); QueueWarp<mode, CM, Items_per_warp> ::store(Queue, size, queue_ptr, queue_size_ptr, static_cast<T*>(nullptr)); } /* template<cuQUEUE_MODE mode, CacheModifier CM, int Items_per_warp> template<typename T, typename R, int SIZE> __device__ __forceinline__ void QueueWarp<mode, CM, Items_per_warp> ::store2(T (&Queue)[SIZE], const int size, T* __restrict__ queue_ptr, R* __restrict__ queue_size_ptr, T* __restrict__ SMem, int& warp_offset, int& total) { int thread_offset = size; warp_offset = WarpExclusiveScan<>::AtomicAdd(thread_offset, queue_size_ptr, total); warp_dyn<mode, CM, Items_per_warp> ::regToGlobal(Queue, size, thread_offset, queue_ptr + warp_offset, total, SMem + warp_id() * Items_per_warp); }*/ //============================================================================== //============================================================================== 
template<CacheModifier CM, int Items_per_warp> struct warp_dyn<cuQUEUE_MODE::SIMPLE, CM, Items_per_warp> { template<typename T, int SIZE> __device__ __forceinline__ static void regToGlobal(T (&Queue)[SIZE], int size, int thread_offset, T* __restrict__ devPointer, const int total, //optional T* __restrict__ SMem) { //optional devPointer += thread_offset; for (int i = 0; i < size; i++) Store<CM>(devPointer + i, Queue[i]); } }; template<CacheModifier CM, int Items_per_warp> struct warp_dyn<cuQUEUE_MODE::UNROLL, CM, Items_per_warp> { template<typename T, int SIZE> __device__ __forceinline__ static void regToGlobal(T (&Queue)[SIZE], int size, int thread_offset, T* __restrict__ devPointer, const int total, //optional T* __restrict__ SMem) { //optional devPointer += thread_offset; #pragma unroll for (int i = 0; i < SIZE; i++) { if (i < size) Store<CM>(devPointer + i, Queue[i]); } } }; template<CacheModifier CM, int Items_per_warp> struct warp_dyn<cuQUEUE_MODE::SHAREDMEM, CM, Items_per_warp> { //static_assert(Items_per_warp != 0, "Items_per_warp == 0"); template<typename T, int SIZE> __device__ __forceinline__ static void regToGlobal(T (&Queue)[SIZE], int size, int thread_offset, T* __restrict__ devPointer, int total, T* __restrict__ SMem) { T* SMemTMP = SMem; devPointer += lane_id(); SMem += lane_id(); int j = 0; while (true) { while (j < size && thread_offset < Items_per_warp) { SMemTMP[thread_offset] = Queue[j]; j++; thread_offset++; } if (total < Items_per_warp) { #pragma unroll for (int i = 0; i < Items_per_warp; i += WARP_SIZE) { if (lane_id() + i < total) Store<CM>(devPointer + i, SMem[i]); } break; } else { #pragma unroll for (int i = 0; i < Items_per_warp; i += WARP_SIZE) Store<CM>(devPointer + i, SMem[i]); } total -= Items_per_warp; thread_offset -= Items_per_warp; devPointer += Items_per_warp; } } }; template<CacheModifier CM, int Items_per_warp> struct warp_dyn<cuQUEUE_MODE::Min, CM, Items_per_warp> { static_assert(Items_per_warp != 0, "Items_per_warp == 0"); template<typename T, int SIZE> __device__ __forceinline__ static void regToGlobal(T (&Queue)[SIZE], int size, int thread_offset, T* __restrict__ devPointer, const int total, //optional T* __restrict__ SMem) { //optional int minValue = size; WarpReduce<>::minAll(minValue); T* devPointerTMP = devPointer + lane_id(); for (int i = 0; i < minValue; i++) Store<CM>(devPointerTMP + i * WARP_SIZE, Queue[i]); size -= minValue; thread_offset -= lane_id() * minValue; total -= minValue * WARP_SIZE; devPointer += minValue * WARP_SIZE; RegToGlobal(Queue + minValue, size, thread_offset, total, SMem, devPointer); } }; template<CacheModifier CM, int Items_per_warp> struct QueueWarp<cuQUEUE_MODE::BALLOT, CM, Items_per_warp> { template<typename T, typename R> __device__ __forceinline__ static void store(T value, bool predicate, T* __restrict__ queue_ptr, R* __restrict__ queue_size_ptr) { unsigned ballot = __ballot(predicate); unsigned electedLane = __msb(ballot); //if (ballot) { int warp_offset; if (lane_id() == electedLane) warp_offset = atomicAdd(queue_size_ptr, __popc(ballot)); int th_offset = __popc(ballot & LaneMaskLT()) + __shfl(warp_offset, electedLane); if (predicate) queue_ptr[th_offset] = value; //} } }; } // namespace xlib
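/* Hedged sketch, not taken from the xlib headers above: a self-contained
 * kernel showing the ballot-based warp-aggregated enqueue that
 * WarpQueueBallot::store / QueueWarp<BALLOT>::store perform -- one atomicAdd
 * per warp instead of one per thread. Only plain CUDA intrinsics
 * (__ballot_sync, __popc, __shfl_sync) are used, so it compiles without xlib;
 * names such as filter_positive are invented for the example. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void filter_positive(const int* in, int n, int* out, int* out_size)
{
    int  i    = blockIdx.x * blockDim.x + threadIdx.x;
    bool pred = (i < n) && (in[i] > 0);

    unsigned mask    = __ballot_sync(0xffffffffu, pred);        // which lanes enqueue
    int      lane    = threadIdx.x & 31;
    int      elected = (mask != 0) ? (__ffs(mask) - 1) : 0;     // lowest active lane

    int warp_base = 0;
    if (mask != 0 && lane == elected)
        warp_base = atomicAdd(out_size, __popc(mask));          // one atomic per warp
    warp_base = __shfl_sync(0xffffffffu, warp_base, elected);

    // Rank of this lane among the enqueuing lanes below it.
    int offset = __popc(mask & ((1u << lane) - 1));
    if (pred)
        out[warp_base + offset] = in[i];
}

int main()
{
    const int n = 1024;
    int h_in[n];
    for (int i = 0; i < n; ++i) h_in[i] = (i % 3) - 1;          // mix of -1, 0, 1

    int *d_in, *d_out, *d_size, h_size = 0;
    cudaMalloc((void**)&d_in,   n * sizeof(int));
    cudaMalloc((void**)&d_out,  n * sizeof(int));
    cudaMalloc((void**)&d_size, sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_size, 0, sizeof(int));

    filter_positive<<<(n + 127) / 128, 128>>>(d_in, n, d_out, d_size);
    cudaMemcpy(&h_size, d_size, sizeof(int), cudaMemcpyDeviceToHost);
    printf("queued %d positive elements\n", h_size);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_size);
    return 0;
}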
the_stack
#ifndef INCLUDE_GGNN_UTILS_CUDA_KNN_DATASET_CUH_ #define INCLUDE_GGNN_UTILS_CUDA_KNN_DATASET_CUH_ #include <algorithm> #include <limits> #include <string> #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include "io/loader_ann.hpp" #include "io/storer_ann.hpp" /** * KNN database data that will be shared with the GPU * and some utilities to load (and store) that data * * @param KeyT datatype of dataset indices * @param BaseT datatype of dataset vector elements * @param BAddrT address type used to access dataset vectors (needs to be able * to represent N_base*D) */ template <typename KeyT, typename BaseT, typename BAddrT> struct Dataset { /// dataset vectors BaseT* h_base{nullptr}; /// query vectors BaseT* h_query{nullptr}; /// ground truth indices in the dataset for the given queries KeyT* gt{nullptr}; /// number of dataset vectors int N_base{0}; /// number of query vectors (and ground truth indices) int N_query{0}; /// dimension of vectors in the dataset and query int D{0}; /// number of nearest neighbors per ground truth entry int K_gt{0}; // indices within the ground truth list per point up to which result ids // need to be compared. // without duplicates in the dataset, each entry should just be 1 / KQuery std::vector<uint8_t> top1DuplicateEnd; std::vector<uint8_t> topKDuplicateEnd; Dataset(const std::string& basePath, const std::string& queryPath, const std::string& gtPath, const size_t N_base = std::numeric_limits<size_t>::max()) { VLOG(1) << "N_base: " << N_base; bool success = loadBase(basePath, 0, N_base) && loadQuery(queryPath) && loadGT(gtPath); if (!success) throw std::runtime_error( "failed to load dataset (see previous log entries for details).\n"); } //TODO(fabi): cleanup. ~Dataset() { freeBase(); freeQuery(); freeGT(); } Dataset(const Dataset&) = delete; Dataset(Dataset&&) = delete; Dataset& operator=(const Dataset&) = delete; Dataset& operator=(Dataset&&) = delete; void freeBase() { cudaFreeHost(h_base); h_base = nullptr; N_base = 0; if (!h_query) D = 0; } void freeQuery() { cudaFreeHost(h_query); h_query = nullptr; if (!gt) N_query = 0; if (!h_base) D = 0; } void freeGT() { free(gt); gt = nullptr; if (!h_query) N_query = 0; K_gt = 0; } /// load base vectors from file bool loadBase(const std::string& base_file, size_t from = 0, size_t num = std::numeric_limits<size_t>::max()) { freeBase(); XVecsLoader<BaseT> base_loader(base_file); num = std::min(num, base_loader.Num() - from); CHECK_GT(num, 0) << "The requested range contains no vectors."; N_base = num; if (D == 0) { D = base_loader.Dim(); } CHECK_EQ(D, base_loader.Dim()) << "Dimension mismatch"; const size_t dataset_max_index = static_cast<size_t>(N_base) * static_cast<size_t>(D); CHECK_LT(dataset_max_index, std::numeric_limits<BAddrT>::max()) << "Address type is insufficient to address " "the requested dataset. 
aborting"; const size_t base_memsize = static_cast<BAddrT>(N_base) * D * sizeof(BaseT); CHECK_CUDA(cudaMallocHost(&h_base, base_memsize, cudaHostAllocPortable | cudaHostAllocWriteCombined)); base_loader.load(h_base, from, num); return true; } /// load query vectors from file bool loadQuery(const std::string& query_file, KeyT from = 0, KeyT num = std::numeric_limits<KeyT>::max()) { freeQuery(); XVecsLoader<BaseT> query_loader(query_file); num = std::min(num, query_loader.Num() - from); CHECK_GT(num, 0) << "The requested range contains no vectors."; if (N_query == 0) { N_query = num; } CHECK_EQ(N_query, num) << "Number mismatch"; if (D == 0) { D = query_loader.Dim(); } CHECK_EQ(D, query_loader.Dim()) << "Dimension mismatch"; const size_t dataset_max_index = static_cast<size_t>(N_query) * static_cast<size_t>(D); CHECK_LT(dataset_max_index, std::numeric_limits<BAddrT>::max()) << "Address type is insufficient to address " "the requested dataset. aborting"; const size_t query_memsize = static_cast<BAddrT>(N_query) * D * sizeof(BaseT); CHECK_CUDA(cudaMallocHost(&h_query, query_memsize, cudaHostAllocPortable)); query_loader.load(h_query, from, num); return true; } /// load ground truth indices from file bool loadGT(const std::string& gt_file, KeyT from = 0, KeyT num = std::numeric_limits<KeyT>::max()) { freeGT(); if (gt_file.empty()) { LOG(INFO) << "No ground truth file loaded. Make sure to compute it yourself before evaluating any queries."; CHECK_GT(N_query, 0) << "Cannot determine the number of GT entries which need to be computed if the query is not yet loaded."; K_gt = 100; //TODO(fabi): move out of if branch. gt = (KeyT*) malloc(static_cast<BAddrT>(N_query) * K_gt * sizeof(KeyT)); CHECK(gt); return true; } XVecsLoader<KeyT> gt_loader(gt_file); num = std::min(num, gt_loader.Num() - from); CHECK_GT(num, 0) << "The requested range contains no vectors."; if (N_query == 0) { N_query = num; } CHECK_EQ(N_query, num) << "Number mismatch"; K_gt = gt_loader.Dim(); const size_t dataset_max_index = static_cast<size_t>(N_query) * static_cast<size_t>(K_gt); CHECK_LT(dataset_max_index, std::numeric_limits<BAddrT>::max()) << "Address type is insufficient to address " "the requested dataset. 
aborting"; gt = (KeyT*) malloc(static_cast<BAddrT>(N_query) * K_gt * sizeof(KeyT)); CHECK(gt); gt_loader.load(gt, from, num); return true; } template <DistanceMeasure measure, typename ValueT> ValueT compute_distance_query(KeyT index, KeyT query) const { CHECK_GE(index, 0); CHECK_GE(query, 0); CHECK_LT(index, N_base); CHECK_LT(query, N_query); ValueT distance = 0.0f, index_norm = 0.0f, query_norm = 0.0f; for (int d=0; d<D; ++d) { if (measure == Euclidean) { distance += (h_query[static_cast<size_t>(query)*D+d] -h_base [static_cast<size_t>(index)*D+d]) *(h_query[static_cast<size_t>(query)*D+d] -h_base [static_cast<size_t>(index)*D+d]); } else if (measure == Cosine) { distance += h_query[static_cast<size_t>(query)*D+d] *h_base [static_cast<size_t>(index)*D+d]; query_norm += h_query[static_cast<size_t>(query)*D+d] *h_query[static_cast<size_t>(query)*D+d]; index_norm += h_base [static_cast<size_t>(index)*D+d] *h_base [static_cast<size_t>(index)*D+d]; } } if (measure == Euclidean) { distance = sqrtf(distance); } else if (measure == Cosine) { if (index_norm*query_norm > 0.0f) distance = fabs(1.0f-distance/sqrtf(index_norm*query_norm)); else distance = 1.0f; } return distance; }; template <DistanceMeasure measure, typename ValueT> ValueT compute_distance_base_to_base(KeyT a, KeyT b) const { CHECK_GE(a, 0); CHECK_GE(b, 0); CHECK_LT(a, N_base); CHECK_LT(b, N_base); ValueT distance = 0.0f, a_norm = 0.0f, b_norm = 0.0f; for (int d=0; d<D; ++d) { if (measure == Euclidean) { distance += (h_base[static_cast<size_t>(b)*D+d]-h_base[static_cast<size_t>(a)*D+d]) *(h_base[static_cast<size_t>(b)*D+d]-h_base[static_cast<size_t>(a)*D+d]); } else if (measure == Cosine) { distance += h_base[static_cast<size_t>(b)*D+d]*h_base[static_cast<size_t>(a)*D+d]; b_norm += h_base[static_cast<size_t>(b)*D+d]*h_base[static_cast<size_t>(b)*D+d]; a_norm += h_base[static_cast<size_t>(a)*D+d]*h_base[static_cast<size_t>(a)*D+d]; } } if (measure == Euclidean) { distance = sqrtf(distance); } else if (measure == Cosine) { if (a_norm*b_norm > 0.0f) distance = fabs(1.0f-distance/sqrtf(a_norm*b_norm)); else distance = 1.0f; } return distance; }; template <DistanceMeasure measure, typename ValueT> void checkForDuplicatesInGroundTruth(const int KQuery) { if (!top1DuplicateEnd.empty() || !topKDuplicateEnd.empty()) return; VLOG(2) << "searching for duplicates in the ground truth indices."; const float Epsilon = 0.000001f; size_t total_num_duplicates_top_1 = 0, total_num_duplicates_top_k = 0; uint8_t max_dup_top_1 = 0, max_dup_top_k = 0; for (int n = 0; n < N_query; n++) { const ValueT gt_dist1 = compute_distance_query<measure, ValueT>(gt[n * K_gt], n); uint8_t num_duplicates_top_1 = 0, num_duplicates_top_k = 0; for (int k=1; k < K_gt; ++k) { const ValueT gt_dist_k = compute_distance_query<measure, ValueT>(gt[n * K_gt + k], n); if (gt_dist_k-gt_dist1 > Epsilon) break; ++num_duplicates_top_1; } total_num_duplicates_top_1 += num_duplicates_top_1; if (num_duplicates_top_1 > max_dup_top_1) max_dup_top_1 = num_duplicates_top_1; top1DuplicateEnd.push_back(1+num_duplicates_top_1); if (KQuery <= K_gt) { const ValueT gt_distKQuery = compute_distance_query<measure, ValueT>(gt[n * K_gt + KQuery-1], n); for (int k=KQuery; k < K_gt; ++k) { const ValueT gt_dist_k = compute_distance_query<measure, ValueT>(gt[n * K_gt + k], n); if (gt_dist_k-gt_distKQuery > Epsilon) break; ++num_duplicates_top_k; } total_num_duplicates_top_k += num_duplicates_top_k; if (num_duplicates_top_k > max_dup_top_k) max_dup_top_k = num_duplicates_top_k; 
topKDuplicateEnd.push_back(KQuery+num_duplicates_top_k); } else topKDuplicateEnd.push_back(K_gt); } VLOG(2) << "found " << total_num_duplicates_top_1 << " duplicates for c@1." << " max: " << uint32_t(max_dup_top_1); if (KQuery <= K_gt) { VLOG(2) << "found " << total_num_duplicates_top_k << " duplicates for c@" << KQuery << "." << " max: " << uint32_t(max_dup_top_k); } } }; #endif // INCLUDE_GGNN_UTILS_CUDA_KNN_DATASET_CUH_
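/* Hedged sketch, independent of the Dataset/XVecsLoader classes above: the
 * flat row-major addressing and Euclidean distance arithmetic used by
 * compute_distance_query(), reduced to a tiny host-only brute-force
 * nearest-neighbor scan over synthetic data. This is roughly what a ground
 * truth (gt) entry records. All names here are invented for the example. */
#include <cmath>
#include <cstdio>
#include <vector>

// Row-major layout: vector i occupies elements [i*D, (i+1)*D), as in h_base/h_query.
static float l2_distance(const std::vector<float>& base, size_t i,
                         const std::vector<float>& query, size_t q, int D)
{
    float dist = 0.0f;
    for (int d = 0; d < D; ++d) {
        const float diff = query[q * D + d] - base[i * D + d];
        dist += diff * diff;
    }
    return std::sqrt(dist);
}

int main()
{
    const int D = 4, N_base = 5;
    std::vector<float> base(N_base * D), query(D);
    for (int i = 0; i < N_base; ++i)
        for (int d = 0; d < D; ++d)
            base[i * D + d] = float(i + d);
    for (int d = 0; d < D; ++d) query[d] = float(d) + 0.25f;

    // Brute-force 1-NN over the base set for a single query vector.
    int   best = -1;
    float best_dist = 0.0f;
    for (int i = 0; i < N_base; ++i) {
        const float dist = l2_distance(base, i, query, 0, D);
        if (best < 0 || dist < best_dist) { best = i; best_dist = dist; }
    }
    printf("nearest base vector: %d (distance %.3f)\n", best, best_dist);
    return 0;
}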
the_stack
using namespace std; typedef boost::uint32_t uint32; typedef boost::uint64_t uint64; #define MAX_CUDA_THREADS (1<<20) #define MAX_CUDA_BLOCKS 256 #define MAX_CUDA_THREADS_PER_BLOCK 2048 #define REGISTERS_PER_CUDA_THREAD 64 #define TRAIL_NOCONSTRUCTOR #include "birthday_types.hpp" #ifndef CUDA_SAFE_CALL #define CUDA_SAFE_CALL(s) { auto ce = s; if (ce != cudaSuccess) { throw std::runtime_error("CUDA API Error:\n" + std::string(cudaGetErrorName(ce)) + ":\n" + std::string(cudaGetErrorString(ce))); } } #endif #ifndef cutilSafeCall #define cutilSafeCall(s) (s) #endif /**** NOTE WARNING: We assume that all global __device__ variables below are *thread* *specific* (instead of global) storage managed by the cuda realtime libraries *****/ // last template parameter is fence type: 0=none, 1=block, 2=gpu typedef cyclic_buffer_cas_t<MAX_CUDA_THREADS,uint32,7,cyclic_buffer_control_cas_t<MAX_CUDA_THREADS>,2> state_buffer_t; typedef cyclic_buffer_mask_t<MAX_CUDA_THREADS_PER_BLOCK,uint32,7,cyclic_buffer_control_mask_t<MAX_CUDA_THREADS_PER_BLOCK>,1> work_buffer_t; typedef work_buffer_t::control_t work_control_t; typedef cyclic_buffer_cas_t<MAX_CUDA_THREADS,uint32,15,cyclic_buffer_control_cas_t<MAX_CUDA_THREADS>,2> collisions_buffer_t; typedef collisions_buffer_t::control_t collisions_control_t; // static gpu buffer that always stays on GPU __device__ state_buffer_t gworking_states; __device__ collisions_buffer_t gcollision_states; // per-block buffer for trails __device__ work_buffer_t gtrailsout_buf[MAX_CUDA_BLOCKS]; __device__ work_control_t gtrailsout_ctl[MAX_CUDA_BLOCKS]; __shared__ work_control_t gtrailsout_ctlblock; // gpu-wide in- and out-put buffers collisions __device__ collisions_buffer_t gcollisionsin_buf; __device__ collisions_buffer_t gcollisionsout_buf; __device__ collisions_control_t gcollisionsin_ctl; __device__ collisions_control_t gcollisionsout_ctl; __device__ volatile uint32 halt_flag; __constant__ uint32 msg1[16], msg2[16], ihv1[4], ihv2[4], ihv2mod[4]; __constant__ uint32 precomp1[4], precomp2[4]; __constant__ uint32 hybridmask, distinguishedpointmask, maximumpathlength; class cuda_device_detail { public: uint32 device; uint32 blocks; uint32 threadsperblock; work_buffer_t* trailsout_buf; work_control_t* trailsout_ctl; // host-side buffer size_t nrcollisions_on_gpu; vector< pair<trail_type,trail_type> > collisions; collisions_buffer_t* collisionsin_buf; collisions_control_t* collisionsin_ctl; collisions_buffer_t* collisionsout_buf; collisions_control_t* collisionsout_ctl; }; /* F, G and H are basic MD5 functions: selection, majority, parity */ #define MD5_F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define MD5_G(x, y, z) (((x) & (z)) | ((y) & (~z))) #define MD5_H(x, y, z) ((x) ^ (y) ^ (z)) #define MD5_I(x, y, z) ((y) ^ ((x) | (~z))) /* ROTATE_LEFT rotates x left n bits */ #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) /* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */ /* Rotation is separate from addition to prevent recomputation */ #define MD5_FF(a, b, c, d, x, s, ac) \ {(a) += MD5_F ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_GG(a, b, c, d, x, s, ac) \ {(a) += MD5_G ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_HH(a, b, c, d, x, s, ac) \ {(a) += MD5_H ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define MD5_II(a, b, c, d, x, s, ac) \ {(a) += MD5_I ((b), (c), (d)) + (x) + (uint32)(ac); \ (a) = ROTATE_LEFT 
((a), (s)); \ (a) += (b); \ } __device__ void backup_controls() { __syncthreads(); if (threadIdx.x == 0) { gtrailsout_ctlblock = gtrailsout_ctl[blockIdx.x]; } __syncthreads(); } __device__ void restore_controls() { __syncthreads(); if (threadIdx.x == 0) { gtrailsout_ctl[blockIdx.x] = gtrailsout_ctlblock; } __syncthreads(); } __global__ void cuda_md5_init() { int idx = blockIdx.x * blockDim.x + threadIdx.x; gworking_states.get_ref<6>(idx) = 0; // len = 0 gcollision_states.get_ref<14>(idx) = 1; // bad = 1 if (threadIdx.x == 0) { gtrailsout_buf[blockIdx.x].reset(gtrailsout_ctl[blockIdx.x]); gcollisionsin_buf.reset(gcollisionsin_ctl); gcollisionsout_buf.reset(gcollisionsout_ctl); } } bool cuda_device::init(uint32 device, const uint32 ihv1b[4], const uint32 ihv2b[4], const uint32 ihv2modb[4], const uint32 msg1b[16], const uint32 msg2b[16], uint32 hmask, uint32 dpmask, uint32 maxlen) { detail = new cuda_device_detail; detail->device = device; int deviceCount; CUDA_SAFE_CALL( cudaGetDeviceCount(&deviceCount) ); if (deviceCount == 0) { cout << "There is no device supporting CUDA!" << endl; return false; } cudaDeviceProp deviceProp; CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, device) ); if (deviceProp.major == 9999) { cout << "Emulation device found." << endl; return false; } cout << "CUDA device " << device << ": " << deviceProp.name << " (" << deviceProp.multiProcessorCount << " MPs)" << endl; unsigned maxthreadspermp = deviceProp.maxThreadsPerMultiProcessor; if (maxthreadspermp > MAX_CUDA_THREADS) maxthreadspermp = (MAX_CUDA_THREADS/32)*32; while (maxthreadspermp > deviceProp.regsPerMultiprocessor * REGISTERS_PER_CUDA_THREAD) maxthreadspermp -= 32; unsigned minblockspermp = 1; while (maxthreadspermp > minblockspermp * deviceProp.maxThreadsPerBlock) minblockspermp += 1; while (maxthreadspermp * REGISTERS_PER_CUDA_THREAD > minblockspermp * deviceProp.regsPerBlock) minblockspermp += 1; detail->threadsperblock = ((maxthreadspermp / minblockspermp) / 32) * 32; detail->blocks = minblockspermp * deviceProp.multiProcessorCount; cout << "Using " << detail->blocks << " blocks with " << detail->threadsperblock << " threads each: total " << detail->blocks * detail->threadsperblock << " threads." 
<< endl; CUDA_SAFE_CALL( cudaSetDevice(device) ); // CUDA_SAFE_CALL( cudaSetDeviceFlags( cudaDeviceBlockingSync ) ); // work_buffer_t* trailsout_buf;//[MAX_CUDA_BLOCKS]; // work_control_t* trailsout_ctl;//[MAX_CUDA_BLOCKS]; // collisions_buffer_t* collisionsin_buf;//[MAX_CUDA_BLOCKS]; // collisions_control_t* collisionsin_ctl;//[MAX_CUDA_BLOCKS]; // collisions_buffer_t* collisionsout_buf;//[MAX_CUDA_BLOCKS]; // collisions_control_t* collisionsout_ctl;//[MAX_CUDA_BLOCKS]; CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->trailsout_buf)), detail->blocks * sizeof(work_buffer_t) ) ); CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->trailsout_ctl)), detail->blocks * sizeof(work_control_t) ) ); CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->collisionsin_buf)), sizeof(collisions_buffer_t) ) ); CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->collisionsin_ctl)), sizeof(collisions_control_t) ) ); CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->collisionsout_buf)), sizeof(collisions_buffer_t) ) ); CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->collisionsout_ctl)), sizeof(collisions_control_t) ) ); for (unsigned b = 0; b < detail->blocks; ++b) detail->trailsout_buf[b].reset(detail->trailsout_ctl[b]); detail->collisionsin_buf->reset(*(detail->collisionsin_ctl)); detail->collisionsout_buf->reset(*(detail->collisionsout_ctl)); detail->nrcollisions_on_gpu = 0; uint32 pc1[4], pc2[4]; uint32 a = ihv1b[0], b = ihv1b[1], c = ihv1b[2], d = ihv1b[3]; MD5_FF ( a, b, c, d, msg1b[ 0], 7, 3614090360); /* 1 */ MD5_FF ( d, a, b, c, msg1b[ 1], 12, 3905402710); /* 2 */ MD5_FF ( c, d, a, b, msg1b[ 2], 17, 606105819); /* 3 */ MD5_FF ( b, c, d, a, msg1b[ 3], 22, 3250441966); /* 4 */ MD5_FF ( a, b, c, d, msg1b[ 4], 7, 4118548399); /* 5 */ MD5_FF ( d, a, b, c, msg1b[ 5], 12, 1200080426); /* 6 */ MD5_FF ( c, d, a, b, msg1b[ 6], 17, 2821735955); /* 7 */ MD5_FF ( b, c, d, a, msg1b[ 7], 22, 4249261313); /* 8 */ MD5_FF ( a, b, c, d, msg1b[ 8], 7, 1770035416); /* 9 */ MD5_FF ( d, a, b, c, msg1b[ 9], 12, 2336552879); /* 10 */ MD5_FF ( c, d, a, b, msg1b[10], 17, 4294925233); /* 11 */ MD5_FF ( b, c, d, a, msg1b[11], 22, 2304563134); /* 12 */ MD5_FF ( a, b, c, d, msg1b[12], 7, 1804603682); /* 13 */ pc1[0] = a; pc1[1] = b; pc1[2] = c; pc1[3] = d; a = ihv2b[0]; b = ihv2b[1]; c = ihv2b[2]; d = ihv2b[3]; MD5_FF ( a, b, c, d, msg2b[ 0], 7, 3614090360); /* 1 */ MD5_FF ( d, a, b, c, msg2b[ 1], 12, 3905402710); /* 2 */ MD5_FF ( c, d, a, b, msg2b[ 2], 17, 606105819); /* 3 */ MD5_FF ( b, c, d, a, msg2b[ 3], 22, 3250441966); /* 4 */ MD5_FF ( a, b, c, d, msg2b[ 4], 7, 4118548399); /* 5 */ MD5_FF ( d, a, b, c, msg2b[ 5], 12, 1200080426); /* 6 */ MD5_FF ( c, d, a, b, msg2b[ 6], 17, 2821735955); /* 7 */ MD5_FF ( b, c, d, a, msg2b[ 7], 22, 4249261313); /* 8 */ MD5_FF ( a, b, c, d, msg2b[ 8], 7, 1770035416); /* 9 */ MD5_FF ( d, a, b, c, msg2b[ 9], 12, 2336552879); /* 10 */ MD5_FF ( c, d, a, b, msg2b[10], 17, 4294925233); /* 11 */ MD5_FF ( b, c, d, a, msg2b[11], 22, 2304563134); /* 12 */ MD5_FF ( a, b, c, d, msg2b[12], 7, 1804603682); /* 13 */ pc2[0] = a; pc2[1] = b; pc2[2] = c; pc2[3] = d; CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg1, msg1b, sizeof(msg1)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(msg2, msg2b, sizeof(msg2)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv1, ihv1b, sizeof(ihv1)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2, ihv2b, sizeof(ihv2)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ihv2mod, ihv2modb, sizeof(ihv2mod)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp1, pc1, sizeof(pc1)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(precomp2, pc2, 
sizeof(pc2)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(hybridmask, &hmask, sizeof(hmask)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(distinguishedpointmask, &dpmask, sizeof(dpmask)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(maximumpathlength, &maxlen, sizeof(maxlen)) ); cuda_md5_init<<<detail->blocks, detail->threadsperblock>>>(); return true; } template<bool mod = false> __device__ void cuda_md5_work2(uint64 seed) { halt_flag = 0; /********************* GENERATE TRAILS ***********************/ restore_controls(); const int idx = blockIdx.x * blockDim.x + threadIdx.x; uint32 len = gworking_states.get<6>(idx); uint32 x = gworking_states.get<3>(idx); //end[0] uint32 y = gworking_states.get<4>(idx); //end[1] uint32 z = gworking_states.get<5>(idx); //end[2] if (len >= maximumpathlength || len == 0) { x = uint32(seed>>32) ^ threadIdx.x; y = uint32(seed) ^ blockIdx.x; z = 0; gworking_states.get_ref<0>(idx) = x; gworking_states.get_ref<1>(idx) = y; gworking_states.get_ref<2>(idx) = z; len = 0; } for (unsigned j = 0; j < (1<<12); ++j) { { uint32* in = msg1; uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3]; if (x > y) { in = msg2; a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3]; } MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */ MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */ MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */ MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */ MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */ MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */ MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */ MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */ MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */ MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */ MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */ MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */ MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */ MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */ MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */ MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */ MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */ MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */ MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */ MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */ MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */ MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */ MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */ MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */ MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */ MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */ MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */ MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */ MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */ MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */ MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */ MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */ MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */ MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */ MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */ MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */ MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */ MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */ MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */ MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */ MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */ MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */ MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */ MD5_II ( a, b, c, d, in[ 8], 6, 
1873313359); /* 57 */ MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */ MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */ MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */ MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */ MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */ MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */ MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */ if (mod) { if (x <= y) { x = a + ihv1[0]; y = d + ihv1[3]; z = (c + ihv1[2]) & hybridmask; } else { x = a + ihv2mod[0]; y = d + ihv2mod[3]; z = (c + ihv2mod[2]) & hybridmask; } } else { if (x <= y) { a += ihv1[0]; b += ihv1[1]; c += ihv1[2]; d += ihv1[3]; } else { a += ihv2mod[0]; b += ihv2mod[1]; c += ihv2mod[2]; d += ihv2mod[3]; } x = a; y = d - c; z = (d - b) & hybridmask; } ++len; } { // conditionally write bool done = (0 == (x & distinguishedpointmask)); gtrailsout_buf[blockIdx.x].write(gtrailsout_ctlblock, done, gworking_states.get_ref<0>(idx), gworking_states.get_ref<1>(idx), gworking_states.get_ref<2>(idx), x, y, z, len ); if (done) { x = uint32(seed>>32) ^ (threadIdx.x<<16) + len; y = uint32(seed) ^ blockIdx.x; z = 0; len = 0; gworking_states.get_ref<0>(idx) = x; gworking_states.get_ref<1>(idx) = y; gworking_states.get_ref<2>(idx) = z; } } // __syncthreads(); } gworking_states.get_ref<3>(idx) = x; gworking_states.get_ref<4>(idx) = y; gworking_states.get_ref<5>(idx) = z; gworking_states.get_ref<6>(idx) = len; backup_controls(); halt_flag = 1; } template<bool mod = false> __global__ void cuda_md5_work(uint64 seed) { cuda_md5_work2<mod>(seed); } template<bool mod = false> __global__ void cuda_md5_collisions(uint64 seed) { halt_flag = 0; /********** PROCESS COLLIDING TRAILS INTO COLLISIONS ***************/ const int idx = blockIdx.x * blockDim.x + threadIdx.x; uint32 bad = gcollision_states.get<14>(idx); uint32 len = gcollision_states.get<6>(idx); uint32 len2 = gcollision_states.get<7+6>(idx); // if collision state is empty then go read a collision if (len == 0 || len2 == 0) bad = 1; uint32 readidx = gcollisionsin_buf.getreadidx(gcollisionsin_ctl,bad); if (bad && readidx < 0xEEEEEEEE) { len = gcollisionsin_buf.get<6>(readidx); len2 = gcollisionsin_buf.get<7+6>(readidx); gcollision_states.get_ref<0>(idx) = gcollisionsin_buf.get<0>(readidx); //start[0] gcollision_states.get_ref<1>(idx) = gcollisionsin_buf.get<1>(readidx); //start[1] gcollision_states.get_ref<2>(idx) = gcollisionsin_buf.get<2>(readidx); //start[2] gcollision_states.get_ref<3>(idx) = gcollisionsin_buf.get<0>(readidx); //start[0] gcollision_states.get_ref<4>(idx) = gcollisionsin_buf.get<1>(readidx); //start[1] gcollision_states.get_ref<5>(idx) = gcollisionsin_buf.get<2>(readidx); //start[2] gcollision_states.get_ref<6>(idx) = gcollisionsin_buf.get<6>(readidx); //len gcollision_states.get_ref<7+0>(idx) = gcollisionsin_buf.get<7+0>(readidx); gcollision_states.get_ref<7+1>(idx) = gcollisionsin_buf.get<7+1>(readidx); gcollision_states.get_ref<7+2>(idx) = gcollisionsin_buf.get<7+2>(readidx); gcollision_states.get_ref<7+3>(idx) = gcollisionsin_buf.get<7+0>(readidx); gcollision_states.get_ref<7+4>(idx) = gcollisionsin_buf.get<7+1>(readidx); gcollision_states.get_ref<7+5>(idx) = gcollisionsin_buf.get<7+2>(readidx); gcollision_states.get_ref<7+6>(idx) = gcollisionsin_buf.get<7+6>(readidx); gcollision_states.get_ref<14>(idx) = bad = 0; } if (__all_sync(WARP_FULL_MASK,bad)) { // cuda_md5_work2<mod>(seed); return; } for (unsigned j = 0; j < (1<<12); ++j) { // always process the longest uint32 x, y, z; if (len >= len2) { // process trail1 // load 
start+1, write to start x = gcollision_states.get<3>(idx); y = gcollision_states.get<4>(idx); z = gcollision_states.get<5>(idx); gcollision_states.get_ref<0>(idx) = x; gcollision_states.get_ref<1>(idx) = y; gcollision_states.get_ref<2>(idx) = z; } else { // process trail2 // load start+1, write to start x = gcollision_states.get<7+3>(idx); y = gcollision_states.get<7+4>(idx); z = gcollision_states.get<7+5>(idx); gcollision_states.get_ref<7+0>(idx) = x; gcollision_states.get_ref<7+1>(idx) = y; gcollision_states.get_ref<7+2>(idx) = z; } { uint32* in = msg1; uint32 a = precomp1[0], b = precomp1[1], c = precomp1[2], d = precomp1[3]; if (x > y) { in = msg2; a = precomp2[0]; b = precomp2[1]; c = precomp2[2]; d = precomp2[3]; } MD5_FF ( d, a, b, c, z, 12, 4254626195); /* 14 */ MD5_FF ( c, d, a, b, x, 17, 2792965006); /* 15 */ MD5_FF ( b, c, d, a, y, 22, 1236535329); /* 16 */ MD5_GG ( a, b, c, d, in[ 1], 5, 4129170786); /* 17 */ MD5_GG ( d, a, b, c, in[ 6], 9, 3225465664); /* 18 */ MD5_GG ( c, d, a, b, in[11], 14, 643717713); /* 19 */ MD5_GG ( b, c, d, a, in[ 0], 20, 3921069994); /* 20 */ MD5_GG ( a, b, c, d, in[ 5], 5, 3593408605); /* 21 */ MD5_GG ( d, a, b, c, in[10], 9, 38016083); /* 22 */ MD5_GG ( c, d, a, b, y, 14, 3634488961); /* 23 */ MD5_GG ( b, c, d, a, in[ 4], 20, 3889429448); /* 24 */ MD5_GG ( a, b, c, d, in[ 9], 5, 568446438); /* 25 */ MD5_GG ( d, a, b, c, x, 9, 3275163606); /* 26 */ MD5_GG ( c, d, a, b, in[ 3], 14, 4107603335); /* 27 */ MD5_GG ( b, c, d, a, in[ 8], 20, 1163531501); /* 28 */ MD5_GG ( a, b, c, d, z, 5, 2850285829); /* 29 */ MD5_GG ( d, a, b, c, in[ 2], 9, 4243563512); /* 30 */ MD5_GG ( c, d, a, b, in[ 7], 14, 1735328473); /* 31 */ MD5_GG ( b, c, d, a, in[12], 20, 2368359562); /* 32 */ MD5_HH ( a, b, c, d, in[ 5], 4, 4294588738); /* 33 */ MD5_HH ( d, a, b, c, in[ 8], 11, 2272392833); /* 34 */ MD5_HH ( c, d, a, b, in[11], 16, 1839030562); /* 35 */ MD5_HH ( b, c, d, a, x, 23, 4259657740); /* 36 */ MD5_HH ( a, b, c, d, in[ 1], 4, 2763975236); /* 37 */ MD5_HH ( d, a, b, c, in[ 4], 11, 1272893353); /* 38 */ MD5_HH ( c, d, a, b, in[ 7], 16, 4139469664); /* 39 */ MD5_HH ( b, c, d, a, in[10], 23, 3200236656); /* 40 */ MD5_HH ( a, b, c, d, z, 4, 681279174); /* 41 */ MD5_HH ( d, a, b, c, in[ 0], 11, 3936430074); /* 42 */ MD5_HH ( c, d, a, b, in[ 3], 16, 3572445317); /* 43 */ MD5_HH ( b, c, d, a, in[ 6], 23, 76029189); /* 44 */ MD5_HH ( a, b, c, d, in[ 9], 4, 3654602809); /* 45 */ MD5_HH ( d, a, b, c, in[12], 11, 3873151461); /* 46 */ MD5_HH ( c, d, a, b, y, 16, 530742520); /* 47 */ MD5_HH ( b, c, d, a, in[ 2], 23, 3299628645); /* 48 */ MD5_II ( a, b, c, d, in[ 0], 6, 4096336452); /* 49 */ MD5_II ( d, a, b, c, in[ 7], 10, 1126891415); /* 50 */ MD5_II ( c, d, a, b, x, 15, 2878612391); /* 51 */ MD5_II ( b, c, d, a, in[ 5], 21, 4237533241); /* 52 */ MD5_II ( a, b, c, d, in[12], 6, 1700485571); /* 53 */ MD5_II ( d, a, b, c, in[ 3], 10, 2399980690); /* 54 */ MD5_II ( c, d, a, b, in[10], 15, 4293915773); /* 55 */ MD5_II ( b, c, d, a, in[ 1], 21, 2240044497); /* 56 */ MD5_II ( a, b, c, d, in[ 8], 6, 1873313359); /* 57 */ MD5_II ( d, a, b, c, y, 10, 4264355552); /* 58 */ MD5_II ( c, d, a, b, in[ 6], 15, 2734768916); /* 59 */ MD5_II ( b, c, d, a, z, 21, 1309151649); /* 60 */ MD5_II ( a, b, c, d, in[ 4], 6, 4149444226); /* 61 */ MD5_II ( d, a, b, c, in[11], 10, 3174756917); /* 62 */ MD5_II ( c, d, a, b, in[ 2], 15, 718787259); /* 63 */ MD5_II ( b, c, d, a, in[ 9], 21, 3951481745); /* 64 */ if (mod) { if (x <= y) { x = a + ihv1[0]; y = d + ihv1[3]; z = (c + ihv1[2]) & hybridmask; } else { x = 
a + ihv2mod[0]; y = d + ihv2mod[3]; z = (c + ihv2mod[2]) & hybridmask; } } else { if (x <= y) { a += ihv1[0]; b += ihv1[1]; c += ihv1[2]; d += ihv1[3]; } else { a += ihv2mod[0]; b += ihv2mod[1]; c += ihv2mod[2]; d += ihv2mod[3]; } x = a; y = d - c; z = (d - b) & hybridmask; } } if (len >= len2) { // processed trail1 // write to end gcollision_states.get_ref<3>(idx) = x; gcollision_states.get_ref<4>(idx) = y; gcollision_states.get_ref<5>(idx) = z; if (len > 0) --len; } else { // processed trail2 // write to end gcollision_states.get_ref<7+3>(idx) = x; gcollision_states.get_ref<7+4>(idx) = y; gcollision_states.get_ref<7+5>(idx) = z; if (len2 > 0) --len2; } bool done = (bad == 0) && (len == 0 || len2 == 0 || ( (gcollision_states.get<3>(idx) == gcollision_states.get<7+3>(idx)) && (gcollision_states.get<4>(idx) == gcollision_states.get<7+4>(idx)) && (gcollision_states.get<5>(idx) == gcollision_states.get<7+5>(idx)) )); { if (done) { if (len > 0) len = 1; if (len2 > 0) len2 = 1; bad = 1; } // conditionally write result and load a new one gcollisionsout_buf.write(gcollisionsout_ctl, done, gcollision_states.get<0>(idx), gcollision_states.get<1>(idx), gcollision_states.get<2>(idx), gcollision_states.get<3>(idx), gcollision_states.get<4>(idx), gcollision_states.get<5>(idx), len, gcollision_states.get<7+0>(idx), gcollision_states.get<7+1>(idx), gcollision_states.get<7+2>(idx), gcollision_states.get<7+3>(idx), gcollision_states.get<7+4>(idx), gcollision_states.get<7+5>(idx), len2); } if (4 <= __popc(__ballot_sync(WARP_FULL_MASK,bad))) { uint32 readidx = gcollisionsin_buf.getreadidx(gcollisionsin_ctl, bad); if (bad && readidx < 0xEEEEEEEE) { len = gcollisionsin_buf.get<6>(readidx); len2 = gcollisionsin_buf.get<7+6>(readidx); gcollision_states.get_ref<0>(idx) = gcollisionsin_buf.get<0>(readidx); gcollision_states.get_ref<1>(idx) = gcollisionsin_buf.get<1>(readidx); gcollision_states.get_ref<2>(idx) = gcollisionsin_buf.get<2>(readidx); gcollision_states.get_ref<3>(idx) = gcollisionsin_buf.get<0>(readidx); gcollision_states.get_ref<4>(idx) = gcollisionsin_buf.get<1>(readidx); gcollision_states.get_ref<5>(idx) = gcollisionsin_buf.get<2>(readidx); gcollision_states.get_ref<6>(idx) = gcollisionsin_buf.get<6>(readidx); gcollision_states.get_ref<7+0>(idx) = gcollisionsin_buf.get<7+0>(readidx); gcollision_states.get_ref<7+1>(idx) = gcollisionsin_buf.get<7+1>(readidx); gcollision_states.get_ref<7+2>(idx) = gcollisionsin_buf.get<7+2>(readidx); gcollision_states.get_ref<7+3>(idx) = gcollisionsin_buf.get<7+0>(readidx); gcollision_states.get_ref<7+4>(idx) = gcollisionsin_buf.get<7+1>(readidx); gcollision_states.get_ref<7+5>(idx) = gcollisionsin_buf.get<7+2>(readidx); gcollision_states.get_ref<7+6>(idx) = gcollisionsin_buf.get<7+6>(readidx); gcollision_states.get_ref<14>(idx) = bad = 0; } } if (__all_sync(WARP_FULL_MASK,bad)) break; if (__shfl_sync(WARP_FULL_MASK,halt_flag,0)) // read global halt flag together and halt if set break; // __syncthreads(); } gcollision_states.get_ref<6>(idx) = len; gcollision_states.get_ref<7+6>(idx) = len2; gcollision_states.get_ref<14>(idx) = bad; } void cuda_device::cuda_fill_trail_buffer(uint32 id, uint64 seed, vector<trail_type>& buf, vector< pair<trail_type,trail_type> >& collisions, bool mod) { // CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->trailsout_buf)), detail->blocks * sizeof(work_buffer_t) ) ); // CUDA_SAFE_CALL( cudaMallocHost( (void**)(&(detail->trailsout_ctl)), detail->blocks * sizeof(work_control_t) ) ); // move all collisions into buffer for (auto& c : 
collisions) detail->collisions.emplace_back(c); collisions.clear(); // if collisions buffer is big enough then actually launch it uint32 collisionblocks = 0; if (detail->collisions.size()) { size_t oldsize = detail->collisions.size(); // store input collisions to GPU by writing to host buffer // and sending it to GPU, we only move the control back and forth uint32 count = detail->collisions.size(); // don't overwrite collision data still in the buffer if (count >= detail->collisionsin_ctl->free_count()) count = detail->collisionsin_ctl->free_count(); if (count > 0) count -= 1; for (std::size_t i = 0; i < count; ++i) { detail->collisionsin_buf->write(*(detail->collisionsin_ctl), true, detail->collisions[i].first.start[0], detail->collisions[i].first.start[1], detail->collisions[i].first.start[2], detail->collisions[i].first.end[0], detail->collisions[i].first.end[1], detail->collisions[i].first.end[2], detail->collisions[i].first.len, detail->collisions[i].second.start[0], detail->collisions[i].second.start[1], detail->collisions[i].second.start[2], detail->collisions[i].second.end[0], detail->collisions[i].second.end[1], detail->collisions[i].second.end[2], detail->collisions[i].second.len); } detail->collisions.erase(detail->collisions.begin(), detail->collisions.begin() + count); detail->nrcollisions_on_gpu += count; // determine how many cuda blocks to start for collision collisionblocks = (detail->nrcollisions_on_gpu / detail->threadsperblock)/2; if (collisionblocks > detail->blocks) collisionblocks = detail->blocks; // only copy data to GPU when we're actually going to run GPU code if (collisionblocks > 0) { // send control and buffer structures to GPU cudaMemcpyToSymbol(gcollisionsin_ctl, detail->collisionsin_ctl, sizeof(collisions_control_t)); cudaMemcpyToSymbol(gcollisionsin_buf, detail->collisionsin_buf, sizeof(collisions_buffer_t)); } if (0) std::cout << "C: " << oldsize << " " << detail->collisions.size() << " " << detail->collisionsin_ctl->used_count() << " " << detail->collisionsin_ctl->free_count() << " " << collisionblocks << " " << detail->blocks << std::endl; } // send control structures to GPU cudaMemcpyToSymbol(gtrailsout_ctl, detail->trailsout_ctl, detail->blocks * sizeof(work_control_t)); // retrieve store buffers from GPU cudaMemcpyToSymbol(gtrailsout_buf, detail->trailsout_buf, detail->blocks * sizeof(work_buffer_t)); // run GPU code if (mod) { cuda_md5_work<true><<<detail->blocks - collisionblocks, detail->threadsperblock>>>(seed); cuda_md5_collisions<true><<< collisionblocks, detail->threadsperblock>>>(seed); } else { cuda_md5_work<false><<<detail->blocks - collisionblocks, detail->threadsperblock>>>(seed); cuda_md5_collisions<false><<< collisionblocks, detail->threadsperblock>>>(seed); } // retrieve store buffers from GPU cudaMemcpyFromSymbol(detail->trailsout_buf, gtrailsout_buf, detail->blocks * sizeof(work_buffer_t)); // retrieve control structures from GPU cudaMemcpyFromSymbol(detail->trailsout_ctl, gtrailsout_ctl, detail->blocks * sizeof(work_control_t)); /* std::cout << detail->trailsout_ctl[0].write_idx << " " << detail->trailsout_ctl[0].read_idx << std::endl; */ // if we started a collision processing cuda job then process its output if (collisionblocks > 0) { cudaMemcpyFromSymbol(detail->collisionsout_buf, gcollisionsout_buf, sizeof(collisions_buffer_t)); cudaMemcpyFromSymbol(detail->collisionsin_ctl, gcollisionsin_ctl, sizeof(collisions_control_t)); cudaMemcpyFromSymbol(detail->collisionsout_ctl, gcollisionsout_ctl, sizeof(collisions_control_t)); uint32 
readidx; while ((readidx=detail->collisionsout_buf->getreadidx(*(detail->collisionsout_ctl))) < 0xEEEEEEEE) { --detail->nrcollisions_on_gpu; collisions.emplace_back(); trail_type& first = collisions.back().first; trail_type& second = collisions.back().second; first.start[0] = detail->collisionsout_buf->get<0>(readidx); first.start[1] = detail->collisionsout_buf->get<1>(readidx); first.start[2] = detail->collisionsout_buf->get<2>(readidx); first.end[0] = detail->collisionsout_buf->get<3>(readidx); first.end[1] = detail->collisionsout_buf->get<4>(readidx); first.end[2] = detail->collisionsout_buf->get<5>(readidx); first.len = detail->collisionsout_buf->get<6>(readidx); second.start[0] = detail->collisionsout_buf->get<7+0>(readidx); second.start[1] = detail->collisionsout_buf->get<7+1>(readidx); second.start[2] = detail->collisionsout_buf->get<7+2>(readidx); second.end[0] = detail->collisionsout_buf->get<7+3>(readidx); second.end[1] = detail->collisionsout_buf->get<7+4>(readidx); second.end[2] = detail->collisionsout_buf->get<7+5>(readidx); second.len = detail->collisionsout_buf->get<7+6>(readidx); } cudaMemcpyToSymbol(gcollisionsout_ctl, detail->collisionsout_ctl, sizeof(collisions_control_t)); } // process and return results buf.clear(); for (unsigned b = 0; b < detail->blocks; ++b) { uint32 readidx; trail_type trail; while ((readidx=detail->trailsout_buf[b].getreadidx(detail->trailsout_ctl[b])) != 0xFFFFFFFF) { trail.start[0] = detail->trailsout_buf[b].get<0>(readidx); trail.start[1] = detail->trailsout_buf[b].get<1>(readidx); trail.start[2] = detail->trailsout_buf[b].get<2>(readidx); trail.end[0] = detail->trailsout_buf[b].get<3>(readidx); trail.end[1] = detail->trailsout_buf[b].get<4>(readidx); trail.end[2] = detail->trailsout_buf[b].get<5>(readidx); trail.len = detail->trailsout_buf[b].get<6>(readidx); buf.push_back(trail); } } // std::cout << "B " << buf.size() << std::endl; } #ifdef _WIN32 #include <windows.h> #else #include <sys/time.h> #endif class timer_detail; class timer { public: timer(bool direct_start = false); ~timer(); void start(); void stop(); double time() const;// get time between start and stop (or now if still running) in seconds bool isrunning() const { return running; } // check if timer is running private: timer_detail* detail; bool running; }; class timer_detail { public: #ifdef _WIN32 LARGE_INTEGER tstart, tend; double freq; #else struct timeval tstart, tend; struct timezone tz; #endif }; timer::~timer() { delete detail; } timer::timer(bool direct_start): running(false) { detail = new timer_detail; #ifdef _WIN32 LARGE_INTEGER tmp_freq; QueryPerformanceFrequency(&tmp_freq); detail->freq = double(tmp_freq.QuadPart); #endif if (direct_start) start(); } #ifdef _WIN32 void timer::start() { running = true; QueryPerformanceCounter(&detail->tstart); } void timer::stop() { QueryPerformanceCounter(&detail->tend); running = false; } double timer::time() const { if (running) { LARGE_INTEGER tmp_end; QueryPerformanceCounter(&tmp_end); return (double(tmp_end.QuadPart) - double(detail->tstart.QuadPart))/detail->freq; } else return (double(detail->tend.QuadPart) - double(detail->tstart.QuadPart))/detail->freq; } #else void timer::start() { running = true; gettimeofday(&detail->tstart, &detail->tz); } void timer::stop() { gettimeofday(&detail->tend, &detail->tz); running = false; } double timer::time() const { double t1 = double(detail->tstart.tv_sec) + (double(detail->tstart.tv_usec)/1e6); if (running) { struct timeval tmp_end; gettimeofday(&tmp_end, &detail->tz); return 
double(tmp_end.tv_sec) + (double(tmp_end.tv_usec)/1e6) - t1; } else return double(detail->tend.tv_sec) + (double(detail->tend.tv_usec)/1e6) - t1; } #endif void cuda_device::benchmark() { /* timer sw; for (int blocksize = 4; blocksize <= 256; ++blocksize) for (int threadsize = 250; threadsize <= 257; ++threadsize) { sw.start(); uint64 work = 0; while (sw.time() < 10) { cuda_md5_work<<<blocksize, threadsize>>>(0); cudaMemcpyFromSymbol(detail->buffer_host, buffer2, sizeof(trail_type)*blocksize*threadsize); ++work; } uint64 ow = work; work *= 0x400 * blocksize * threadsize; cout << blocksize << "x" << threadsize << ":\t" << work << " (" << ow << ")" << endl; } */ } int get_num_cuda_devices() { int deviceCount = 0; cutilSafeCall(cudaGetDeviceCount(&deviceCount)); return deviceCount; } void cuda_device_query() { int deviceCount = 0; cutilSafeCall(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) printf("There is no device supporting CUDA\n"); int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; cutilSafeCall(cudaGetDeviceProperties(&deviceProp, dev)); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) printf("There is no device supporting CUDA.\n"); else if (deviceCount == 1) printf("There is 1 device supporting CUDA\n"); else printf("There are %d devices supporting CUDA\n", deviceCount); } printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); printf(" Major revision number: %d\n", deviceProp.major); printf(" Minor revision number: %d\n", deviceProp.minor); printf(" Total amount of global memory: %u bytes\n", deviceProp.totalGlobalMem); #if CUDART_VERSION >= 2000 printf(" Number of multiprocessors: %d\n", deviceProp.multiProcessorCount); printf(" Number of cores: %d\n", 8 * deviceProp.multiProcessorCount); #endif printf(" Total amount of constant memory: %u bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %u bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %u bytes\n", deviceProp.memPitch); printf(" Texture alignment: %u bytes\n", deviceProp.textureAlignment); printf(" Clock rate: %.2f GHz\n", deviceProp.clockRate * 1e-6f); #if CUDART_VERSION >= 2000 printf(" Concurrent copy and execution: %s\n", deviceProp.deviceOverlap ? "Yes" : "No"); #endif } }
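/* Hedged sketch, not the GPU code above: the distinguished-point trail idea
 * that cuda_md5_work2() implements, reduced to a host-only toy. step() is a
 * stand-in mixing function, NOT the MD5 compression used above; dpmask and
 * max_len play the roles of distinguishedpointmask and maximumpathlength. */
#include <cstdint>
#include <cstdio>
#include <map>

struct Trail { uint32_t start, end, len; };

// Stand-in, non-injective step function on a 24-bit range; the real code
// steps through MD5 rounds 14..64 instead.
static uint32_t step(uint32_t x)
{
    return (x * 0x9E3779B1u) >> 8;
}

int main()
{
    const uint32_t dpmask  = 0xFFu;      // distinguished when the low 8 bits are zero
    const uint32_t max_len = 1u << 16;   // abandon overlong trails

    std::map<uint32_t, Trail> by_endpoint; // endpoint -> first trail reaching it
    for (uint32_t seed = 1; seed <= 50000; ++seed) {
        Trail t{seed, seed, 0};
        while ((t.end & dpmask) != 0 && t.len < max_len) {
            t.end = step(t.end);
            ++t.len;
        }
        if ((t.end & dpmask) != 0) continue;   // hit max_len, discard the trail

        auto it = by_endpoint.find(t.end);
        if (it != by_endpoint.end() && it->second.start != t.start) {
            // Two different trails reach the same distinguished point, so they
            // merged somewhere; the GPU code walks both trails forward in
            // lockstep to locate the merge step, which yields a collision of
            // the step function (unless one trail merely started on the other).
            printf("colliding trails: starts 0x%08x / 0x%08x, endpoint 0x%08x\n",
                   it->second.start, t.start, t.end);
            return 0;
        }
        by_endpoint.emplace(t.end, t);
    }
    printf("no colliding trails found for these seeds\n");
    return 0;
}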
#ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudaarithm.hpp" #include "opencv2/cudev.hpp" #include "opencv2/core/private.cuda.hpp" using namespace cv; using namespace cv::cuda; using namespace cv::cudev; namespace { template <typename T1, typename T2, typename D, typename S> struct AddWeightedOp : binary_function<T1, T2, D> { S alpha; S beta; S gamma; __device__ __forceinline__ D operator ()(T1 a, T2 b) const { return cudev::saturate_cast<D>(a * alpha + b * beta + gamma); } }; template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy { }; template <> struct TransformPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T1, typename T2, typename D> void addWeightedImpl(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream) { typedef typename LargerType<T1, T2>::type larger_type1; typedef typename LargerType<larger_type1, D>::type larger_type2; typedef typename LargerType<larger_type2, float>::type scalar_type; AddWeightedOp<T1, T2, D, scalar_type> op; op.alpha = static_cast<scalar_type>(alpha); op.beta = static_cast<scalar_type>(beta); op.gamma = static_cast<scalar_type>(gamma); gridTransformBinary_< TransformPolicy<scalar_type> >(globPtr<T1>(src1), globPtr<T2>(src2), globPtr<D>(dst), op, stream); } } void cv::cuda::addWeighted(InputArray _src1, double alpha, InputArray _src2, double beta, double gamma, OutputArray _dst, int ddepth, Stream& stream) { typedef void (*func_t)(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream); static const func_t funcs[7][7][7] = { { { addWeightedImpl<uchar, uchar, uchar >, addWeightedImpl<uchar, uchar, schar >, addWeightedImpl<uchar, uchar, ushort>, addWeightedImpl<uchar, uchar, short >, addWeightedImpl<uchar, uchar, int >, addWeightedImpl<uchar, uchar, float >, addWeightedImpl<uchar, uchar, double> }, { addWeightedImpl<uchar, schar, uchar >, addWeightedImpl<uchar, schar, schar >, addWeightedImpl<uchar, schar, ushort>, addWeightedImpl<uchar, schar, short >, addWeightedImpl<uchar, schar, int >, addWeightedImpl<uchar, schar, float >, addWeightedImpl<uchar, schar, double> }, { addWeightedImpl<uchar, ushort, uchar >, addWeightedImpl<uchar, ushort, schar >, addWeightedImpl<uchar, ushort, ushort>, addWeightedImpl<uchar, ushort, short >, addWeightedImpl<uchar, ushort, int >, addWeightedImpl<uchar, ushort, float >, addWeightedImpl<uchar, ushort, double> }, { addWeightedImpl<uchar, short, uchar >, addWeightedImpl<uchar, short, schar >, addWeightedImpl<uchar, short, ushort>, addWeightedImpl<uchar, short, short >, addWeightedImpl<uchar, short, int >, addWeightedImpl<uchar, short, float >, addWeightedImpl<uchar, short, double> }, { addWeightedImpl<uchar, int, uchar >, addWeightedImpl<uchar, int, schar >, addWeightedImpl<uchar, int, ushort>, addWeightedImpl<uchar, int, short >, addWeightedImpl<uchar, int, int >, addWeightedImpl<uchar, int, float >, addWeightedImpl<uchar, int, double> }, { addWeightedImpl<uchar, float, uchar >, addWeightedImpl<uchar, float, schar >, addWeightedImpl<uchar, float, ushort>, addWeightedImpl<uchar, float, short >, addWeightedImpl<uchar, float, int >, addWeightedImpl<uchar, float, float >, addWeightedImpl<uchar, float, double> }, { addWeightedImpl<uchar, double, uchar >, addWeightedImpl<uchar, double, schar >, addWeightedImpl<uchar, double, ushort>, addWeightedImpl<uchar, double, short >, addWeightedImpl<uchar, double, int >, 
addWeightedImpl<uchar, double, float >, addWeightedImpl<uchar, double, double> } }, { { 0/*addWeightedImpl<schar, uchar, uchar >*/, 0/*addWeightedImpl<schar, uchar, schar >*/, 0/*addWeightedImpl<schar, uchar, ushort>*/, 0/*addWeightedImpl<schar, uchar, short >*/, 0/*addWeightedImpl<schar, uchar, int >*/, 0/*addWeightedImpl<schar, uchar, float >*/, 0/*addWeightedImpl<schar, uchar, double>*/ }, { addWeightedImpl<schar, schar, uchar >, addWeightedImpl<schar, schar, schar >, addWeightedImpl<schar, schar, ushort>, addWeightedImpl<schar, schar, short >, addWeightedImpl<schar, schar, int >, addWeightedImpl<schar, schar, float >, addWeightedImpl<schar, schar, double> }, { addWeightedImpl<schar, ushort, uchar >, addWeightedImpl<schar, ushort, schar >, addWeightedImpl<schar, ushort, ushort>, addWeightedImpl<schar, ushort, short >, addWeightedImpl<schar, ushort, int >, addWeightedImpl<schar, ushort, float >, addWeightedImpl<schar, ushort, double> }, { addWeightedImpl<schar, short, uchar >, addWeightedImpl<schar, short, schar >, addWeightedImpl<schar, short, ushort>, addWeightedImpl<schar, short, short >, addWeightedImpl<schar, short, int >, addWeightedImpl<schar, short, float >, addWeightedImpl<schar, short, double> }, { addWeightedImpl<schar, int, uchar >, addWeightedImpl<schar, int, schar >, addWeightedImpl<schar, int, ushort>, addWeightedImpl<schar, int, short >, addWeightedImpl<schar, int, int >, addWeightedImpl<schar, int, float >, addWeightedImpl<schar, int, double> }, { addWeightedImpl<schar, float, uchar >, addWeightedImpl<schar, float, schar >, addWeightedImpl<schar, float, ushort>, addWeightedImpl<schar, float, short >, addWeightedImpl<schar, float, int >, addWeightedImpl<schar, float, float >, addWeightedImpl<schar, float, double> }, { addWeightedImpl<schar, double, uchar >, addWeightedImpl<schar, double, schar >, addWeightedImpl<schar, double, ushort>, addWeightedImpl<schar, double, short >, addWeightedImpl<schar, double, int >, addWeightedImpl<schar, double, float >, addWeightedImpl<schar, double, double> } }, { { 0/*addWeightedImpl<ushort, uchar, uchar >*/, 0/*addWeightedImpl<ushort, uchar, schar >*/, 0/*addWeightedImpl<ushort, uchar, ushort>*/, 0/*addWeightedImpl<ushort, uchar, short >*/, 0/*addWeightedImpl<ushort, uchar, int >*/, 0/*addWeightedImpl<ushort, uchar, float >*/, 0/*addWeightedImpl<ushort, uchar, double>*/ }, { 0/*addWeightedImpl<ushort, schar, uchar >*/, 0/*addWeightedImpl<ushort, schar, schar >*/, 0/*addWeightedImpl<ushort, schar, ushort>*/, 0/*addWeightedImpl<ushort, schar, short >*/, 0/*addWeightedImpl<ushort, schar, int >*/, 0/*addWeightedImpl<ushort, schar, float >*/, 0/*addWeightedImpl<ushort, schar, double>*/ }, { addWeightedImpl<ushort, ushort, uchar >, addWeightedImpl<ushort, ushort, schar >, addWeightedImpl<ushort, ushort, ushort>, addWeightedImpl<ushort, ushort, short >, addWeightedImpl<ushort, ushort, int >, addWeightedImpl<ushort, ushort, float >, addWeightedImpl<ushort, ushort, double> }, { addWeightedImpl<ushort, short, uchar >, addWeightedImpl<ushort, short, schar >, addWeightedImpl<ushort, short, ushort>, addWeightedImpl<ushort, short, short >, addWeightedImpl<ushort, short, int >, addWeightedImpl<ushort, short, float >, addWeightedImpl<ushort, short, double> }, { addWeightedImpl<ushort, int, uchar >, addWeightedImpl<ushort, int, schar >, addWeightedImpl<ushort, int, ushort>, addWeightedImpl<ushort, int, short >, addWeightedImpl<ushort, int, int >, addWeightedImpl<ushort, int, float >, addWeightedImpl<ushort, int, double> }, { addWeightedImpl<ushort, 
float, uchar >, addWeightedImpl<ushort, float, schar >, addWeightedImpl<ushort, float, ushort>, addWeightedImpl<ushort, float, short >, addWeightedImpl<ushort, float, int >, addWeightedImpl<ushort, float, float >, addWeightedImpl<ushort, float, double> }, { addWeightedImpl<ushort, double, uchar >, addWeightedImpl<ushort, double, schar >, addWeightedImpl<ushort, double, ushort>, addWeightedImpl<ushort, double, short >, addWeightedImpl<ushort, double, int >, addWeightedImpl<ushort, double, float >, addWeightedImpl<ushort, double, double> } }, { { 0/*addWeightedImpl<short, uchar, uchar >*/, 0/*addWeightedImpl<short, uchar, schar >*/, 0/*addWeightedImpl<short, uchar, ushort>*/, 0/*addWeightedImpl<short, uchar, short >*/, 0/*addWeightedImpl<short, uchar, int >*/, 0/*addWeightedImpl<short, uchar, float >*/, 0/*addWeightedImpl<short, uchar, double>*/ }, { 0/*addWeightedImpl<short, schar, uchar >*/, 0/*addWeightedImpl<short, schar, schar >*/, 0/*addWeightedImpl<short, schar, ushort>*/, 0/*addWeightedImpl<short, schar, short >*/, 0/*addWeightedImpl<short, schar, int >*/, 0/*addWeightedImpl<short, schar, float >*/, 0/*addWeightedImpl<short, schar, double>*/ }, { 0/*addWeightedImpl<short, ushort, uchar >*/, 0/*addWeightedImpl<short, ushort, schar >*/, 0/*addWeightedImpl<short, ushort, ushort>*/, 0/*addWeightedImpl<short, ushort, short >*/, 0/*addWeightedImpl<short, ushort, int >*/, 0/*addWeightedImpl<short, ushort, float >*/, 0/*addWeightedImpl<short, ushort, double>*/ }, { addWeightedImpl<short, short, uchar >, addWeightedImpl<short, short, schar >, addWeightedImpl<short, short, ushort>, addWeightedImpl<short, short, short >, addWeightedImpl<short, short, int >, addWeightedImpl<short, short, float >, addWeightedImpl<short, short, double> }, { addWeightedImpl<short, int, uchar >, addWeightedImpl<short, int, schar >, addWeightedImpl<short, int, ushort>, addWeightedImpl<short, int, short >, addWeightedImpl<short, int, int >, addWeightedImpl<short, int, float >, addWeightedImpl<short, int, double> }, { addWeightedImpl<short, float, uchar >, addWeightedImpl<short, float, schar >, addWeightedImpl<short, float, ushort>, addWeightedImpl<short, float, short >, addWeightedImpl<short, float, int >, addWeightedImpl<short, float, float >, addWeightedImpl<short, float, double> }, { addWeightedImpl<short, double, uchar >, addWeightedImpl<short, double, schar >, addWeightedImpl<short, double, ushort>, addWeightedImpl<short, double, short >, addWeightedImpl<short, double, int >, addWeightedImpl<short, double, float >, addWeightedImpl<short, double, double> } }, { { 0/*addWeightedImpl<int, uchar, uchar >*/, 0/*addWeightedImpl<int, uchar, schar >*/, 0/*addWeightedImpl<int, uchar, ushort>*/, 0/*addWeightedImpl<int, uchar, short >*/, 0/*addWeightedImpl<int, uchar, int >*/, 0/*addWeightedImpl<int, uchar, float >*/, 0/*addWeightedImpl<int, uchar, double>*/ }, { 0/*addWeightedImpl<int, schar, uchar >*/, 0/*addWeightedImpl<int, schar, schar >*/, 0/*addWeightedImpl<int, schar, ushort>*/, 0/*addWeightedImpl<int, schar, short >*/, 0/*addWeightedImpl<int, schar, int >*/, 0/*addWeightedImpl<int, schar, float >*/, 0/*addWeightedImpl<int, schar, double>*/ }, { 0/*addWeightedImpl<int, ushort, uchar >*/, 0/*addWeightedImpl<int, ushort, schar >*/, 0/*addWeightedImpl<int, ushort, ushort>*/, 0/*addWeightedImpl<int, ushort, short >*/, 0/*addWeightedImpl<int, ushort, int >*/, 0/*addWeightedImpl<int, ushort, float >*/, 0/*addWeightedImpl<int, ushort, double>*/ }, { 0/*addWeightedImpl<int, short, uchar >*/, 0/*addWeightedImpl<int, short, 
schar >*/, 0/*addWeightedImpl<int, short, ushort>*/, 0/*addWeightedImpl<int, short, short >*/, 0/*addWeightedImpl<int, short, int >*/, 0/*addWeightedImpl<int, short, float >*/, 0/*addWeightedImpl<int, short, double>*/ }, { addWeightedImpl<int, int, uchar >, addWeightedImpl<int, int, schar >, addWeightedImpl<int, int, ushort>, addWeightedImpl<int, int, short >, addWeightedImpl<int, int, int >, addWeightedImpl<int, int, float >, addWeightedImpl<int, int, double> }, { addWeightedImpl<int, float, uchar >, addWeightedImpl<int, float, schar >, addWeightedImpl<int, float, ushort>, addWeightedImpl<int, float, short >, addWeightedImpl<int, float, int >, addWeightedImpl<int, float, float >, addWeightedImpl<int, float, double> }, { addWeightedImpl<int, double, uchar >, addWeightedImpl<int, double, schar >, addWeightedImpl<int, double, ushort>, addWeightedImpl<int, double, short >, addWeightedImpl<int, double, int >, addWeightedImpl<int, double, float >, addWeightedImpl<int, double, double> } }, { { 0/*addWeightedImpl<float, uchar, uchar >*/, 0/*addWeightedImpl<float, uchar, schar >*/, 0/*addWeightedImpl<float, uchar, ushort>*/, 0/*addWeightedImpl<float, uchar, short >*/, 0/*addWeightedImpl<float, uchar, int >*/, 0/*addWeightedImpl<float, uchar, float >*/, 0/*addWeightedImpl<float, uchar, double>*/ }, { 0/*addWeightedImpl<float, schar, uchar >*/, 0/*addWeightedImpl<float, schar, schar >*/, 0/*addWeightedImpl<float, schar, ushort>*/, 0/*addWeightedImpl<float, schar, short >*/, 0/*addWeightedImpl<float, schar, int >*/, 0/*addWeightedImpl<float, schar, float >*/, 0/*addWeightedImpl<float, schar, double>*/ }, { 0/*addWeightedImpl<float, ushort, uchar >*/, 0/*addWeightedImpl<float, ushort, schar >*/, 0/*addWeightedImpl<float, ushort, ushort>*/, 0/*addWeightedImpl<float, ushort, short >*/, 0/*addWeightedImpl<float, ushort, int >*/, 0/*addWeightedImpl<float, ushort, float >*/, 0/*addWeightedImpl<float, ushort, double>*/ }, { 0/*addWeightedImpl<float, short, uchar >*/, 0/*addWeightedImpl<float, short, schar >*/, 0/*addWeightedImpl<float, short, ushort>*/, 0/*addWeightedImpl<float, short, short >*/, 0/*addWeightedImpl<float, short, int >*/, 0/*addWeightedImpl<float, short, float >*/, 0/*addWeightedImpl<float, short, double>*/ }, { 0/*addWeightedImpl<float, int, uchar >*/, 0/*addWeightedImpl<float, int, schar >*/, 0/*addWeightedImpl<float, int, ushort>*/, 0/*addWeightedImpl<float, int, short >*/, 0/*addWeightedImpl<float, int, int >*/, 0/*addWeightedImpl<float, int, float >*/, 0/*addWeightedImpl<float, int, double>*/ }, { addWeightedImpl<float, float, uchar >, addWeightedImpl<float, float, schar >, addWeightedImpl<float, float, ushort>, addWeightedImpl<float, float, short >, addWeightedImpl<float, float, int >, addWeightedImpl<float, float, float >, addWeightedImpl<float, float, double> }, { addWeightedImpl<float, double, uchar >, addWeightedImpl<float, double, schar >, addWeightedImpl<float, double, ushort>, addWeightedImpl<float, double, short >, addWeightedImpl<float, double, int >, addWeightedImpl<float, double, float >, addWeightedImpl<float, double, double> } }, { { 0/*addWeightedImpl<double, uchar, uchar >*/, 0/*addWeightedImpl<double, uchar, schar >*/, 0/*addWeightedImpl<double, uchar, ushort>*/, 0/*addWeightedImpl<double, uchar, short >*/, 0/*addWeightedImpl<double, uchar, int >*/, 0/*addWeightedImpl<double, uchar, float >*/, 0/*addWeightedImpl<double, uchar, double>*/ }, { 0/*addWeightedImpl<double, schar, uchar >*/, 0/*addWeightedImpl<double, schar, schar >*/, 0/*addWeightedImpl<double, schar, 
ushort>*/, 0/*addWeightedImpl<double, schar, short >*/, 0/*addWeightedImpl<double, schar, int >*/, 0/*addWeightedImpl<double, schar, float >*/, 0/*addWeightedImpl<double, schar, double>*/ }, { 0/*addWeightedImpl<double, ushort, uchar >*/, 0/*addWeightedImpl<double, ushort, schar >*/, 0/*addWeightedImpl<double, ushort, ushort>*/, 0/*addWeightedImpl<double, ushort, short >*/, 0/*addWeightedImpl<double, ushort, int >*/, 0/*addWeightedImpl<double, ushort, float >*/, 0/*addWeightedImpl<double, ushort, double>*/ }, { 0/*addWeightedImpl<double, short, uchar >*/, 0/*addWeightedImpl<double, short, schar >*/, 0/*addWeightedImpl<double, short, ushort>*/, 0/*addWeightedImpl<double, short, short >*/, 0/*addWeightedImpl<double, short, int >*/, 0/*addWeightedImpl<double, short, float >*/, 0/*addWeightedImpl<double, short, double>*/ }, { 0/*addWeightedImpl<double, int, uchar >*/, 0/*addWeightedImpl<double, int, schar >*/, 0/*addWeightedImpl<double, int, ushort>*/, 0/*addWeightedImpl<double, int, short >*/, 0/*addWeightedImpl<double, int, int >*/, 0/*addWeightedImpl<double, int, float >*/, 0/*addWeightedImpl<double, int, double>*/ }, { 0/*addWeightedImpl<double, float, uchar >*/, 0/*addWeightedImpl<double, float, schar >*/, 0/*addWeightedImpl<double, float, ushort>*/, 0/*addWeightedImpl<double, float, short >*/, 0/*addWeightedImpl<double, float, int >*/, 0/*addWeightedImpl<double, float, float >*/, 0/*addWeightedImpl<double, float, double>*/ }, { addWeightedImpl<double, double, uchar >, addWeightedImpl<double, double, schar >, addWeightedImpl<double, double, ushort>, addWeightedImpl<double, double, short >, addWeightedImpl<double, double, int >, addWeightedImpl<double, double, float >, addWeightedImpl<double, double, double> } } }; GpuMat src1 = getInputMat(_src1, stream); GpuMat src2 = getInputMat(_src2, stream); int sdepth1 = src1.depth(); int sdepth2 = src2.depth(); ddepth = ddepth >= 0 ? CV_MAT_DEPTH(ddepth) : std::max(sdepth1, sdepth2); const int cn = src1.channels(); CV_Assert( src2.size() == src1.size() && src2.channels() == cn ); CV_Assert( sdepth1 <= CV_64F && sdepth2 <= CV_64F && ddepth <= CV_64F ); GpuMat dst = getOutputMat(_dst, src1.size(), CV_MAKE_TYPE(ddepth, cn), stream); GpuMat src1_single = src1.reshape(1); GpuMat src2_single = src2.reshape(1); GpuMat dst_single = dst.reshape(1); if (sdepth1 > sdepth2) { src1_single.swap(src2_single); std::swap(alpha, beta); std::swap(sdepth1, sdepth2); } const func_t func = funcs[sdepth1][sdepth2][ddepth]; if (!func) CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types"); func(src1_single, alpha, src2_single, beta, gamma, dst_single, stream); syncOutput(dst, _dst, stream); } #endif
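// Minimal usage sketch of cv::cuda::addWeighted() as implemented above, assuming the
// OpenCV CUDA headers already included in this file; the matrix sizes and scalars are
// illustrative only. With ddepth passed as -1 the destination depth is promoted to
// max(sdepth1, sdepth2) (CV_32F here), and funcs[sdepth1][sdepth2][ddepth] selects the
// matching addWeightedImpl instantiation, swapping src1/src2 (and alpha/beta) first when
// sdepth1 > sdepth2 so only the upper triangle of the table needs real entries.
static void addweighted_usage_sketch()
{
    cv::Mat h_a(480, 640, CV_8UC1,  cv::Scalar(10));    // 8-bit source
    cv::Mat h_b(480, 640, CV_32FC1, cv::Scalar(0.5));   // 32-bit float source

    cv::cuda::GpuMat d_a(h_a), d_b(h_b), d_dst;

    // d_dst = 0.7*d_a + 0.3*d_b + 2.0, destination depth deduced as CV_32F
    cv::cuda::addWeighted(d_a, 0.7, d_b, 0.3, 2.0, d_dst, -1);

    cv::Mat h_dst;
    d_dst.download(h_dst);  // h_dst is CV_32FC1
}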
#include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_X 8 #define BLOCK_Y 4 #define BLOCK_Z 4 using namespace cv; namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } // Only use it with unsigned numeric types //template <typename T> //__device__ __forceinline__ void SetBit(T &bitmap, unsigned char pos) { // bitmap |= (1 << pos); //} // Returns the root index of the UFTree __device__ unsigned Find(const int *s_buf, unsigned n) { while (s_buf[n] != n) { n = s_buf[n]; } return n; } __device__ unsigned FindAndCompress(int *s_buf, unsigned n) { unsigned id = n; while (s_buf[n] != n) { n = s_buf[n]; s_buf[id] = n; } return n; } // Merges the UFTrees of a and b, linking one root to the other __device__ void Union(int *s_buf, unsigned a, unsigned b) { bool done; do { a = Find(s_buf, a); b = Find(s_buf, b); if (a < b) { int old = atomicMin(s_buf + b, a); done = (old == b); b = old; } else if (b < a) { int old = atomicMin(s_buf + a, b); done = (old == a); a = old; } else { done = true; } } while (!done); } __global__ void InitLabeling(cuda::PtrStepSz3i labels) { unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2; unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2; unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { labels[labels_index] = labels_index; } } __global__ void Merge(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels, unsigned char* last_cube_fg) { unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2; unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2; unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2; unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x; unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { const unsigned long long P0 = 0x77707770777; unsigned long long P = 0ULL; unsigned char foreground = 0; unsigned short buffer; { if (x + 1 < img.x) { buffer = *reinterpret_cast<unsigned short *>(img.data + img_index); if (buffer & 1) { P |= P0; foreground |= 1; } if (buffer & (1 << 8)) { P |= (P0 << 1); foreground |= (1 << 1); } if (y + 1 < img.y) { buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepy / img.elem_size); if (buffer & 1) { P |= (P0 << 4); foreground |= (1 << 2); } if (buffer & (1 << 8)) { P |= (P0 << 5); foreground |= (1 << 3); } } if (z + 1 < img.z) { buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepz / img.elem_size); if (buffer & 1) { P |= (P0 << 16); foreground |= (1 << 4); } if (buffer & (1 << 8)) { P |= (P0 << 17); foreground |= (1 << 5); } if (y + 1 < img.y) { buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepz / img.elem_size + img.stepy / img.elem_size); if (buffer & 1) { P |= (P0 << 20); foreground |= (1 << 6); } if (buffer & (1 << 8)) { P |= (P0 << 21); foreground |= (1 << 7); } } } } else { if (img[img_index]) { P |= P0; foreground |= 1; } if (y + 1 < labels.y) { if (img[img_index + img.stepy / img.elem_size]) { P |= (P0 << 4); foreground |= (1 << 2); } } if (z + 1 < labels.z) { if (img[img_index + img.stepz / 
img.elem_size]) { P |= (P0 << 16); foreground |= (1 << 4); } if (y + 1 < labels.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) { P |= (P0 << 20); foreground |= (1 << 6); } } } } } /* { if (img[img_index]) { P |= P0; foreground |= 1; } if (x + 1 < img.x) { if (img[img_index + 1]) { P |= (P0 << 1); foreground |= (1 << 1); } if (y + 1 < img.y && img[img_index + img.stepy / img.elem_size + 1]) { P |= (P0 << 5); foreground |= (1 << 3); } } if (y + 1 < img.y) { if (img[img_index + img.stepy / img.elem_size]) { P |= (P0 << 4); foreground |= (1 << 2); } } if (z + 1 < img.z) { if (img[img_index + img.stepz / img.elem_size]) { P |= (P0 << 16); foreground |= (1 << 4); } if (x + 1 < img.x) { if (img[img_index + img.stepz / img.elem_size + 1]) { P |= (P0 << 17); foreground |= (1 << 5); } if (y + 1 < img.y && img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) { P |= (P0 << 21); foreground |= (1 << 7); } } if (y + 1 < img.y) { if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) { P |= (P0 << 20); foreground |= (1 << 6); } } } }*/ // Store foreground voxels bitmask into memory if (x + 1 < labels.x) { labels[labels_index + 1] = foreground; } else if (y + 1 < labels.y) { labels[labels_index + labels.stepy / labels.elem_size] = foreground; } else if (z + 1 < labels.z) { labels[labels_index + labels.stepz / labels.elem_size] = foreground; } else { *last_cube_fg = foreground; } // checks on borders if (x == 0) { P &= 0xEEEEEEEEEEEEEEEE; } if (x + 1 >= img.x) { P &= 0x3333333333333333; } else if (x + 2 >= img.x) { P &= 0x7777777777777777; } if (y == 0) { P &= 0xFFF0FFF0FFF0FFF0; } if (y + 1 >= img.y) { P &= 0x00FF00FF00FF00FF; } else if (y + 2 >= img.y) { P &= 0x0FFF0FFF0FFF0FFF; } if (z == 0) { P &= 0xFFFFFFFFFFFF0000; } if (z + 1 >= img.z) { P &= 0x00000000FFFFFFFF; } //else if (z + 2 >= img.z) { // P &= 0x0000FFFFFFFFFFFF; //} // P is now ready to be used to find neighbour blocks // P value avoids range errors if (P > 0) { // Lower plane unsigned char * plane_data = img.data + img_index - img.stepz; unsigned lower_plane_index = labels_index - 2 * (labels.stepz / labels.elem_size); if (HasBit(P, 0) && plane_data[0 - img.stepy - 1]) { Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size + 1)); } if ((HasBit(P, 1) && plane_data[0 - img.stepy]) || (HasBit(P, 2) && plane_data[0 - img.stepy + 1])) { Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size)); } if (HasBit(P, 3) && plane_data[0 - img.stepy + 2]) { Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size - 1)); } if ((HasBit(P, 4) && plane_data[-1]) || (HasBit(P, 8) && plane_data[img.stepy - 1])) { Union(labels.data, labels_index, lower_plane_index - 2); } if ((HasBit(P, 5) && plane_data[0]) || (HasBit(P, 6) && plane_data[1]) || (HasBit(P, 9) && plane_data[img.stepy]) || (HasBit(P, 10) && plane_data[img.stepy + 1])) { Union(labels.data, labels_index, lower_plane_index); } if ((HasBit(P, 7) && plane_data[2]) || (HasBit(P, 11) && plane_data[img.stepy + 2])) { Union(labels.data, labels_index, lower_plane_index + 2); } if (HasBit(P, 12) && plane_data[2 * img.stepy - 1]) { Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size - 1)); } if ((HasBit(P, 13) && plane_data[2 * img.stepy]) || (HasBit(P, 14) && plane_data[2 * img.stepy + 1])) { Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size)); } if 
(HasBit(P, 15) && plane_data[2 * img.stepy + 2]) { Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size + 1)); } // Current planes plane_data += img.stepz; if ((HasBit(P, 16) && plane_data[0 - img.stepy - 1]) || (HasBit(P, 32) && plane_data[img.stepz - img.stepy - 1])) { Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size + 1)); } if ((HasBit(P, 17) && plane_data[0 - img.stepy]) || (HasBit(P, 18) && plane_data[0 - img.stepy + 1]) || (HasBit(P, 33) && plane_data[img.stepz - img.stepy]) || (HasBit(P, 34) && plane_data[img.stepz - img.stepy + 1])) { Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size)); } if ((HasBit(P, 19) && plane_data[0 - img.stepy + 2]) || (HasBit(P, 35) && plane_data[img.stepz - img.stepy + 2])) { Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size - 1)); } if ((HasBit(P, 20) && plane_data[-1]) || (HasBit(P, 24) && plane_data[img.stepy - 1]) || (HasBit(P, 36) && plane_data[img.stepz - 1]) || (HasBit(P, 40) && plane_data[img.stepz + img.stepy - 1])) { Union(labels.data, labels_index, labels_index - 2); } } } } __global__ void PathCompression(cuda::PtrStepSz3i labels) { unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x); unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y); unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z); unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { FindAndCompress(labels.data, labels_index); } } __global__ void FinalLabeling(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels, unsigned char* last_cube_fg) { unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x); unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y); unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z); unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x; if (x < labels.x && y < labels.y && z < labels.z) { int label; unsigned char foreground; unsigned long long buffer; if (x + 1 < labels.x) { buffer = *reinterpret_cast<unsigned long long *>(labels.data + labels_index); label = (buffer & (0xFFFFFFFF)) + 1; foreground = (buffer >> 32) & 0xFFFFFFFF; } else { label = labels[labels_index] + 1; if (y + 1 < labels.y) { foreground = labels[labels_index + labels.stepy / labels.elem_size]; } else if (z + 1 < labels.z) { foreground = labels[labels_index + labels.stepz / labels.elem_size]; } else { foreground = *last_cube_fg; } } if (x + 1 < labels.x) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index) = (static_cast<unsigned long long>(((foreground >> 1) & 1) * label) << 32) | (((foreground >> 0) & 1) * label); if (y + 1 < labels.y) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepy / labels.elem_size) = (static_cast<unsigned long long>(((foreground >> 3) & 1) * label) << 32) | (((foreground >> 2) & 1) * label); } if (z + 1 < labels.z) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepz / labels.elem_size) = (static_cast<unsigned long long>(((foreground >> 5) & 1) * label) << 32) | (((foreground >> 4) & 1) * label); if (y + 1 < labels.y) { *reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)) = (static_cast<unsigned long long>(((foreground >> 7) & 1) * label) << 32) | (((foreground >> 6) & 1) * label); } } } else { 
labels[labels_index] = ((foreground >> 0) & 1) * label; if (y + 1 < labels.y) { labels[labels_index + (labels.stepy / labels.elem_size)] = ((foreground >> 2) & 1) * label; } if (z + 1 < labels.z) { labels[labels_index + labels.stepz / labels.elem_size] = ((foreground >> 4) & 1) * label; if (y + 1 < labels.y) { labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = ((foreground >> 6) & 1) * label; } } } } } } class BUF_IC_3D : public GpuLabeling3D<Connectivity3D::CONN_26> { private: dim3 grid_size_; dim3 block_size_; unsigned char* last_cube_fg_; bool allocated_last_cude_fg_; public: BUF_IC_3D() {} void PerformLabeling() { d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); allocated_last_cude_fg_ = false; if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) { if (d_img_.x > 1 && d_img_.y > 1) { last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.x > 1 && d_img_.z > 1) { last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.y > 1 && d_img_.z > 1) { last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 1; } else { cudaMalloc(&last_cube_fg_, sizeof(unsigned char)); allocated_last_cude_fg_ = true; } } grid_size_ = dim3(((d_img_.x + 1) / 2 + BLOCK_X - 1) / BLOCK_X, ((d_img_.y + 1) / 2 + BLOCK_Y - 1) / BLOCK_Y, ((d_img_.z + 1) / 2 + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); PathCompression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_); if (allocated_last_cude_fg_) { cudaFree(last_cube_fg_); } // d_img_labels_.download(img_labels_); cudaDeviceSynchronize(); } private: void Alloc() { d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1); allocated_last_cude_fg_ = false; if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) { if (d_img_.x > 1 && d_img_.y > 1) { last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.x > 1 && d_img_.z > 1) { last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2; } else if (d_img_.y > 1 && d_img_.z > 1) { last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) 
+ d_img_labels_.x - 1; } else { cudaMalloc(&last_cube_fg_, sizeof(unsigned char)); allocated_last_cude_fg_ = true; } } } void Dealloc() { if (allocated_last_cude_fg_) { cudaFree(last_cube_fg_); } } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3(((d_img_.x + 1) / 2 + BLOCK_X - 1) / BLOCK_X, ((d_img_.y + 1) / 2 + BLOCK_Y - 1) / BLOCK_Y, ((d_img_.z + 1) / 2 + BLOCK_Z - 1) / BLOCK_Z); block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z); InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_); //Mat1i block_info_final; //d_img_labels_.download(block_info_final); PathCompression << <grid_size_, block_size_ >> > (d_img_labels_); FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { perf_.start(); Alloc(); perf_.stop(); double alloc_timing = perf_.last(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); perf_.start(); Dealloc(); perf_.stop(); double dealloc_timing = perf_.last(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BUF_IC_3D);
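// Minimal host-side sketch of the union-find primitives used by the Merge kernel above,
// assuming a single thread so the atomicMin-based linking collapses to a plain
// comparison (these helpers are illustrative and not part of the original file).
// Each entry of s_buf holds the parent of one 2x2x2 block; after path compression every
// entry points at its root, which FinalLabeling then expands into per-voxel labels.
static unsigned HostFind(const int* s_buf, unsigned n)
{
    while (s_buf[n] != static_cast<int>(n)) { n = s_buf[n]; }
    return n;
}

static void HostUnion(int* s_buf, unsigned a, unsigned b)
{
    a = HostFind(s_buf, a);
    b = HostFind(s_buf, b);
    if (a < b)      { s_buf[b] = static_cast<int>(a); }  // smaller index becomes the root,
    else if (b < a) { s_buf[a] = static_cast<int>(b); }  // mirroring the device Union()
}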
* @file writer_impl.cu * @brief cuDF-IO parquet writer class implementation */ #include "writer_impl.hpp" #include "compact_protocol_reader.hpp" #include "compact_protocol_writer.hpp" #include <io/statistics/column_statistics.cuh> #include <io/utilities/column_utils.cuh> #include <io/utilities/config_utils.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/column.hpp> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/null_mask.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/table/table_device_view.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> #include <nvcomp/snappy.h> #include <thrust/binary_search.h> #include <thrust/for_each.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <algorithm> #include <cstring> #include <numeric> #include <utility> namespace cudf { namespace io { namespace detail { namespace parquet { using namespace cudf::io::parquet; using namespace cudf::io; namespace { /** * @brief Helper for pinned host memory */ template <typename T> using pinned_buffer = std::unique_ptr<T, decltype(&cudaFreeHost)>; /** * @brief Function that translates GDF compression to parquet compression */ parquet::Compression to_parquet_compression(compression_type compression) { switch (compression) { case compression_type::AUTO: case compression_type::SNAPPY: return parquet::Compression::SNAPPY; case compression_type::NONE: return parquet::Compression::UNCOMPRESSED; default: CUDF_FAIL("Unsupported compression type"); } } } // namespace struct aggregate_writer_metadata { aggregate_writer_metadata(std::vector<partition_info> const& partitions, size_type num_columns, std::vector<SchemaElement> schema, statistics_freq stats_granularity, std::vector<std::map<std::string, std::string>> const& kv_md) : version(1), schema(std::move(schema)), files(partitions.size()) { for (size_t i = 0; i < partitions.size(); ++i) { this->files[i].num_rows = partitions[i].num_rows; } this->column_order_listsize = (stats_granularity != statistics_freq::STATISTICS_NONE) ? 
num_columns : 0; for (size_t p = 0; p < kv_md.size(); ++p) { std::transform(kv_md[p].begin(), kv_md[p].end(), std::back_inserter(this->files[p].key_value_metadata), [](auto const& kv) { return KeyValue{kv.first, kv.second}; }); } } void update_files(std::vector<partition_info> const& partitions) { CUDF_EXPECTS(partitions.size() == this->files.size(), "New partitions must be same size as previously passed number of partitions"); for (size_t i = 0; i < partitions.size(); ++i) { this->files[i].num_rows += partitions[i].num_rows; } } FileMetaData get_metadata(size_t part) { CUDF_EXPECTS(part < files.size(), "Invalid part index queried"); FileMetaData meta{}; meta.version = this->version; meta.schema = this->schema; meta.num_rows = this->files[part].num_rows; meta.row_groups = this->files[part].row_groups; meta.key_value_metadata = this->files[part].key_value_metadata; meta.created_by = this->created_by; meta.column_order_listsize = this->column_order_listsize; return meta; } void set_file_paths(std::vector<std::string> const& column_chunks_file_path) { for (size_t p = 0; p < this->files.size(); ++p) { auto& file = this->files[p]; auto const& file_path = column_chunks_file_path[p]; for (auto& rowgroup : file.row_groups) { for (auto& col : rowgroup.columns) { col.file_path = file_path; } } } } FileMetaData get_merged_metadata() { FileMetaData merged_md; for (size_t p = 0; p < this->files.size(); ++p) { auto& file = this->files[p]; if (p == 0) { merged_md = this->get_metadata(0); } else { merged_md.row_groups.insert(merged_md.row_groups.end(), std::make_move_iterator(file.row_groups.begin()), std::make_move_iterator(file.row_groups.end())); merged_md.num_rows += file.num_rows; } } return merged_md; } std::vector<size_t> num_row_groups_per_file() { std::vector<size_t> global_rowgroup_base; std::transform(this->files.begin(), this->files.end(), std::back_inserter(global_rowgroup_base), [](auto const& part) { return part.row_groups.size(); }); return global_rowgroup_base; } [[nodiscard]] bool schema_matches(std::vector<SchemaElement> const& schema) const { return this->schema == schema; } auto& file(size_t p) { return files[p]; } [[nodiscard]] size_t num_files() const { return files.size(); } private: int32_t version = 0; std::vector<SchemaElement> schema; struct per_file_metadata { int64_t num_rows = 0; std::vector<RowGroup> row_groups; std::vector<KeyValue> key_value_metadata; }; std::vector<per_file_metadata> files; std::string created_by = ""; uint32_t column_order_listsize = 0; }; /** * @brief Extends SchemaElement to add members required in constructing parquet_column_view * * Added members are: * 1. leaf_column: Pointer to leaf linked_column_view which points to the corresponding data stream * of a leaf schema node. For non-leaf struct node, this is nullptr. * 2. stats_dtype: datatype for statistics calculation required for the data stream of a leaf node. * 3. ts_scale: scale to multiply or divide timestamp by in order to convert timestamp to parquet * supported types */ struct schema_tree_node : public SchemaElement { cudf::detail::LinkedColPtr leaf_column; statistics_dtype stats_dtype; int32_t ts_scale; // TODO(fut): Think about making schema a class that holds a vector of schema_tree_nodes. The // function construct_schema_tree could be its constructor. It can have method to get the per // column nullability given a schema node index corresponding to a leaf schema. 
Much easier than // that is a method to get path in schema, given a leaf node }; struct leaf_schema_fn { schema_tree_node& col_schema; cudf::detail::LinkedColPtr const& col; column_in_metadata const& col_meta; bool timestamp_is_int96; template <typename T> std::enable_if_t<std::is_same_v<T, bool>, void> operator()() { col_schema.type = Type::BOOLEAN; col_schema.stats_dtype = statistics_dtype::dtype_bool; } template <typename T> std::enable_if_t<std::is_same_v<T, int8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; } template <typename T> std::enable_if_t<std::is_same_v<T, int16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::INT_16; col_schema.stats_dtype = statistics_dtype::dtype_int16; } template <typename T> std::enable_if_t<std::is_same_v<T, int32_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, int64_t>, void> operator()() { col_schema.type = Type::INT64; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, uint8_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_8; col_schema.stats_dtype = statistics_dtype::dtype_int8; } template <typename T> std::enable_if_t<std::is_same_v<T, uint16_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_16; col_schema.stats_dtype = statistics_dtype::dtype_int16; } template <typename T> std::enable_if_t<std::is_same_v<T, uint32_t>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::UINT_32; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, uint64_t>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::UINT_64; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, float>, void> operator()() { col_schema.type = Type::FLOAT; col_schema.stats_dtype = statistics_dtype::dtype_float32; } template <typename T> std::enable_if_t<std::is_same_v<T, double>, void> operator()() { col_schema.type = Type::DOUBLE; col_schema.stats_dtype = statistics_dtype::dtype_float64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::string_view>, void> operator()() { col_schema.type = Type::BYTE_ARRAY; col_schema.converted_type = ConvertedType::UTF8; col_schema.stats_dtype = statistics_dtype::dtype_string; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_D>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::DATE; col_schema.stats_dtype = statistics_dtype::dtype_int32; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_s>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; col_schema.ts_scale = 1000; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_ms>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? 
ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_us>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = (timestamp_is_int96) ? ConvertedType::UNKNOWN : ConvertedType::TIMESTAMP_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::timestamp_ns>, void> operator()() { col_schema.type = (timestamp_is_int96) ? Type::INT96 : Type::INT64; col_schema.converted_type = ConvertedType::UNKNOWN; col_schema.stats_dtype = statistics_dtype::dtype_timestamp64; if (timestamp_is_int96) { col_schema.ts_scale = -1000; // negative value indicates division by absolute value } // set logical type if it's not int96 else { col_schema.logical_type.isset.TIMESTAMP = true; col_schema.logical_type.TIMESTAMP.unit.isset.NANOS = true; } } // unsupported outside cudf for parquet 1.0. template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_D>, void> operator()() { col_schema.type = Type::INT32; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_s>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; col_schema.ts_scale = 1000; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_ms>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MILLIS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_us>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_int64; } // unsupported outside cudf for parquet 1.0. 
template <typename T> std::enable_if_t<std::is_same_v<T, cudf::duration_ns>, void> operator()() { col_schema.type = Type::INT64; col_schema.converted_type = ConvertedType::TIME_MICROS; col_schema.stats_dtype = statistics_dtype::dtype_int64; col_schema.ts_scale = -1000; // negative value indicates division by absolute value } template <typename T> std::enable_if_t<cudf::is_fixed_point<T>(), void> operator()() { if (std::is_same_v<T, numeric::decimal32>) { col_schema.type = Type::INT32; col_schema.stats_dtype = statistics_dtype::dtype_int32; col_schema.decimal_precision = 9; } else if (std::is_same_v<T, numeric::decimal64>) { col_schema.type = Type::INT64; col_schema.stats_dtype = statistics_dtype::dtype_decimal64; col_schema.decimal_precision = 18; } else if (std::is_same_v<T, numeric::decimal128>) { col_schema.type = Type::FIXED_LEN_BYTE_ARRAY; col_schema.type_length = sizeof(__int128_t); col_schema.stats_dtype = statistics_dtype::dtype_decimal128; col_schema.decimal_precision = 38; } else { CUDF_FAIL("Unsupported fixed point type for parquet writer"); } col_schema.converted_type = ConvertedType::DECIMAL; col_schema.decimal_scale = -col->type().scale(); // parquet and cudf disagree about scale signs if (col_meta.is_decimal_precision_set()) { CUDF_EXPECTS(col_meta.get_decimal_precision() >= col_schema.decimal_scale, "Precision must be equal to or greater than scale!"); col_schema.decimal_precision = col_meta.get_decimal_precision(); } } template <typename T> std::enable_if_t<cudf::is_nested<T>(), void> operator()() { CUDF_FAIL("This functor is only meant for physical data types"); } template <typename T> std::enable_if_t<cudf::is_dictionary<T>(), void> operator()() { CUDF_FAIL("Dictionary columns are not supported for writing"); } }; inline bool is_col_nullable(cudf::detail::LinkedColPtr const& col, column_in_metadata const& col_meta, bool single_write_mode) { if (single_write_mode) { return col->nullable(); } else { if (col_meta.is_nullability_defined()) { CUDF_EXPECTS(col_meta.nullable() || !col->nullable(), "Mismatch in metadata prescribed nullability and input column nullability. " "Metadata for nullable input column cannot prescribe nullability = false"); return col_meta.nullable(); } else { // For chunked write, when not provided nullability, we assume the worst case scenario // that all columns are nullable. return true; } } } /** * @brief Construct schema from input columns and per-column input options * * Recursively traverses through linked_columns and corresponding metadata to construct schema tree. * The resulting schema tree is stored in a vector in pre-order traversal order. 
*/ std::vector<schema_tree_node> construct_schema_tree( cudf::detail::LinkedColVector const& linked_columns, table_input_metadata& metadata, bool single_write_mode, bool int96_timestamps) { std::vector<schema_tree_node> schema; schema_tree_node root{}; root.type = UNDEFINED_TYPE; root.repetition_type = NO_REPETITION_TYPE; root.name = "schema"; root.num_children = linked_columns.size(); root.parent_idx = -1; // root schema has no parent schema.push_back(std::move(root)); std::function<void(cudf::detail::LinkedColPtr const&, column_in_metadata&, size_t)> add_schema = [&](cudf::detail::LinkedColPtr const& col, column_in_metadata& col_meta, size_t parent_idx) { bool col_nullable = is_col_nullable(col, col_meta, single_write_mode); auto set_field_id = [&schema, parent_idx](schema_tree_node& s, column_in_metadata const& col_meta) { if (schema[parent_idx].name != "list" and col_meta.is_parquet_field_id_set()) { s.field_id = col_meta.get_parquet_field_id(); } }; if (col->type().id() == type_id::STRUCT) { // if struct, add current and recursively call for all children schema_tree_node struct_schema{}; struct_schema.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; struct_schema.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name(); struct_schema.num_children = col->children.size(); struct_schema.parent_idx = parent_idx; set_field_id(struct_schema, col_meta); schema.push_back(std::move(struct_schema)); auto struct_node_index = schema.size() - 1; // for (auto child_it = col->children.begin(); child_it < col->children.end(); child_it++) { // add_schema(*child_it, struct_node_index); // } CUDF_EXPECTS(col->children.size() == static_cast<size_t>(col_meta.num_children()), "Mismatch in number of child columns between input table and metadata"); for (size_t i = 0; i < col->children.size(); ++i) { add_schema(col->children[i], col_meta.child(i), struct_node_index); } } else if (col->type().id() == type_id::LIST && !col_meta.is_map()) { // List schema is denoted by two levels for each nesting level and one final level for leaf. // The top level is the same name as the column name. // So e.g. List<List<int>> is denoted in the schema by // "col_name" : { "list" : { "element" : { "list" : { "element" } } } } schema_tree_node list_schema_1{}; list_schema_1.converted_type = ConvertedType::LIST; list_schema_1.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; list_schema_1.name = (schema[parent_idx].name == "list") ? "element" : col_meta.get_name(); list_schema_1.num_children = 1; list_schema_1.parent_idx = parent_idx; set_field_id(list_schema_1, col_meta); schema.push_back(std::move(list_schema_1)); schema_tree_node list_schema_2{}; list_schema_2.repetition_type = FieldRepetitionType::REPEATED; list_schema_2.name = "list"; list_schema_2.num_children = 1; list_schema_2.parent_idx = schema.size() - 1; // Parent is list_schema_1, last added. schema.push_back(std::move(list_schema_2)); CUDF_EXPECTS(col_meta.num_children() == 2, "List column's metadata should have exactly two children"); add_schema(col->children[lists_column_view::child_column_index], col_meta.child(lists_column_view::child_column_index), schema.size() - 1); } else if (col->type().id() == type_id::LIST && col_meta.is_map()) { // Map schema is denoted by a list of struct // e.g. 
List<Struct<String,String>> will be // "col_name" : { "key_value" : { "key", "value" } } // verify the List child structure is a struct<left_child, right_child> column_view struct_col = *col->children[lists_column_view::child_column_index]; CUDF_EXPECTS(struct_col.type().id() == type_id::STRUCT, "Map should be a List of struct"); CUDF_EXPECTS(struct_col.num_children() == 2, "Map should be a List of struct with two children only but found " + std::to_string(struct_col.num_children())); schema_tree_node map_schema{}; map_schema.converted_type = ConvertedType::MAP; map_schema.repetition_type = col_nullable ? FieldRepetitionType::OPTIONAL : FieldRepetitionType::REQUIRED; map_schema.name = col_meta.get_name(); if (col_meta.is_parquet_field_id_set()) { map_schema.field_id = col_meta.get_parquet_field_id(); } map_schema.num_children = 1; map_schema.parent_idx = parent_idx; schema.push_back(std::move(map_schema)); schema_tree_node repeat_group{}; repeat_group.repetition_type = FieldRepetitionType::REPEATED; repeat_group.name = "key_value"; repeat_group.num_children = 2; repeat_group.parent_idx = schema.size() - 1; // Parent is map_schema, last added. schema.push_back(std::move(repeat_group)); CUDF_EXPECTS(col_meta.num_children() == 2, "List column's metadata should have exactly two children"); CUDF_EXPECTS(col_meta.child(lists_column_view::child_column_index).num_children() == 2, "Map struct column should have exactly two children"); // verify the col meta of children of the struct have name key and value auto& left_child_meta = col_meta.child(lists_column_view::child_column_index).child(0); left_child_meta.set_name("key"); left_child_meta.set_nullability(false); auto& right_child_meta = col_meta.child(lists_column_view::child_column_index).child(1); right_child_meta.set_name("value"); // check the repetition type of key is required i.e. the col should be non-nullable auto key_col = col->children[lists_column_view::child_column_index]->children[0]; CUDF_EXPECTS(!is_col_nullable(key_col, left_child_meta, single_write_mode), "key column cannot be nullable. For chunked writing, explicitly set the " "nullability to false in metadata"); // process key size_type struct_col_index = schema.size() - 1; add_schema(key_col, left_child_meta, struct_col_index); // process value add_schema(col->children[lists_column_view::child_column_index]->children[1], right_child_meta, struct_col_index); } else { // if leaf, add current if (col->type().id() == type_id::STRING) { CUDF_EXPECTS(col_meta.num_children() == 2 or col_meta.num_children() == 0, "String column's corresponding metadata should have zero or two children"); } else { CUDF_EXPECTS(col_meta.num_children() == 0, "Leaf column's corresponding metadata cannot have children"); } schema_tree_node col_schema{}; bool timestamp_is_int96 = int96_timestamps or col_meta.is_enabled_int96_timestamps(); cudf::type_dispatcher(col->type(), leaf_schema_fn{col_schema, col, col_meta, timestamp_is_int96}); col_schema.repetition_type = col_nullable ? OPTIONAL : REQUIRED; col_schema.name = (schema[parent_idx].name == "list") ? 
"element" : col_meta.get_name(); col_schema.parent_idx = parent_idx; col_schema.leaf_column = col; set_field_id(col_schema, col_meta); schema.push_back(col_schema); } }; CUDF_EXPECTS(metadata.column_metadata.size() == linked_columns.size(), "Mismatch in the number of columns and the corresponding metadata elements"); // Add all linked_columns to schema using parent_idx = 0 (root) for (size_t i = 0; i < linked_columns.size(); ++i) { add_schema(linked_columns[i], metadata.column_metadata[i], 0); } return schema; } /** * @brief Class to store parquet specific information for one data stream. * * Contains information about a single data stream. In case of struct columns, a data stream is one * of the child leaf columns that contains data. * e.g. A column Struct<int, List<float>> contains 2 data streams: * - Struct<int> * - Struct<List<float>> * */ struct parquet_column_view { parquet_column_view(schema_tree_node const& schema_node, std::vector<schema_tree_node> const& schema_tree, rmm::cuda_stream_view stream); [[nodiscard]] column_view leaf_column_view() const; [[nodiscard]] gpu::parquet_column_device_view get_device_view(rmm::cuda_stream_view stream) const; [[nodiscard]] column_view cudf_column_view() const { return cudf_col; } [[nodiscard]] parquet::Type physical_type() const { return schema_node.type; } std::vector<std::string> const& get_path_in_schema() { return path_in_schema; } // LIST related member functions [[nodiscard]] uint8_t max_def_level() const noexcept { return _max_def_level; } [[nodiscard]] uint8_t max_rep_level() const noexcept { return _max_rep_level; } [[nodiscard]] bool is_list() const noexcept { return _is_list; } private: // Schema related members schema_tree_node schema_node; std::vector<std::string> path_in_schema; uint8_t _max_def_level = 0; uint8_t _max_rep_level = 0; rmm::device_uvector<uint8_t> _d_nullability; column_view cudf_col; // List-related members bool _is_list; rmm::device_uvector<size_type> _dremel_offsets; ///< For each row, the absolute offset into the repetition and definition ///< level vectors. O(num rows) rmm::device_uvector<uint8_t> _rep_level; rmm::device_uvector<uint8_t> _def_level; std::vector<uint8_t> _nullability; size_type _data_count = 0; }; parquet_column_view::parquet_column_view(schema_tree_node const& schema_node, std::vector<schema_tree_node> const& schema_tree, rmm::cuda_stream_view stream) : schema_node(schema_node), _d_nullability(0, stream), _dremel_offsets(0, stream), _rep_level(0, stream), _def_level(0, stream) { // Construct single inheritance column_view from linked_column_view auto curr_col = schema_node.leaf_column.get(); column_view single_inheritance_cudf_col = *curr_col; while (curr_col->parent) { auto const& parent = *curr_col->parent; // For list columns, we still need to retain the offset child column. auto children = (parent.type().id() == type_id::LIST) ? 
std::vector<column_view>{*parent.children[lists_column_view::offsets_column_index], single_inheritance_cudf_col} : std::vector<column_view>{single_inheritance_cudf_col}; single_inheritance_cudf_col = column_view(parent.type(), parent.size(), parent.head(), parent.null_mask(), UNKNOWN_NULL_COUNT, parent.offset(), children); curr_col = curr_col->parent; } cudf_col = single_inheritance_cudf_col; // Construct path_in_schema by travelling up in the schema_tree std::vector<std::string> path; auto curr_schema_node = schema_node; do { path.push_back(curr_schema_node.name); if (curr_schema_node.parent_idx != -1) { curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } } while (curr_schema_node.parent_idx != -1); path_in_schema = std::vector<std::string>(path.crbegin(), path.crend()); // Calculate max definition level by counting the number of levels that are optional (nullable) // and max repetition level by counting the number of REPEATED levels in this column's hierarchy uint16_t max_def_level = 0; uint16_t max_rep_level = 0; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (curr_schema_node.repetition_type == parquet::REPEATED or curr_schema_node.repetition_type == parquet::OPTIONAL) { ++max_def_level; } if (curr_schema_node.repetition_type == parquet::REPEATED) { ++max_rep_level; } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } CUDF_EXPECTS(max_def_level < 256, "Definition levels above 255 are not supported"); CUDF_EXPECTS(max_rep_level < 256, "Definition levels above 255 are not supported"); _max_def_level = max_def_level; _max_rep_level = max_rep_level; // Construct nullability vector using repetition_type from schema. std::vector<uint8_t> r_nullability; curr_schema_node = schema_node; while (curr_schema_node.parent_idx != -1) { if (not curr_schema_node.is_stub()) { r_nullability.push_back(curr_schema_node.repetition_type == FieldRepetitionType::OPTIONAL); } curr_schema_node = schema_tree[curr_schema_node.parent_idx]; } _nullability = std::vector<uint8_t>(r_nullability.crbegin(), r_nullability.crend()); // TODO(cp): Explore doing this for all columns in a single go outside this ctor. Maybe using // hostdevice_vector. Currently this involves a cudaMemcpyAsync for each column. _d_nullability = cudf::detail::make_device_uvector_async(_nullability, stream); _is_list = (_max_rep_level > 0); if (cudf_col.size() == 0) { return; } if (_is_list) { // Top level column's offsets are not applied to all children. 
Get the effective offset and // size of the leaf column // Calculate row offset into dremel data (repetition/definition values) and the respective // definition and repetition levels gpu::dremel_data dremel = gpu::get_dremel_data(cudf_col, _d_nullability, _nullability, stream); _dremel_offsets = std::move(dremel.dremel_offsets); _rep_level = std::move(dremel.rep_level); _def_level = std::move(dremel.def_level); _data_count = dremel.leaf_data_size; // Needed for knowing what size dictionary to allocate stream.synchronize(); } else { // For non-list struct, the size of the root column is the same as the size of the leaf column _data_count = cudf_col.size(); } } column_view parquet_column_view::leaf_column_view() const { auto col = cudf_col; while (cudf::is_nested(col.type())) { if (col.type().id() == type_id::LIST) { col = col.child(lists_column_view::child_column_index); } else if (col.type().id() == type_id::STRUCT) { col = col.child(0); // Stored cudf_col has only one child if struct } } return col; } gpu::parquet_column_device_view parquet_column_view::get_device_view( rmm::cuda_stream_view stream) const { column_view col = leaf_column_view(); auto desc = gpu::parquet_column_device_view{}; // Zero out all fields desc.stats_dtype = schema_node.stats_dtype; desc.ts_scale = schema_node.ts_scale; if (is_list()) { desc.level_offsets = _dremel_offsets.data(); desc.rep_values = _rep_level.data(); desc.def_values = _def_level.data(); } desc.num_rows = cudf_col.size(); desc.physical_type = physical_type(); desc.level_bits = CompactProtocolReader::NumRequiredBits(max_rep_level()) << 4 | CompactProtocolReader::NumRequiredBits(max_def_level()); desc.nullability = _d_nullability.data(); return desc; } void writer::impl::init_page_fragments(cudf::detail::hostdevice_2dvector<gpu::PageFragment>& frag, device_span<gpu::parquet_column_device_view const> col_desc, host_span<partition_info const> partitions, device_span<int const> part_frag_offset, uint32_t fragment_size) { auto d_partitions = cudf::detail::make_device_uvector_async(partitions, stream); gpu::InitPageFragments(frag, col_desc, d_partitions, part_frag_offset, fragment_size, stream); frag.device_to_host(stream, true); } void writer::impl::gather_fragment_statistics( device_2dspan<statistics_chunk> frag_stats_chunk, device_2dspan<gpu::PageFragment const> frag, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_fragments) { auto num_columns = col_desc.size(); rmm::device_uvector<statistics_group> frag_stats_group(num_fragments * num_columns, stream); auto frag_stats_group_2dview = device_2dspan<statistics_group>(frag_stats_group.data(), num_columns, num_fragments); gpu::InitFragmentStatistics(frag_stats_group_2dview, frag, col_desc, stream); detail::calculate_group_statistics<detail::io_file_format::PARQUET>(frag_stats_chunk.data(), frag_stats_group.data(), num_fragments * num_columns, stream, int96_timestamps); stream.synchronize(); } void writer::impl::init_page_sizes(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::parquet_column_device_view const> col_desc, uint32_t num_columns) { chunks.host_to_device(stream); gpu::InitEncoderPages(chunks, {}, col_desc, num_columns, nullptr, nullptr, 0, stream); chunks.device_to_host(stream, true); } auto build_chunk_dictionaries(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, host_span<gpu::parquet_column_device_view const> col_desc, device_2dspan<gpu::PageFragment const> frags, rmm::cuda_stream_view stream) { // At this point, we know all chunks and their sizes. 
We want to allocate dictionaries for each // chunk that can have dictionary auto h_chunks = chunks.host_view().flat_view(); std::vector<rmm::device_uvector<size_type>> dict_data; std::vector<rmm::device_uvector<uint16_t>> dict_index; if (h_chunks.size() == 0) { return std::pair(std::move(dict_data), std::move(dict_index)); } // Allocate slots for each chunk std::vector<rmm::device_uvector<gpu::slot_type>> hash_maps_storage; hash_maps_storage.reserve(h_chunks.size()); for (auto& chunk : h_chunks) { if (col_desc[chunk.col_desc_id].physical_type == Type::BOOLEAN) { chunk.use_dictionary = false; } else { chunk.use_dictionary = true; auto& inserted_map = hash_maps_storage.emplace_back(chunk.num_values, stream); chunk.dict_map_slots = inserted_map.data(); chunk.dict_map_size = inserted_map.size(); } } chunks.host_to_device(stream); gpu::initialize_chunk_hash_maps(chunks.device_view().flat_view(), stream); gpu::populate_chunk_hash_maps(frags, stream); chunks.device_to_host(stream, true); // Make decision about which chunks have dictionary for (auto& ck : h_chunks) { if (not ck.use_dictionary) { continue; } std::tie(ck.use_dictionary, ck.dict_rle_bits) = [&]() { // calculate size of chunk if dictionary is used // If we have N unique values then the idx for the last value is N - 1 and nbits is the number // of bits required to encode indices into the dictionary auto max_dict_index = (ck.num_dict_entries > 0) ? ck.num_dict_entries - 1 : 0; auto nbits = CompactProtocolReader::NumRequiredBits(max_dict_index); // We don't use dictionary if the indices are > 16 bits because that's the maximum bitpacking // bitsize we efficiently support if (nbits > 16) { return std::pair(false, 0); } // Only these bit sizes are allowed for RLE encoding because it's compute optimized constexpr auto allowed_bitsizes = std::array<size_type, 6>{1, 2, 4, 8, 12, 16}; // ceil to (1/2/4/8/12/16) auto rle_bits = *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits); auto rle_byte_size = util::div_rounding_up_safe(ck.num_values * rle_bits, 8); auto dict_enc_size = ck.uniq_data_size + rle_byte_size; bool use_dict = (ck.plain_data_size > dict_enc_size); if (not use_dict) { rle_bits = 0; } return std::pair(use_dict, rle_bits); }(); } // TODO: (enh) Deallocate hash map storage for chunks that don't use dict and clear pointers. dict_data.reserve(h_chunks.size()); dict_index.reserve(h_chunks.size()); for (auto& chunk : h_chunks) { if (not chunk.use_dictionary) { continue; } size_t dict_data_size = std::min(MAX_DICT_SIZE, chunk.dict_map_size); auto& inserted_dict_data = dict_data.emplace_back(dict_data_size, stream); auto& inserted_dict_index = dict_index.emplace_back(chunk.num_values, stream); chunk.dict_data = inserted_dict_data.data(); chunk.dict_index = inserted_dict_index.data(); } chunks.host_to_device(stream); gpu::collect_map_entries(chunks.device_view().flat_view(), stream); gpu::get_dictionary_indices(frags, stream); return std::pair(std::move(dict_data), std::move(dict_index)); } void writer::impl::init_encoder_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::parquet_column_device_view const> col_desc, device_span<gpu::EncPage> pages, statistics_chunk* page_stats, statistics_chunk* frag_stats, size_t max_page_comp_data_size, uint32_t num_columns, uint32_t num_pages, uint32_t num_stats_bfr) { rmm::device_uvector<statistics_merge_group> page_stats_mrg(num_stats_bfr, stream); chunks.host_to_device(stream); InitEncoderPages(chunks, pages, col_desc, num_columns, (num_stats_bfr) ? 
page_stats_mrg.data() : nullptr, (num_stats_bfr > num_pages) ? page_stats_mrg.data() + num_pages : nullptr, max_page_comp_data_size, stream); if (num_stats_bfr > 0) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats, frag_stats, page_stats_mrg.data(), num_pages, stream); if (num_stats_bfr > num_pages) { detail::merge_group_statistics<detail::io_file_format::PARQUET>( page_stats + num_pages, page_stats, page_stats_mrg.data() + num_pages, num_stats_bfr - num_pages, stream); } } stream.synchronize(); } void snappy_compress(device_span<device_span<uint8_t const> const> comp_in, device_span<device_span<uint8_t> const> comp_out, device_span<decompress_status> comp_stats, size_t max_page_uncomp_data_size, rmm::cuda_stream_view stream) { size_t num_comp_pages = comp_in.size(); try { size_t temp_size; nvcompStatus_t nvcomp_status = nvcompBatchedSnappyCompressGetTempSize( num_comp_pages, max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &temp_size); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in getting snappy compression scratch size"); // Not needed now but nvcomp API makes no promises about future rmm::device_buffer scratch(temp_size, stream); // Analogous to comp_in.srcDevice rmm::device_uvector<void const*> uncompressed_data_ptrs(num_comp_pages, stream); // Analogous to comp_in.srcSize rmm::device_uvector<size_t> uncompressed_data_sizes(num_comp_pages, stream); // Analogous to comp_in.dstDevice rmm::device_uvector<void*> compressed_data_ptrs(num_comp_pages, stream); // Analogous to comp_stat.bytes_written rmm::device_uvector<size_t> compressed_bytes_written(num_comp_pages, stream); // nvcomp does not currently use comp_in.dstSize. Cannot assume that the output will fit in // the space allocated unless one uses the API nvcompBatchedSnappyCompressGetOutputSize() // Prepare the vectors auto comp_it = thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin()); thrust::transform( rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(auto const& in) { return thrust::make_tuple(in.data(), in.size()); }); thrust::transform(rmm::exec_policy(stream), comp_out.begin(), comp_out.end(), compressed_data_ptrs.begin(), [] __device__(auto const& out) { return out.data(); }); nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(), uncompressed_data_sizes.data(), max_page_uncomp_data_size, num_comp_pages, scratch.data(), // Not needed rn but future scratch.size(), compressed_data_ptrs.data(), compressed_bytes_written.data(), nvcompBatchedSnappyDefaultOpts, stream.value()); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression"); // nvcomp also doesn't use comp_out.status . It guarantees that given enough output space, // compression will succeed. // The other `comp_out` field is `reserved` which is for internal cuIO debugging and can be 0. thrust::transform(rmm::exec_policy(stream), compressed_bytes_written.begin(), compressed_bytes_written.end(), comp_stats.begin(), [] __device__(size_t size) { decompress_status status{}; status.bytes_written = size; return status; }); return; } catch (...) 
{ // If we reach this then there was an error in compressing so set an error status for each page thrust::for_each(rmm::exec_policy(stream), comp_stats.begin(), comp_stats.end(), [] __device__(decompress_status & stat) { stat.status = 1; }); }; } void writer::impl::encode_pages(hostdevice_2dvector<gpu::EncColumnChunk>& chunks, device_span<gpu::EncPage> pages, size_t max_page_uncomp_data_size, uint32_t pages_in_batch, uint32_t first_page_in_batch, uint32_t rowgroups_in_batch, uint32_t first_rowgroup, const statistics_chunk* page_stats, const statistics_chunk* chunk_stats) { auto batch_pages = pages.subspan(first_page_in_batch, pages_in_batch); auto batch_pages_stats = (page_stats != nullptr) ? device_span<statistics_chunk const>(page_stats + first_page_in_batch, pages_in_batch) : device_span<statistics_chunk const>(); uint32_t max_comp_pages = (compression_ != parquet::Compression::UNCOMPRESSED) ? pages_in_batch : 0; rmm::device_uvector<device_span<uint8_t const>> comp_in(max_comp_pages, stream); rmm::device_uvector<device_span<uint8_t>> comp_out(max_comp_pages, stream); rmm::device_uvector<decompress_status> comp_stats(max_comp_pages, stream); gpu::EncodePages(batch_pages, comp_in, comp_out, comp_stats, stream); switch (compression_) { case parquet::Compression::SNAPPY: if (nvcomp_integration::is_stable_enabled()) { snappy_compress(comp_in, comp_out, comp_stats, max_page_uncomp_data_size, stream); } else { gpu_snap(comp_in, comp_out, comp_stats, stream); } break; default: break; } // TBD: Not clear if the official spec actually allows dynamically turning off compression at the // chunk-level auto d_chunks_in_batch = chunks.device_view().subspan(first_rowgroup, rowgroups_in_batch); DecideCompression(d_chunks_in_batch.flat_view(), stream); EncodePageHeaders(batch_pages, comp_stats, batch_pages_stats, chunk_stats, stream); GatherPages(d_chunks_in_batch.flat_view(), pages, stream); auto h_chunks_in_batch = chunks.host_view().subspan(first_rowgroup, rowgroups_in_batch); CUDF_CUDA_TRY(cudaMemcpyAsync(h_chunks_in_batch.data(), d_chunks_in_batch.data(), d_chunks_in_batch.flat_view().size_bytes(), cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); } writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks, parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _mr(mr), stream(stream), max_row_group_size{options.get_row_group_size_bytes()}, max_row_group_rows{options.get_row_group_size_rows()}, compression_(to_parquet_compression(options.get_compression())), stats_granularity_(options.get_stats_level()), int96_timestamps(options.is_enabled_int96_timestamps()), kv_md(options.get_key_value_metadata()), single_write_mode(mode == SingleWriteMode::YES), out_sink_(std::move(sinks)) { if (options.get_metadata()) { table_meta = std::make_unique<table_input_metadata>(*options.get_metadata()); } init_state(); } writer::impl::impl(std::vector<std::unique_ptr<data_sink>> sinks, chunked_parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _mr(mr), stream(stream), max_row_group_size{options.get_row_group_size_bytes()}, max_row_group_rows{options.get_row_group_size_rows()}, compression_(to_parquet_compression(options.get_compression())), stats_granularity_(options.get_stats_level()), int96_timestamps(options.is_enabled_int96_timestamps()), kv_md(options.get_key_value_metadata()), single_write_mode(mode == SingleWriteMode::YES), 
out_sink_(std::move(sinks)) { if (options.get_metadata()) { table_meta = std::make_unique<table_input_metadata>(*options.get_metadata()); } init_state(); } writer::impl::~impl() { close(); } void writer::impl::init_state() { current_chunk_offset.resize(out_sink_.size()); // Write file header file_header_s fhdr; fhdr.magic = parquet_magic; for (auto& sink : out_sink_) { sink->host_write(&fhdr, sizeof(fhdr)); } std::fill_n(current_chunk_offset.begin(), current_chunk_offset.size(), sizeof(file_header_s)); } void writer::impl::write(table_view const& table, std::vector<partition_info> const& partitions) { last_write_successful = false; CUDF_EXPECTS(not closed, "Data has already been flushed to out and closed"); if (not table_meta) { table_meta = std::make_unique<table_input_metadata>(table); } // Fill unnamed columns' names in table_meta std::function<void(column_in_metadata&, std::string)> add_default_name = [&](column_in_metadata& col_meta, std::string default_name) { if (col_meta.get_name().empty()) col_meta.set_name(default_name); for (size_type i = 0; i < col_meta.num_children(); ++i) { add_default_name(col_meta.child(i), col_meta.get_name() + "_" + std::to_string(i)); } }; for (size_t i = 0; i < table_meta->column_metadata.size(); ++i) { add_default_name(table_meta->column_metadata[i], "_col" + std::to_string(i)); } auto vec = table_to_linked_columns(table); auto schema_tree = construct_schema_tree(vec, *table_meta, single_write_mode, int96_timestamps); // Construct parquet_column_views from the schema tree leaf nodes. std::vector<parquet_column_view> parquet_columns; for (schema_tree_node const& schema_node : schema_tree) { if (schema_node.leaf_column) { parquet_columns.emplace_back(schema_node, schema_tree, stream); } } // Mass allocation of column_device_views for each parquet_column_view std::vector<column_view> cudf_cols; cudf_cols.reserve(parquet_columns.size()); for (auto const& parq_col : parquet_columns) { cudf_cols.push_back(parq_col.cudf_column_view()); } table_view single_streams_table(cudf_cols); size_type num_columns = single_streams_table.num_columns(); std::vector<SchemaElement> this_table_schema(schema_tree.begin(), schema_tree.end()); if (!md) { md = std::make_unique<aggregate_writer_metadata>( partitions, num_columns, std::move(this_table_schema), stats_granularity_, kv_md); } else { // verify the user isn't passing mismatched tables CUDF_EXPECTS(md->schema_matches(this_table_schema), "Mismatch in schema between multiple calls to write_chunk"); md->update_files(partitions); } // Create table_device_view so that corresponding column_device_view data // can be written into col_desc members auto parent_column_table_device_view = table_device_view::create(single_streams_table, stream); rmm::device_uvector<column_device_view> leaf_column_views(0, stream); // Initialize column description hostdevice_vector<gpu::parquet_column_device_view> col_desc(parquet_columns.size(), stream); std::transform( parquet_columns.begin(), parquet_columns.end(), col_desc.host_ptr(), [&](auto const& pcol) { return pcol.get_device_view(stream); }); // Init page fragments // 5000 is good enough for up to ~200-character strings. Longer strings will start producing // fragments larger than the desired page size -> TODO: keep track of the max fragment size, and // iteratively reduce this value if the largest fragment exceeds the max page size limit (we // ideally want the page size to be below 1MB so as to have enough pages to get good // compression/decompression performance). 
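  // For example (hypothetical numbers): with a fragment size of 5000 rows, a partition holding
  // 12'000 rows contributes util::div_rounding_up_unsafe(12'000, 5000) = 3 fragments, and
  // num_fragments below is simply the sum of these per-partition counts.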
using cudf::io::parquet::gpu::max_page_fragment_size; std::vector<int> num_frag_in_part; std::transform(partitions.begin(), partitions.end(), std::back_inserter(num_frag_in_part), [](auto const& part) { return util::div_rounding_up_unsafe(part.num_rows, max_page_fragment_size); }); size_type num_fragments = std::reduce(num_frag_in_part.begin(), num_frag_in_part.end()); std::vector<int> part_frag_offset; // Store the idx of the first fragment in each partition std::exclusive_scan( num_frag_in_part.begin(), num_frag_in_part.end(), std::back_inserter(part_frag_offset), 0); part_frag_offset.push_back(part_frag_offset.back() + num_frag_in_part.back()); auto d_part_frag_offset = cudf::detail::make_device_uvector_async(part_frag_offset, stream); cudf::detail::hostdevice_2dvector<gpu::PageFragment> fragments( num_columns, num_fragments, stream); if (num_fragments != 0) { // Move column info to device col_desc.host_to_device(stream); leaf_column_views = create_leaf_column_device_views<gpu::parquet_column_device_view>( col_desc, *parent_column_table_device_view, stream); init_page_fragments( fragments, col_desc, partitions, d_part_frag_offset, max_page_fragment_size); } std::vector<size_t> const global_rowgroup_base = md->num_row_groups_per_file(); // Decide row group boundaries based on uncompressed data size int num_rowgroups = 0; std::vector<int> num_rg_in_part(partitions.size()); for (size_t p = 0; p < partitions.size(); ++p) { size_type curr_rg_num_rows = 0; size_t curr_rg_data_size = 0; int first_frag_in_rg = part_frag_offset[p]; int last_frag_in_part = part_frag_offset[p + 1] - 1; for (auto f = first_frag_in_rg; f <= last_frag_in_part; ++f) { size_t fragment_data_size = 0; for (auto c = 0; c < num_columns; c++) { fragment_data_size += fragments[c][f].fragment_data_size; } size_type fragment_num_rows = fragments[0][f].num_rows; // If the fragment size gets larger than rg limit then break off a rg if (f > first_frag_in_rg && // There has to be at least one fragment in row group (curr_rg_data_size + fragment_data_size > max_row_group_size || curr_rg_num_rows + fragment_num_rows > max_row_group_rows)) { auto& rg = md->file(p).row_groups.emplace_back(); rg.num_rows = curr_rg_num_rows; num_rowgroups++; num_rg_in_part[p]++; curr_rg_num_rows = 0; curr_rg_data_size = 0; first_frag_in_rg = f; } curr_rg_num_rows += fragment_num_rows; curr_rg_data_size += fragment_data_size; // TODO: (wishful) refactor to consolidate with above if block if (f == last_frag_in_part) { auto& rg = md->file(p).row_groups.emplace_back(); rg.num_rows = curr_rg_num_rows; num_rowgroups++; num_rg_in_part[p]++; } } } // Allocate column chunks and gather fragment statistics rmm::device_uvector<statistics_chunk> frag_stats(0, stream); if (stats_granularity_ != statistics_freq::STATISTICS_NONE) { frag_stats.resize(num_fragments * num_columns, stream); if (not frag_stats.is_empty()) { auto frag_stats_2dview = device_2dspan<statistics_chunk>(frag_stats.data(), num_columns, num_fragments); gather_fragment_statistics(frag_stats_2dview, fragments, col_desc, num_fragments); } } std::vector<int> first_rg_in_part; std::exclusive_scan( num_rg_in_part.begin(), num_rg_in_part.end(), std::back_inserter(first_rg_in_part), 0); // Initialize row groups and column chunks auto const num_chunks = num_rowgroups * num_columns; hostdevice_2dvector<gpu::EncColumnChunk> chunks(num_rowgroups, num_columns, stream); for (size_t p = 0; p < partitions.size(); ++p) { int f = part_frag_offset[p]; size_type start_row = partitions[p].start_row; for (int r = 0; r < 
num_rg_in_part[p]; r++) { size_t global_r = global_rowgroup_base[p] + r; // Number of rowgroups already in file/part auto& row_group = md->file(p).row_groups[global_r]; uint32_t fragments_in_chunk = util::div_rounding_up_unsafe(row_group.num_rows, max_page_fragment_size); row_group.total_byte_size = 0; row_group.columns.resize(num_columns); for (int c = 0; c < num_columns; c++) { gpu::EncColumnChunk& ck = chunks[r + first_rg_in_part[p]][c]; ck = {}; ck.col_desc = col_desc.device_ptr() + c; ck.col_desc_id = c; ck.fragments = &fragments.device_view()[c][f]; ck.stats = (not frag_stats.is_empty()) ? frag_stats.data() + c * num_fragments + f : nullptr; ck.start_row = start_row; ck.num_rows = (uint32_t)row_group.num_rows; ck.first_fragment = c * num_fragments + f; auto chunk_fragments = fragments[c].subspan(f, fragments_in_chunk); // In fragment struct, add a pointer to the chunk it belongs to // In each fragment in chunk_fragments, update the chunk pointer here. for (auto& frag : chunk_fragments) { frag.chunk = &chunks.device_view()[r + first_rg_in_part[p]][c]; } ck.num_values = std::accumulate( chunk_fragments.begin(), chunk_fragments.end(), 0, [](uint32_t l, auto r) { return l + r.num_values; }); ck.plain_data_size = std::accumulate( chunk_fragments.begin(), chunk_fragments.end(), 0, [](int sum, gpu::PageFragment frag) { return sum + frag.fragment_data_size; }); auto& column_chunk_meta = row_group.columns[c].meta_data; column_chunk_meta.type = parquet_columns[c].physical_type(); column_chunk_meta.encodings = {Encoding::PLAIN, Encoding::RLE}; column_chunk_meta.path_in_schema = parquet_columns[c].get_path_in_schema(); column_chunk_meta.codec = UNCOMPRESSED; column_chunk_meta.num_values = ck.num_values; } f += fragments_in_chunk; start_row += (uint32_t)row_group.num_rows; } } fragments.host_to_device(stream); auto dict_info_owner = build_chunk_dictionaries(chunks, col_desc, fragments, stream); for (size_t p = 0; p < partitions.size(); p++) { for (int rg = 0; rg < num_rg_in_part[p]; rg++) { size_t global_rg = global_rowgroup_base[p] + rg; for (int col = 0; col < num_columns; col++) { if (chunks.host_view()[rg][col].use_dictionary) { md->file(p).row_groups[global_rg].columns[col].meta_data.encodings.push_back( Encoding::PLAIN_DICTIONARY); } } } } // Build chunk dictionaries and count pages if (num_chunks != 0) { init_page_sizes(chunks, col_desc, num_columns); } // Get the maximum page size across all chunks size_type max_page_uncomp_data_size = std::accumulate(chunks.host_view().flat_view().begin(), chunks.host_view().flat_view().end(), 0, [](uint32_t max_page_size, gpu::EncColumnChunk const& chunk) { return std::max(max_page_size, chunk.max_page_data_size); }); size_t max_page_comp_data_size = 0; if (compression_ != parquet::Compression::UNCOMPRESSED) { auto status = nvcompBatchedSnappyCompressGetMaxOutputChunkSize( max_page_uncomp_data_size, nvcompBatchedSnappyDefaultOpts, &max_page_comp_data_size); CUDF_EXPECTS(status == nvcompStatus_t::nvcompSuccess, "Error in getting compressed size from nvcomp"); } // Find which partition a rg belongs to std::vector<int> rg_to_part; for (size_t p = 0; p < num_rg_in_part.size(); ++p) { std::fill_n(std::back_inserter(rg_to_part), num_rg_in_part[p], p); } // Initialize batches of rowgroups to encode (mainly to limit peak memory usage) std::vector<size_type> batch_list; size_type num_pages = 0; size_t max_bytes_in_batch = 1024 * 1024 * 1024; // 1GB - TODO: Tune this size_t max_uncomp_bfr_size = 0; size_t max_comp_bfr_size = 0; size_t max_chunk_bfr_size = 0; 
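  // The loop below greedily packs consecutive row groups into batches, closing the current batch
  // once adding another row group would push bytes_in_batch past max_bytes_in_batch; the running
  // per-batch maxima collected here are later used to size the shared uncompressed and compressed
  // device buffers for the largest batch.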
size_type max_pages_in_batch = 0; size_t bytes_in_batch = 0; size_t comp_bytes_in_batch = 0; for (size_type r = 0, groups_in_batch = 0, pages_in_batch = 0; r <= num_rowgroups; r++) { size_t rowgroup_size = 0; size_t comp_rowgroup_size = 0; if (r < num_rowgroups) { for (int i = 0; i < num_columns; i++) { gpu::EncColumnChunk* ck = &chunks[r][i]; ck->first_page = num_pages; num_pages += ck->num_pages; pages_in_batch += ck->num_pages; rowgroup_size += ck->bfr_size; ck->compressed_size = ck->ck_stat_size + ck->page_headers_size + max_page_comp_data_size * ck->num_pages; comp_rowgroup_size += ck->compressed_size; max_chunk_bfr_size = std::max(max_chunk_bfr_size, (size_t)std::max(ck->bfr_size, ck->compressed_size)); } } // TBD: We may want to also shorten the batch if we have enough pages (not just based on size) if ((r == num_rowgroups) || (groups_in_batch != 0 && bytes_in_batch + rowgroup_size > max_bytes_in_batch)) { max_uncomp_bfr_size = std::max(max_uncomp_bfr_size, bytes_in_batch); max_comp_bfr_size = std::max(max_comp_bfr_size, comp_bytes_in_batch); max_pages_in_batch = std::max(max_pages_in_batch, pages_in_batch); if (groups_in_batch != 0) { batch_list.push_back(groups_in_batch); groups_in_batch = 0; } bytes_in_batch = 0; comp_bytes_in_batch = 0; pages_in_batch = 0; } bytes_in_batch += rowgroup_size; comp_bytes_in_batch += comp_rowgroup_size; groups_in_batch++; } // Clear compressed buffer size if compression has been turned off if (compression_ == parquet::Compression::UNCOMPRESSED) { max_comp_bfr_size = 0; } // Initialize data pointers in batch uint32_t num_stats_bfr = (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? num_pages + num_chunks : 0; rmm::device_buffer uncomp_bfr(max_uncomp_bfr_size, stream); rmm::device_buffer comp_bfr(max_comp_bfr_size, stream); rmm::device_uvector<gpu::EncPage> pages(num_pages, stream); // This contains stats for both the pages and the rowgroups. TODO: make them separate. rmm::device_uvector<statistics_chunk> page_stats(num_stats_bfr, stream); for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) { auto bfr = static_cast<uint8_t*>(uncomp_bfr.data()); auto bfr_c = static_cast<uint8_t*>(comp_bfr.data()); for (auto j = 0; j < batch_list[b]; j++, r++) { for (auto i = 0; i < num_columns; i++) { gpu::EncColumnChunk& ck = chunks[r][i]; ck.uncompressed_bfr = bfr; ck.compressed_bfr = bfr_c; bfr += ck.bfr_size; bfr_c += ck.compressed_size; } } } if (num_pages != 0) { init_encoder_pages(chunks, col_desc, {pages.data(), pages.size()}, (num_stats_bfr) ? page_stats.data() : nullptr, (num_stats_bfr) ? frag_stats.data() : nullptr, max_page_comp_data_size, num_columns, num_pages, num_stats_bfr); } pinned_buffer<uint8_t> host_bfr{nullptr, cudaFreeHost}; // Encode row groups in batches for (auto b = 0, r = 0; b < static_cast<size_type>(batch_list.size()); b++) { // Count pages in this batch auto const rnext = r + batch_list[b]; auto const first_page_in_batch = chunks[r][0].first_page; auto const first_page_in_next_batch = (rnext < num_rowgroups) ? chunks[rnext][0].first_page : num_pages; auto const pages_in_batch = first_page_in_next_batch - first_page_in_batch; // device_span<gpu::EncPage> batch_pages{pages.data() + first_page_in_batch, } encode_pages( chunks, {pages.data(), pages.size()}, max_page_uncomp_data_size, pages_in_batch, first_page_in_batch, batch_list[b], r, (stats_granularity_ == statistics_freq::STATISTICS_PAGE) ? page_stats.data() : nullptr, (stats_granularity_ != statistics_freq::STATISTICS_NONE) ? 
page_stats.data() + num_pages : nullptr); std::vector<std::future<void>> write_tasks; for (; r < rnext; r++) { int p = rg_to_part[r]; int global_r = global_rowgroup_base[p] + r - first_rg_in_part[p]; auto& row_group = md->file(p).row_groups[global_r]; for (auto i = 0; i < num_columns; i++) { gpu::EncColumnChunk& ck = chunks[r][i]; auto& column_chunk_meta = row_group.columns[i].meta_data; uint8_t* dev_bfr; if (ck.is_compressed) { column_chunk_meta.codec = compression_; dev_bfr = ck.compressed_bfr; } else { dev_bfr = ck.uncompressed_bfr; } if (out_sink_[p]->is_device_write_preferred(ck.compressed_size)) { // let the writer do what it wants to retrieve the data from the gpu. write_tasks.push_back(out_sink_[p]->device_write_async( dev_bfr + ck.ck_stat_size, ck.compressed_size, stream)); // we still need to do a (much smaller) memcpy for the statistics. if (ck.ck_stat_size != 0) { column_chunk_meta.statistics_blob.resize(ck.ck_stat_size); CUDF_CUDA_TRY(cudaMemcpyAsync(column_chunk_meta.statistics_blob.data(), dev_bfr, ck.ck_stat_size, cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); } } else { if (!host_bfr) { host_bfr = pinned_buffer<uint8_t>{[](size_t size) { uint8_t* ptr = nullptr; CUDF_CUDA_TRY(cudaMallocHost(&ptr, size)); return ptr; }(max_chunk_bfr_size), cudaFreeHost}; } // copy the full data CUDF_CUDA_TRY(cudaMemcpyAsync(host_bfr.get(), dev_bfr, ck.ck_stat_size + ck.compressed_size, cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); out_sink_[p]->host_write(host_bfr.get() + ck.ck_stat_size, ck.compressed_size); if (ck.ck_stat_size != 0) { column_chunk_meta.statistics_blob.resize(ck.ck_stat_size); memcpy(column_chunk_meta.statistics_blob.data(), host_bfr.get(), ck.ck_stat_size); } } row_group.total_byte_size += ck.compressed_size; column_chunk_meta.data_page_offset = current_chunk_offset[p] + ((ck.use_dictionary) ? ck.dictionary_size : 0); column_chunk_meta.dictionary_page_offset = (ck.use_dictionary) ? 
current_chunk_offset[p] : 0; column_chunk_meta.total_uncompressed_size = ck.bfr_size; column_chunk_meta.total_compressed_size = ck.compressed_size; current_chunk_offset[p] += ck.compressed_size; } } for (auto const& task : write_tasks) { task.wait(); } } last_write_successful = true; } std::unique_ptr<std::vector<uint8_t>> writer::impl::close( std::vector<std::string> const& column_chunks_file_path) { if (closed) { return nullptr; } closed = true; if (not last_write_successful) { return nullptr; } for (size_t p = 0; p < out_sink_.size(); p++) { std::vector<uint8_t> buffer; CompactProtocolWriter cpw(&buffer); file_ender_s fendr; buffer.resize(0); fendr.footer_len = static_cast<uint32_t>(cpw.write(md->get_metadata(p))); fendr.magic = parquet_magic; out_sink_[p]->host_write(buffer.data(), buffer.size()); out_sink_[p]->host_write(&fendr, sizeof(fendr)); out_sink_[p]->flush(); } // Optionally output raw file metadata with the specified column chunk file path if (column_chunks_file_path.size() > 0) { CUDF_EXPECTS(column_chunks_file_path.size() == md->num_files(), "Expected one column chunk path per output file"); md->set_file_paths(column_chunks_file_path); file_header_s fhdr = {parquet_magic}; std::vector<uint8_t> buffer; CompactProtocolWriter cpw(&buffer); buffer.insert(buffer.end(), reinterpret_cast<const uint8_t*>(&fhdr), reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr)); file_ender_s fendr; fendr.magic = parquet_magic; fendr.footer_len = static_cast<uint32_t>(cpw.write(md->get_merged_metadata())); buffer.insert(buffer.end(), reinterpret_cast<const uint8_t*>(&fendr), reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(buffer)); } else { return {nullptr}; } return nullptr; } // Forward to implementation writer::writer(std::vector<std::unique_ptr<data_sink>> sinks, parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream, mr)) { } writer::writer(std::vector<std::unique_ptr<data_sink>> sinks, chunked_parquet_writer_options const& options, SingleWriteMode mode, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) : _impl(std::make_unique<impl>(std::move(sinks), options, mode, stream, mr)) { } // Destructor within this translation unit writer::~writer() = default; // Forward to implementation void writer::write(table_view const& table, std::vector<partition_info> const& partitions) { _impl->write( table, partitions.empty() ? 
std::vector<partition_info>{{0, table.num_rows()}} : partitions); } // Forward to implementation std::unique_ptr<std::vector<uint8_t>> writer::close( std::vector<std::string> const& column_chunks_file_path) { return _impl->close(column_chunks_file_path); } std::unique_ptr<std::vector<uint8_t>> writer::merge_row_group_metadata( std::vector<std::unique_ptr<std::vector<uint8_t>>> const& metadata_list) { std::vector<uint8_t> output; CompactProtocolWriter cpw(&output); FileMetaData md; md.row_groups.reserve(metadata_list.size()); for (const auto& blob : metadata_list) { CompactProtocolReader cpreader( blob.get()->data(), std::max<size_t>(blob.get()->size(), sizeof(file_ender_s)) - sizeof(file_ender_s)); cpreader.skip_bytes(sizeof(file_header_s)); // Skip over file header if (md.num_rows == 0) { cpreader.read(&md); } else { FileMetaData tmp; cpreader.read(&tmp); md.row_groups.insert(md.row_groups.end(), std::make_move_iterator(tmp.row_groups.begin()), std::make_move_iterator(tmp.row_groups.end())); md.num_rows += tmp.num_rows; } } // Reader doesn't currently populate column_order, so infer it here if (md.row_groups.size() != 0) { uint32_t num_columns = static_cast<uint32_t>(md.row_groups[0].columns.size()); md.column_order_listsize = (num_columns > 0 && md.row_groups[0].columns[0].meta_data.statistics_blob.size()) ? num_columns : 0; } // Thrift-encode the resulting output file_header_s fhdr; file_ender_s fendr; fhdr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<const uint8_t*>(&fhdr), reinterpret_cast<const uint8_t*>(&fhdr) + sizeof(fhdr)); fendr.footer_len = static_cast<uint32_t>(cpw.write(md)); fendr.magic = parquet_magic; output.insert(output.end(), reinterpret_cast<const uint8_t*>(&fendr), reinterpret_cast<const uint8_t*>(&fendr) + sizeof(fendr)); return std::make_unique<std::vector<uint8_t>>(std::move(output)); } } // namespace parquet } // namespace detail } // namespace io } // namespace cudf
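// ---------------------------------------------------------------------------
// Illustrative sketch (a hypothetical standalone reduction, not cuDF API): the
// dictionary-vs-plain decision applied per chunk in build_chunk_dictionaries()
// above, rewritten as a host-only function so the arithmetic is easy to follow.
// All names below (example::chunk_stats, decide_dictionary, ...) are invented
// for this sketch; only the decision rule mirrors the writer code above.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <array>
#include <cstddef>
#include <utility>

namespace example {

struct chunk_stats {
  std::size_t num_values;        // total values in the chunk
  std::size_t num_dict_entries;  // distinct values found by the chunk's hash map
  std::size_t plain_data_size;   // encoded size with PLAIN encoding, in bytes
  std::size_t uniq_data_size;    // size of the distinct values themselves, in bytes
};

// Bits needed to represent `value` (0 -> 0 bits); stands in for
// CompactProtocolReader::NumRequiredBits used by the writer above.
inline int num_required_bits(std::size_t value)
{
  int bits = 0;
  while (value != 0) {
    value >>= 1;
    ++bits;
  }
  return bits;
}

// Returns {use_dictionary, rle_index_bit_width}.
inline std::pair<bool, int> decide_dictionary(chunk_stats const& ck)
{
  auto const max_dict_index = (ck.num_dict_entries > 0) ? ck.num_dict_entries - 1 : 0;
  auto const nbits          = num_required_bits(max_dict_index);
  // Indices wider than 16 bits cannot be bit-packed efficiently, so fall back to PLAIN.
  if (nbits > 16) { return {false, 0}; }

  // Round the index width up to the nearest RLE-friendly width.
  constexpr std::array<int, 6> allowed_bitsizes{1, 2, 4, 8, 12, 16};
  int const rle_bits =
    *std::lower_bound(allowed_bitsizes.begin(), allowed_bitsizes.end(), nbits);

  std::size_t const rle_byte_size = (ck.num_values * rle_bits + 7) / 8;  // ceil division
  std::size_t const dict_enc_size = ck.uniq_data_size + rle_byte_size;
  bool const use_dict             = ck.plain_data_size > dict_enc_size;
  return {use_dict, use_dict ? rle_bits : 0};
}

}  // namespace example

// Example (hypothetical numbers): 1'000'000 int32 values with 1'000 distinct
// entries -> plain = 4'000'000 B, while the dictionary path costs
// 4'000 B (distinct values) + 1'000'000 * 12 bits / 8 = 1'504'000 B, because the
// 10-bit index width rounds up to 12. decide_dictionary() therefore picks the
// dictionary with a 12-bit RLE index width for such a chunk.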
#include <doctest.h> #include <cuda/api_wrappers.hpp> #include <type_traits> #include <cstdint> #include <vector> #include <algorithm> #include <cassert> // for device-side assertions struct result_of_check { bool result; kat::size_t line_number; }; namespace kernels { template <typename F> __global__ void run_simple_test( F test_function, result_of_check* __restrict results = nullptr, kat::size_t num_checks = 0 ) { assert(not(results == nullptr and num_checks != 0)); test_function(results, num_checks); } } // namespace kernels #define KAT_HD_CHECK(check_expression) \ do { \ results[check_index++] = result_of_check{ ( check_expression ) , __LINE__ }; \ } while(false); // Note: Want more arguments? Define another macro. It's not trivial to have the same // macro for the no-extra-args case without VA_OPT; it requires some macro voodoo // which I'd rather not get into // #define HD_TEST_FUNCTOR_START(identifier) \ struct identifier { \ KAT_HD void operator()( \ result_of_check* results, \ kat::size_t num_checks) \ { \ kat::size_t check_index { 0 }; \ (void) check_index; #define HD_TEST_FUNCTOR_END } \ }; // TODO: Don't pass the number of checks, have the device function return // a dynamically-allocated std-vector-like object, and carefull copy it to // the host side (first its size, then its data after corresponding allocation // on the host side). Perhaps with thrust device_vector? Or roll my own? template <typename F> auto execute_simple_testcase_on_gpu( F testcase_device_function, size_t num_checks = 0) { cuda::device_t device { cuda::device::current::get() }; auto host_side_results { std::vector<result_of_check>(num_checks) }; if (num_checks == 0) { cuda::launch( kernels::run_simple_test<F>, single_thread_launch_config(), testcase_device_function, nullptr, num_checks ); } else { auto device_side_results { cuda::memory::device::make_unique<result_of_check[]>(device, num_checks) }; cuda::memory::device::zero(device_side_results.get(), num_checks * sizeof(result_of_check)); // just to be on the safe side cuda::launch( kernels::run_simple_test<F>, single_thread_launch_config(), testcase_device_function, device_side_results.get(), num_checks ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(result_of_check) * num_checks); } device.synchronize(); // Probably unnecessary, but let's just be on the safe side return host_side_results; } void check_results( std::string test_or_testcase_name, const result_of_check* results, kat::size_t num_checks) { std::stringstream ss; // Note that it's possible for there to be _no_ results for(kat::size_t i = 0; i < num_checks; i++) { ss.str(""); ss << test_or_testcase_name << " failed check #" << (i+1) << " (1-based) at source line " << results[i].line_number; auto message = ss.str(); CHECK_MESSAGE(results[i].result, message); } } void check_results( const result_of_check* results, kat::size_t num_checks) { check_results(doctest::current_test_name(), results, num_checks); } template <typename ContiguousContainer> void check_results(const ContiguousContainer& results) { check_results(results.data(), results.size()); } namespace detail { enum everything_checks { shorts_is_empty #if __cplusplus >= 201703L , shorts_is_std_empty #endif , shorts_data_is_null , shorts_begin_equal_to_end , shorts_cbegin_equal_to_cend #if __cplusplus >= 202001L , definitely_reinterpret_casted , definitely_equivalent #endif , num_checks }; struct lwg_3225_constructibility_with_c_array { KAT_HD void operator()(result_of_check* = nullptr, kat::size_t = 0) { 
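  // All checks in this functor are compile-time static_asserts: kat::span must be constructible
  // from a C array only when the element type is compatible and the array length matches a fixed
  // Extent, while any length is accepted when the extent is dynamic. No runtime checks are
  // performed, so the defaulted parameters are never touched.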
static_assert( std::is_constructible<kat::span<int, 1>, int(&)[1]>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, int(&)[1]>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, const int(&)[1]>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, int(&)[2]>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, int(&)[2]>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, const int(&)[2]>::value, ""); static_assert( std::is_constructible<kat::span<int>, int(&)[2]>::value, ""); static_assert( std::is_constructible<kat::span<const int>, int(&)[2]>::value, ""); static_assert( std::is_constructible<kat::span<const int>, const int(&)[2]>::value, ""); } }; struct lwg_3225_constructibility_with_kat_array { KAT_HD void operator()(result_of_check* = nullptr, kat::size_t = 0) { static_assert( std::is_constructible<kat::span<const int* const>, kat::array<int*, 2>>::value, ""); static_assert( std::is_constructible<kat::span<const int>, kat::array<const int, 4>>::value, ""); static_assert( std::is_constructible<kat::span<int, 1>, kat::array<int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, kat::array<int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, kat::array<const int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, const kat::array<int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, const kat::array<const int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, kat::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, kat::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, kat::array<const int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, const kat::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, const kat::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, const kat::array<const int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<int>, kat::array<int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, kat::array<int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, kat::array<const int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, const kat::array<int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, const kat::array<const int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, kat::array<const int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, const kat::array<int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, const kat::array<const int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int>, kat::array<const int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int>, const kat::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int>, const kat::array<const int, 2>&>::value, ""); } }; struct lwg_3225_constructibility_with_std_array { KAT_HD void operator()(result_of_check* = nullptr, kat::size_t = 0) { static_assert( std::is_constructible<kat::span<const int* const>, std::array<int*, 2>>::value, ""); static_assert( 
std::is_constructible<kat::span<const int>, std::array<const int, 4>>::value, ""); static_assert( std::is_constructible<kat::span<int, 1>, std::array<int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, std::array<int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, std::array<const int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, const std::array<int, 1>&>::value, ""); static_assert( std::is_constructible<kat::span<const int, 1>, const std::array<const int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, std::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, std::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, std::array<const int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, const std::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, const std::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<const int, 1>, const std::array<const int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<int>, std::array<int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, std::array<int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, std::array<const int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, const std::array<int, 2>&>::value, ""); static_assert( std::is_constructible<kat::span<const int>, const std::array<const int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, std::array<const int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, const std::array<int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int, 1>, const std::array<const int, 1>&>::value, ""); static_assert( not std::is_constructible<kat::span<int>, std::array<const int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int>, const std::array<int, 2>&>::value, ""); static_assert( not std::is_constructible<kat::span<int>, const std::array<const int, 2>&>::value, ""); } }; namespace nothrow_construcitibility_ { template<bool DoesntThrow> struct sentinel { int* p; }; template<bool DoesntThrow> bool operator==(sentinel<DoesntThrow> s, int* p) noexcept { return s.p == p; } template<bool DoesntThrow> std::ptrdiff_t operator-(sentinel<DoesntThrow> s, int* p) noexcept(DoesntThrow) { return s.p - p; } template<bool DoesntThrow> std::ptrdiff_t operator-(int* p, sentinel<DoesntThrow> s) noexcept { return p - s.p; } } struct nothrow_constructibility { KAT_HD void operator()(result_of_check* = nullptr, kat::size_t = 0) { using kat::span; using namespace nothrow_construcitibility_; static_assert( std::is_nothrow_constructible< kat::span<int>>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int, 0>>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int>, span<int>&>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<const int>, span<int>&>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int>, span<int, 1>&>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<const int>, span<int, 1>&>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int, 1>, span<int, 1>&>::value, "" ); static_assert( 
std::is_nothrow_constructible< kat::span<const int, 1>, span<int, 1>&>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int>, int(&)[1]>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int, 1>, int(&)[1]>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int>, std::array<int, 1>&>::value, "" ); static_assert( std::is_nothrow_constructible< kat::span<int, 1>, std::array<int, 1>&>::value, "" ); // // static_assert(std::sized_sentinel_for<sentinel<true>, int*>); // static_assert(std::sized_sentinel_for<sentinel<false>, int*>); static_assert(std::is_nothrow_constructible< kat::span<int>, int*, std::size_t>::value, ""); #if __cpplusplus >= 202001L constexpr const bool throws_exceptions = false; // These tests require a construct with a different type for the beginning iterator and the sentinel; // while they may theoretically be made available for C++ versions earlier than C++20, we'll just leave them out. // Stick to more conservative arguments for now. static_assert(std::is_nothrow_constructible< kat::span<int>, int*, const int*>::value, ""); static_assert(std::is_nothrow_constructible< kat::span<int>, int*, sentinel<not throws_exceptions>>::value, ""); static_assert(not std::is_nothrow_constructible< kat::span<int>, int*, sentinel<throws_exceptions>>::value, ""); #endif } }; struct everything { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; struct alignas(256) strawman { int x; int y; bool z; int w; }; struct naked_span { char* p; std::size_t n; }; struct strawman_span { strawman* p; std::size_t n; }; #if __cplusplus >= 202001L // In C++20, span's Extent is allowed to not take up any space if it's an empty struct - // and have the same starting address as the next field; this uses [[no_unique_address] // but ... 
we don't have that (unless we swap the span implementation for GSL's :-( // static_assert(sizeof(kat::span<char, 0>) <= sizeof(char*), ""); static_assert(sizeof(kat::span<const char, 0>) <= sizeof(const char*), ""); static_assert(sizeof(kat::span<strawman, 0>) <= sizeof(strawman*), ""); static_assert(sizeof(kat::span<strawman, 1>) <= sizeof(strawman*), ""); #endif static_assert(sizeof(kat::span<char>) <= sizeof(naked_span), ""); static_assert(sizeof(kat::span<strawman>) <= sizeof(strawman_span), ""); constexpr static const kat::array<int, 9> arr_data{ 0, 1, 2, 3, 4, 5, 6, 7, 8 }; constexpr auto arr_data_span = kat::span<const int, sizeof(arr_data) / sizeof(int)>(arr_data); static_assert(arr_data_span.size() == 9, ""); static_assert(arr_data_span.size_bytes() == 9 * sizeof(int), ""); static_assert(*arr_data_span.begin() == 0, ""); static_assert(*arr_data_span.data() == 0, ""); static_assert(arr_data_span.front() == 0, ""); static_assert(arr_data_span.back() == 8, ""); static_assert(arr_data_span[0] == 0, ""); static_assert(arr_data_span[1] == 1, ""); static_assert(arr_data_span[2] == 2, ""); static_assert(arr_data_span[3] == 3, ""); static_assert(arr_data_span[4] == 4, ""); static_assert(arr_data_span[5] == 5, ""); static_assert(arr_data_span[6] == 6, ""); static_assert(arr_data_span[7] == 7, ""); static_assert(arr_data_span[8] == 8, ""); static_assert(!arr_data_span.empty(), ""); static_assert(decltype(arr_data_span)::extent == 9, ""); constexpr static int data[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 }; constexpr auto data_span = kat::span<const int, sizeof(data) / sizeof(int)>(data); static_assert(data_span.size() == 9, ""); static_assert(data_span.size_bytes() == 9 * sizeof(int), ""); static_assert(*data_span.begin() == 0, ""); static_assert(*data_span.data() == 0, ""); static_assert(data_span.front() == 0, ""); static_assert(data_span.back() == 8, ""); static_assert(data_span[0] == 0, ""); static_assert(data_span[1] == 1, ""); static_assert(data_span[2] == 2, ""); static_assert(data_span[3] == 3, ""); static_assert(data_span[4] == 4, ""); static_assert(data_span[5] == 5, ""); static_assert(data_span[6] == 6, ""); static_assert(data_span[7] == 7, ""); static_assert(data_span[8] == 8, ""); static_assert(!data_span.empty(), ""); static_assert(decltype(data_span)::extent == 9, ""); constexpr auto data_span_first = data_span.first<3>(); static_assert( std::is_same<typename std::remove_cv<decltype(data_span_first)>::type, kat::span<const int, 3>>::value, ""); static_assert(decltype(data_span_first)::extent == 3, ""); static_assert(data_span_first.size() == 3, ""); static_assert(data_span_first.front() == 0, ""); static_assert(data_span_first.back() == 2, ""); static_assert(std::tuple_size<decltype(data_span_first)>::value == 3, ""); static_assert(std::is_same<std::tuple_element_t<0, decltype(data_span_first)>, const int>::value, ""); constexpr auto data_span_first_dyn = data_span.first(4); static_assert( std::is_same<typename std::remove_cv<decltype(data_span_first_dyn)>::type, kat::span<const int>>::value, ""); static_assert(decltype(data_span_first_dyn)::extent == kat::dynamic_extent, ""); static_assert(data_span_first_dyn.size() == 4, ""); static_assert(data_span_first_dyn.front() == 0, ""); static_assert(data_span_first_dyn.back() == 3, ""); constexpr auto data_span_last = data_span.last<5>(); static_assert( std::is_same<typename std::remove_cv<decltype(data_span_last)>::type, kat::span<const int, 5>>::value, ""); static_assert(decltype(data_span_last)::extent == 5, ""); 
static_assert(data_span_last.size() == 5, ""); static_assert(data_span_last.front() == 4, ""); static_assert(data_span_last.back() == 8, ""); static_assert(std::tuple_size<decltype(data_span_last)>::value == 5, ""); static_assert(std::is_same<std::tuple_element_t<0, decltype(data_span_last)>, const int>::value, ""); constexpr auto data_span_last_dyn = data_span.last(6); static_assert( std::is_same<typename std::remove_cv<decltype(data_span_last_dyn)>::type, kat::span<const int>>::value, ""); static_assert(decltype(data_span_last_dyn)::extent == kat::dynamic_extent, ""); static_assert(data_span_last_dyn.size() == 6, ""); static_assert(data_span_last_dyn.front() == 3, ""); static_assert(data_span_last_dyn.back() == 8, ""); constexpr auto data_span_subspan = data_span.subspan<1, 3>(); static_assert( std::is_same<typename std::remove_cv<decltype(data_span_subspan)>::type, kat::span<const int, 3>>::value, ""); static_assert(decltype(data_span_subspan)::extent == 3, ""); static_assert(data_span_subspan.size() == 3, ""); static_assert(data_span_subspan.front() == 1, ""); static_assert(data_span_subspan.back() == 3, ""); // constexpr auto data_span_subspan_offset = data_span.subspan<8>(); constexpr auto data_span_subspan_offset = data_span.subspan<8, 1>(); static_assert( std::is_same<typename std::remove_cv<decltype(data_span_subspan_offset)>::type, kat::span<const int, 1>>::value, ""); static_assert(decltype(data_span_subspan_offset)::extent == 1, ""); static_assert(data_span_subspan_offset.size() == 1, ""); static_assert(data_span_subspan_offset.front() == 8, ""); static_assert(data_span_subspan_offset.back() == 8, ""); constexpr auto data_span_subspan_empty = data_span.subspan(9, 0); static_assert( std::is_same<typename std::remove_cv<decltype(data_span_subspan_empty)>::type, kat::span<const int>>::value, ""); static_assert(decltype(data_span_subspan_empty)::extent == kat::dynamic_extent, ""); static_assert(data_span_subspan_empty.size() == 0, ""); // TODO: The following line should work, i.e. deduction should give us // the second template argument as Extent - Offset, but somehow it doesn't. // Perhaps it's because I broke a method up into two cases to avoid if constexpr; // perhaps it's because of NVCC - who knows. 
// // constexpr auto data_span_subspan_empty_static = data_span.subspan<9>(); // // instead, well use the following line: constexpr auto data_span_subspan_empty_static = data_span.subspan<9,0>(); static_assert(std::is_same<typename std::remove_cv<decltype(data_span_subspan_empty_static)>::type, kat::span<const int, 0>>::value, ""); // std::cout << std::hash<decltype(data_span_subspan_empty_static)>() << std::endl; static_assert(decltype(data_span_subspan_empty_static)::extent == 0, ""); static_assert(data_span_subspan_empty.size() == 0, ""); kat::span<short> shorts{}; KAT_HD_CHECK(shorts.empty()); #if __cplusplus >= 201703L results[shorts_is_std_empty] = std::empty(shorts); #else #endif KAT_HD_CHECK(shorts.data() == nullptr); KAT_HD_CHECK(shorts.begin() == shorts.end()); KAT_HD_CHECK(shorts.cbegin() == shorts.cend()); #if __cplusplus >= 202001L std::vector<std::int_least32_t> value{ 0 }; kat::span<int32_t> muh_span(value); VERIFY(muh_span.size() == 1); std::byte* original_bytes = reinterpret_cast<std::byte*>(value.data()); original_bytes[0] = static_cast<std::byte>(1); original_bytes[1] = static_cast<std::byte>(2); original_bytes[2] = static_cast<std::byte>(3); original_bytes[3] = static_cast<std::byte>(4); kat::span<const std::byte> muh_byte_span = std::as_bytes(muh_span); kat::span<std::byte> muh_mutable_byte_span = std::as_writable_bytes(muh_span); kat::span<std::byte> muh_original_byte_span(original_bytes, original_bytes + 4); bool definitely_reinterpret_casted0 = std::equal(muh_byte_span.cbegin(), muh_byte_span.cend(), muh_original_byte_span.cbegin(), muh_original_byte_span.cend()); bool definitely_reinterpret_casted1 = std::equal(muh_mutable_byte_span.cbegin(), muh_mutable_byte_span.cend(), muh_original_byte_span.cbegin(), muh_original_byte_span.cend()); KAT_HD_CHECK(definitely_reinterpret_casted0 && definitely_reinterpret_casted1); kat::span<std::byte> muh_original_byte_span_ptr_size(original_bytes, 4); KAT_HD_CHECK( std::equal(muh_original_byte_span_ptr_size.cbegin(), muh_original_byte_span_ptr_size.cend(), muh_original_byte_span.cbegin(), muh_original_byte_span.cend()) ); #endif } }; } // namespace detail template <typename T, T Value> struct value_as_type { static constexpr const T value { Value }; }; TEST_SUITE("span-host-side-libstdcxx") { // Note: // These tests are inspired and/or derived from the stdlibc++ tests // for kat::span. They are therefore subject to the same license // as the code for kat::span itself - see <kat/span.hpp> for details. // // ... however... we can't use those unit tests which test for assertion // failure, or process exit/abort - not with doctest, anyway TEST_CASE("LWG-3225-constructibility-with-C-array") { detail::lwg_3225_constructibility_with_c_array{}(nullptr, 0); } TEST_CASE("LWG-3225-constructibility-with-kat-array") { detail::lwg_3225_constructibility_with_kat_array{}(); } TEST_CASE("LWG-3225-constructibility-with-std-array") { detail::lwg_3225_constructibility_with_std_array{}(); } TEST_CASE("nothrow-construcitibility") { detail::nothrow_constructibility{}(); } TEST_CASE("everything") { result_of_check results[detail::num_checks] = {}; detail::everything{}(results, detail::num_checks); check_results(results, detail::num_checks); } } // TEST_SUITE("host-side") // The following tests are adapted from the Microsoft implementation // of the GSL - C++ core guidelines support library. 
They are licensed // under the MIT License (MIT): // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. using kat::span; using kat::make_span; struct AddressOverloaded { #if (__cplusplus > 201402L) [[maybe_unused]] #endif AddressOverloaded operator&() const { return {}; } }; struct constructors { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; span<int> s; KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == nullptr); span<const int> cs; KAT_HD_CHECK(cs.size() == 0); KAT_HD_CHECK(cs.data() == nullptr); } }; struct constructors_with_extent { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; span<int, 0> s; KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == nullptr); span<const int, 0> cs; KAT_HD_CHECK(cs.size() == 0); KAT_HD_CHECK(cs.data() == nullptr); } }; struct constructors_with_bracket_init { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; span<int> s {}; KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == nullptr); span<const int> cs {}; KAT_HD_CHECK(cs.size() == 0); KAT_HD_CHECK(cs.data() == nullptr); } }; struct from_pointer_length_constructor { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; int arr[4] = {1, 2, 3, 4}; { for (int i = 0; i < 4; ++i) { { span<int> s = {&arr[0], std::size_t {i}}; KAT_HD_CHECK(s.size() == i); KAT_HD_CHECK(s.data() == &arr[0]); KAT_HD_CHECK(s.empty() == (i == 0)); for (int j = 0; j < i; ++j) { KAT_HD_CHECK(arr[j] == s[j]); // These are supported in GSL, but not by our span // KAT_HD_CHECK(arr[j] == s.at(j)); // KAT_HD_CHECK(arr[j] == s(j)); } } } } { span<int, 2> s {&arr[0], 2}; KAT_HD_CHECK(s.size() == 2); KAT_HD_CHECK(s.data() == &arr[0]); KAT_HD_CHECK(s[0] == 1); KAT_HD_CHECK(s[1] == 2); } { auto s = kat::make_span(&arr[0], 2); KAT_HD_CHECK(s.size() == 2); KAT_HD_CHECK(s.data() == &arr[0]); KAT_HD_CHECK(s[0] == 1); KAT_HD_CHECK(s[1] == 2); } } }; struct from_pointer_pointer_construction { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; int arr[4] = {1, 2, 3, 4}; { span<int> s {&arr[0], &arr[2]}; KAT_HD_CHECK(s.size() == 2); KAT_HD_CHECK(s.data() == &arr[0]); KAT_HD_CHECK(s[0] == 1); KAT_HD_CHECK(s[1] == 2); } { span<int, 2> s {&arr[0], &arr[2]}; KAT_HD_CHECK(s.size() == 2); KAT_HD_CHECK(s.data() == &arr[0]); KAT_HD_CHECK(s[0] == 1); KAT_HD_CHECK(s[1] == 2); } { span<int> s {&arr[0], &arr[0]}; KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == &arr[0]); } { span<int, 0> s {&arr[0], &arr[0]}; KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == &arr[0]); } { int* p = nullptr; span<int> s {p, p}; KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == nullptr); } { int* p = nullptr; span<int, 0> s {p, p}; KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == nullptr); } { auto s = make_span(&arr[0], &arr[2]); KAT_HD_CHECK(s.size() == 2); KAT_HD_CHECK(s.data() == &arr[0]); KAT_HD_CHECK(s[0] == 1); KAT_HD_CHECK(s[1] == 2); } { auto s = make_span(&arr[0], &arr[0]); 
KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == &arr[0]); } { int* p = nullptr; auto s = make_span(p, p); KAT_HD_CHECK(s.size() == 0); KAT_HD_CHECK(s.data() == nullptr); } } }; struct from_array_constructor { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; int arr[5] = {1, 2, 3, 4, 5}; { const span<int> s {arr}; KAT_HD_CHECK(s.size() == 5); KAT_HD_CHECK(s.data() == &arr[0]); } { const span<int, 5> s {arr}; KAT_HD_CHECK(s.size() == 5); KAT_HD_CHECK(s.data() == &arr[0]); } int arr2d[2][3] = {1, 2, 3, 4, 5, 6}; { const span<int[3]> s {kat::addressof(arr2d[0]), 1}; KAT_HD_CHECK(s.size() == 1); KAT_HD_CHECK(s.data() == kat::addressof(arr2d[0])); } int arr3d[2][3][2] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; { const span<int[3][2]> s {kat::addressof(arr3d[0]), 1}; KAT_HD_CHECK(s.size() == 1); } { const auto s = make_span(arr); KAT_HD_CHECK(s.size() == 5); KAT_HD_CHECK(s.data() == kat::addressof(arr[0])); } { const auto s = make_span(kat::addressof(arr2d[0]), 1); KAT_HD_CHECK(s.size() == 1); KAT_HD_CHECK(s.data() == kat::addressof(arr2d[0])); } { const auto s = make_span(kat::addressof(arr3d[0]), 1); KAT_HD_CHECK(s.size() == 1); KAT_HD_CHECK(s.data() == kat::addressof(arr3d[0])); } } }; struct from_dynamic_array_constructor { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; double(*arr)[3][4] = new double[100][3][4]; { span<double> s(&arr[0][0][0], 10); KAT_HD_CHECK(s.size() == 10); KAT_HD_CHECK(s.data() == &arr[0][0][0]); } { auto s = make_span(&arr[0][0][0], 10); KAT_HD_CHECK(s.size() == 10); KAT_HD_CHECK(s.data() == &arr[0][0][0]); } delete[] arr; } }; struct from_convertible_span_constructor { KAT_HD void operator()(result_of_check*, kat::size_t) { { struct BaseClass { }; struct DerivedClass : BaseClass { }; span<DerivedClass> avd; span<const DerivedClass> avcd = avd; static_cast<void>(avcd); } } }; struct copy_move_and_assignment { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; span<int> s1; KAT_HD_CHECK(s1.empty()); int arr[] = {3, 4, 5}; span<const int> s2 = arr; KAT_HD_CHECK(s2.size() == 3); KAT_HD_CHECK(s2.data() == &arr[0]); s2 = s1; KAT_HD_CHECK(s2.empty()); auto get_temp_span = [&]() -> span<int> { return {&arr[1], 2}; }; auto use_span = [&](span<const int> s) { KAT_HD_CHECK(s.size() == 2); KAT_HD_CHECK(s.data() == &arr[1]); }; use_span(get_temp_span()); s1 = get_temp_span(); KAT_HD_CHECK(s1.size() == 2); KAT_HD_CHECK(s1.data() == &arr[1]); } }; struct first { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; int arr[5] = {1, 2, 3, 4, 5}; { span<int, 5> av = arr; KAT_HD_CHECK(av.first<2>().size() == 2); KAT_HD_CHECK(av.first(2).size() == 2); } { span<int, 5> av = arr; KAT_HD_CHECK(av.first<0>().size() == 0); KAT_HD_CHECK(av.first(0).size() == 0); } { span<int, 5> av = arr; KAT_HD_CHECK(av.first<5>().size() == 5); KAT_HD_CHECK(av.first(5).size() == 5); } { span<int, 5> av = arr; } { span<int> av; KAT_HD_CHECK(av.first<0>().size() == 0); KAT_HD_CHECK(av.first(0).size() == 0); } } }; struct last { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; int arr[5] = {1, 2, 3, 4, 5}; { span<int, 5> av = arr; KAT_HD_CHECK(av.last<2>().size() == 2); KAT_HD_CHECK(av.last(2).size() == 2); } { span<int, 5> av = arr; KAT_HD_CHECK(av.last<0>().size() == 0); KAT_HD_CHECK(av.last(0).size() == 0); } { span<int, 5> av = arr; 
KAT_HD_CHECK(av.last<5>().size() == 5); KAT_HD_CHECK(av.last(5).size() == 5); } { span<int, 5> av = arr; } { span<int> av; KAT_HD_CHECK(av.last<0>().size() == 0); KAT_HD_CHECK(av.last(0).size() == 0); } } }; struct subspan { KAT_HD void operator()(result_of_check* results, kat::size_t num_checks) { auto check_index { 0 }; int arr[5] = {1, 2, 3, 4, 5}; { span<int, 5> av = arr; KAT_HD_CHECK((av.subspan<2, 2>().size()) == 2); KAT_HD_CHECK(decltype(av.subspan<2, 2>())::extent == 2); KAT_HD_CHECK(av.subspan(2, 2).size() == 2); KAT_HD_CHECK(av.subspan(2, 3).size() == 3); } { span<int, 5> av = arr; KAT_HD_CHECK((av.subspan<0, 0>().size()) == 0); KAT_HD_CHECK(decltype(av.subspan<0, 0>())::extent == 0); KAT_HD_CHECK(av.subspan(0, 0).size() == 0); } { span<int, 5> av = arr; KAT_HD_CHECK((av.subspan<0, 5>().size()) == 5); KAT_HD_CHECK(decltype(av.subspan<0, 5>())::extent == 5); KAT_HD_CHECK(av.subspan(0, 5).size() == 5); } { span<int, 5> av = arr; KAT_HD_CHECK((av.subspan<4, 0>().size()) == 0); KAT_HD_CHECK(decltype(av.subspan<4, 0>())::extent == 0); KAT_HD_CHECK(av.subspan(4, 0).size() == 0); KAT_HD_CHECK(av.subspan(5, 0).size() == 0); } { span<int, 5> av = arr; // TODO: This should work without specifying the extent! // KAT_HD_CHECK(av.subspan<1>().size() == 4); // KAT_HD_CHECK(decltype(av.subspan<1>())::extent == 4); KAT_HD_CHECK((av.subspan<1,4>().size() == 4)); KAT_HD_CHECK(decltype(av.subspan<1,4>())::extent == 4); } { span<int> av; KAT_HD_CHECK((av.subspan<0, 0>().size()) == 0); KAT_HD_CHECK(decltype(av.subspan<0, 0>())::extent == 0); KAT_HD_CHECK(av.subspan(0, 0).size() == 0); } { span<int> av; KAT_HD_CHECK(av.subspan(0).size() == 0); } { span<int> av = arr; KAT_HD_CHECK(av.subspan(0).size() == 5); KAT_HD_CHECK(av.subspan(1).size() == 4); KAT_HD_CHECK(av.subspan(4).size() == 1); KAT_HD_CHECK(av.subspan(5).size() == 0); const auto av2 = av.subspan(1); for (int i = 0; i < 4; ++i) KAT_HD_CHECK(av2[i] == i + 2); } { span<int, 5> av = arr; KAT_HD_CHECK(av.subspan(0).size() == 5); KAT_HD_CHECK(av.subspan(1).size() == 4); KAT_HD_CHECK(av.subspan(4).size() == 1); KAT_HD_CHECK(av.subspan(5).size() == 0); const auto av2 = av.subspan(1); for (int i = 0; i < 4; ++i) KAT_HD_CHECK(av2[i] == i + 2); } } }; TEST_SUITE("span-host-side-gsl") { TEST_CASE("constructors") { constexpr const auto num_checks { 4 }; result_of_check results[num_checks] = {}; constructors{}(results, num_checks); } TEST_CASE("constructors_with_extent") { constexpr const auto num_checks { 4 }; result_of_check results[num_checks] = {}; constructors_with_extent{}(results, num_checks); } TEST_CASE("constructors_with_bracket_init") { constexpr const auto num_checks { 4 }; result_of_check results[num_checks] = {}; constructors_with_bracket_init{}(results, num_checks); } TEST_CASE("from_pointer_length_constructor") { constexpr const auto num_checks { 26 }; result_of_check results[num_checks] = {}; from_pointer_length_constructor{}(results, num_checks); } TEST_CASE("from_pointer_pointer_construction") { constexpr const auto num_checks { 24 }; result_of_check results[num_checks] = {}; from_pointer_pointer_construction{}(results, num_checks); } TEST_CASE("from_array_constructor") { constexpr const auto num_checks { 13 }; result_of_check results[num_checks] = {}; from_array_constructor{}(results, num_checks); } TEST_CASE("from_dynamic_array_constructor") { constexpr const auto num_checks { 4 }; result_of_check results[num_checks] = {}; from_dynamic_array_constructor{}(results, num_checks); } TEST_CASE("from_std_array_constructor") { // Not using a 
host-device functor for this one - as std::array is only host-side std::array<int, 4> arr = {1, 2, 3, 4}; { span<int> s {arr}; // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); span<const int> cs {arr}; // CHECK(cs.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(cs.data() == arr.data()); } { span<int, 4> s {arr}; // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); span<const int, 4> cs {arr}; // CHECK(cs.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(cs.data() == arr.data()); } { std::array<int, 0> empty_arr {}; span<int> s {empty_arr}; CHECK(s.size() == 0); CHECK(s.empty()); } // std::array<AddressOverloaded, 4> ao_arr {}; // // { // span<AddressOverloaded, 4> fs {ao_arr}; // CHECK(fs.size() == narrow_cast<ptrdiff_t>(ao_arr.size())); // CHECK(ao_arr.data() == fs.data()); // } { auto get_an_array = []() -> std::array<int, 4> {return {1, 2, 3, 4};}; auto take_a_span = [](span<const int> s) {static_cast<void>(s);}; // try to take a temporary std::array take_a_span(get_an_array()); } { auto s = make_span(arr); // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); } // This test checks for the bug found in gcc 6.1, 6.2, 6.3, 6.4, 6.5 7.1, 7.2, 7.3 - issue #590 { span<int> s1 = make_span(arr); static span<int> s2; s2 = s1; CHECK(s1.size() == s2.size()); } } TEST_CASE("from_const_std_array_constructor") { // Not using a host-device functor for this one - as std::array is only host-side const std::array<int, 4> arr = {1, 2, 3, 4}; { span<const int> s {arr}; // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); } { span<const int, 4> s {arr}; // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); } // const std::array<AddressOverloaded, 4> ao_arr {}; // // { // span<const AddressOverloaded, 4> s {ao_arr}; // CHECK(s.size() == narrow_cast<ptrdiff_t>(ao_arr.size())); // CHECK(s.data() == ao_arr.data()); // } { auto get_an_array = []() -> const std::array<int, 4> {return {1, 2, 3, 4};}; auto take_a_span = [](span<const int> s) {static_cast<void>(s);}; // try to take a temporary std::array take_a_span(get_an_array()); } { auto s = make_span(arr); // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); } } TEST_CASE("from_std_array_const_constructor") { // Not using a host-device functor for this one - as std::array is only host-side std::array<const int, 4> arr = {1, 2, 3, 4}; { span<const int> s {arr}; // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); } { span<const int, 4> s {arr}; // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); } { auto s = make_span(arr); // CHECK(s.size() == narrow_cast<ptrdiff_t>(arr.size())); CHECK(s.data() == arr.data()); } } // TODO: These don't work. And we don't have a Container& constructor, either... and // if we enable one (lifted from GSL), this still doesn't pass. 
TEST_CASE("from_container_constructor" * doctest::skip()) { // Not using a host-device functor for this one - as std::vector is only host-side std::vector<int> v = {1, 2, 3}; const std::vector<int> cv = v; // // { // span<int> s {v}; // // CHECK(s.size() == narrow_cast<std::ptrdiff_t>(v.size())); // CHECK(s.data() == v.data()); // // span<const int> cs {v}; // // CHECK(cs.size() == narrow_cast<std::ptrdiff_t>(v.size())); // CHECK(cs.data() == v.data()); // } // // std::string str = "hello"; // const std::string cstr = "hello"; // // { // span<const char> cs {str}; // // CHECK(cs.size() == narrow_cast<std::ptrdiff_t>(str.size())); // CHECK(cs.data() == str.data()); // } // // { // span<const char> cs {cstr}; // // CHECK(cs.size() == narrow_cast<std::ptrdiff_t>(cstr.size())); // CHECK(cs.data() == cstr.data()); // } // // { // auto get_temp_vector = []() -> std::vector<int> {return {};}; // auto use_span = [](span<const int> s) {static_cast<void>(s);}; // use_span(get_temp_vector()); // } // // // { // auto get_temp_string = []() -> std::string {return {};}; // auto use_span = [](span<const char> s) {static_cast<void>(s);}; // use_span(get_temp_string()); // } // // { // auto get_temp_string = []() -> const std::string {return {};}; // auto use_span = [](span<const char> s) {static_cast<void>(s);}; // use_span(get_temp_string()); // } // // // { // auto s = make_span(v); // // CHECK(s.size() == narrow_cast<std::ptrdiff_t>(v.size())); // CHECK(s.data() == v.data()); // // auto cs = make_span(cv); // // CHECK(cs.size() == narrow_cast<std::ptrdiff_t>(cv.size())); // CHECK(cs.data() == cv.data()); // } } TEST_CASE("from_convertible_span_constructor") { from_convertible_span_constructor{}(nullptr, 0); } TEST_CASE("copy_move_and_assignment") { constexpr const auto num_checks { 7 }; result_of_check results[num_checks] = {}; copy_move_and_assignment{}(results, num_checks); check_results(results, num_checks); } TEST_CASE("first") { constexpr const auto num_checks { 8 }; result_of_check results[num_checks] = {}; first{}(results, num_checks); check_results(results, num_checks); } TEST_CASE("last") { // std::set_terminate([] { // std::cerr << "Expected Death. 
last"; // std::abort(); // }); constexpr const auto num_checks { 8 }; result_of_check results[num_checks] = {}; last{}(results, num_checks); check_results(results, num_checks); } TEST_CASE("subspan") { constexpr const auto num_checks { 36 }; result_of_check results[num_checks] = {}; subspan{}(results, num_checks); check_results(results, num_checks); } // We don't have this size-zero optimization //TEST_CASE("size_optimization") //{ // span<int> s; // CHECK(sizeof(s) == sizeof(int*) + sizeof(ptrdiff_t)); // // span<int, 0> se; // (void) se; // CHECK(sizeof(se) == sizeof(int*)); //} } // TEST_SUITE("span-host-side-gsl") TEST_SUITE("span-device-side-from-libstdcxx") { TEST_CASE("LWG-3225-constructibility-with-C-array") { execute_simple_testcase_on_gpu(detail::lwg_3225_constructibility_with_c_array{}); } TEST_CASE("LWG-3225-constructibility-with-kat-array") { execute_simple_testcase_on_gpu(detail::lwg_3225_constructibility_with_kat_array{}); } TEST_CASE("LWG-3225-constructibility-with-std-array") { execute_simple_testcase_on_gpu(detail::lwg_3225_constructibility_with_std_array{}); } TEST_CASE("nothrow-constructibility") { execute_simple_testcase_on_gpu(detail::nothrow_constructibility{}); } TEST_CASE("everything") { auto results = execute_simple_testcase_on_gpu(detail::everything{}, detail::num_checks); check_results(results); } } // TEST_SUITE("span-device-side-from-libstdcxx") TEST_SUITE("span-device-side-from-libgsl") { TEST_CASE("from_convertible_span_constructor") { from_convertible_span_constructor{}(nullptr, 0); } TEST_CASE("copy_move_and_assignment") { constexpr const auto num_checks { 7 }; auto results = execute_simple_testcase_on_gpu(copy_move_and_assignment{}, num_checks); check_results(results); } TEST_CASE("first") { constexpr const auto num_checks { 8 }; auto results = execute_simple_testcase_on_gpu(first{}, num_checks); check_results(results); } TEST_CASE("last") { constexpr const auto num_checks { 8 }; auto results = execute_simple_testcase_on_gpu(last{}, num_checks); check_results(results); } TEST_CASE("subspan") { constexpr const auto num_checks { 36 }; auto results = execute_simple_testcase_on_gpu(subspan{}, num_checks); check_results(results); } } // TEST_SUITE("span-device-side-from-libgsl")
#include <memory> #include <type_traits> #include <gtest/gtest.h> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/dim.hpp> #include <ginkgo/core/base/types.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "common/unified/base/kernel_launch_reduction.hpp" #include "common/unified/base/kernel_launch_solver.hpp" #include "core/test/utils.hpp" namespace { using gko::dim; using gko::int64; using gko::size_type; using std::is_same; class KernelLaunch : public ::testing::Test { protected: KernelLaunch() : exec(gko::CudaExecutor::create(0, gko::ReferenceExecutor::create(), false, gko::allocation_mode::device)), zero_array(exec->get_master(), 16), iota_array(exec->get_master(), 16), iota_transp_array(exec->get_master(), 16), iota_dense(gko::matrix::Dense<>::create(exec, dim<2>{4, 4})), zero_dense(gko::matrix::Dense<>::create(exec, dim<2>{4, 4}, 6)), zero_dense2(gko::matrix::Dense<>::create(exec, dim<2>{4, 4}, 5)), vec_dense(gko::matrix::Dense<>::create(exec, dim<2>{1, 4})) { auto ref_iota_dense = gko::matrix::Dense<>::create(exec->get_master(), dim<2>{4, 4}); for (int i = 0; i < 16; i++) { zero_array.get_data()[i] = 0; iota_array.get_data()[i] = i; iota_transp_array.get_data()[i] = (i % 4 * 4) + i / 4; ref_iota_dense->at(i / 4, i % 4) = i; } zero_dense->fill(0.0); zero_dense2->fill(0.0); iota_dense->copy_from(ref_iota_dense.get()); zero_array.set_executor(exec); iota_array.set_executor(exec); iota_transp_array.set_executor(exec); } std::shared_ptr<gko::CudaExecutor> exec; gko::Array<int> zero_array; gko::Array<int> iota_array; gko::Array<int> iota_transp_array; std::unique_ptr<gko::matrix::Dense<>> iota_dense; std::unique_ptr<gko::matrix::Dense<>> zero_dense; std::unique_ptr<gko::matrix::Dense<>> zero_dense2; std::unique_ptr<gko::matrix::Dense<>> vec_dense; }; // nvcc doesn't like device lambdas declared in complex classes, move it out void run1d(std::shared_ptr<gko::CudaExecutor> exec, size_type dim, int* data) { gko::kernels::cuda::run_kernel( exec, [] GKO_KERNEL(auto i, auto d) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(d), int*>::value, "type"); d[i] = i; }, dim, data); } TEST_F(KernelLaunch, Runs1D) { run1d(exec, zero_array.get_num_elems(), zero_array.get_data()); GKO_ASSERT_ARRAY_EQ(zero_array, iota_array); } void run1d(std::shared_ptr<gko::CudaExecutor> exec, gko::Array<int>& data) { gko::kernels::cuda::run_kernel( exec, [] GKO_KERNEL(auto i, auto d, auto d_ptr) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(d), int*>::value, "type"); static_assert(is_same<decltype(d_ptr), const int*>::value, "type"); if (d == d_ptr) { d[i] = i; } else { d[i] = 0; } }, data.get_num_elems(), data, data.get_const_data()); } TEST_F(KernelLaunch, Runs1DArray) { run1d(exec, zero_array); GKO_ASSERT_ARRAY_EQ(zero_array, iota_array); } void run1d(std::shared_ptr<gko::CudaExecutor> exec, gko::matrix::Dense<>* m) { gko::kernels::cuda::run_kernel( exec, [] GKO_KERNEL(auto i, auto d, auto d2, auto d_ptr) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(d(0, 0)), double&>::value, "type"); static_assert(is_same<decltype(d2(0, 0)), const double&>::value, "type"); static_assert(is_same<decltype(d_ptr), const double*>::value, "type"); bool pointers_correct = d.data == d_ptr && d2.data == d_ptr; bool strides_correct = d.stride == 5 && d2.stride == 5; bool accessors_2d_correct = &d(0, 0) == d_ptr && &d(1, 0) == d_ptr + d.stride && &d2(0, 0) == d_ptr && &d2(1, 0) == 
d_ptr + d.stride; bool accessors_1d_correct = &d[0] == d_ptr && &d2[0] == d_ptr; if (pointers_correct && strides_correct && accessors_2d_correct && accessors_1d_correct) { d(i / 4, i % 4) = i; } else { d(i / 4, i % 4) = 0; } }, 16, m, static_cast<const gko::matrix::Dense<>*>(m), m->get_const_values()); } TEST_F(KernelLaunch, Runs1DDense) { run1d(exec, zero_dense2.get()); GKO_ASSERT_MTX_NEAR(zero_dense2, iota_dense, 0.0); } void run2d(std::shared_ptr<gko::CudaExecutor> exec, int* data) { gko::kernels::cuda::run_kernel( exec, [] GKO_KERNEL(auto i, auto j, auto d) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(j), int64>::value, "index"); static_assert(is_same<decltype(d), int*>::value, "type"); d[i + 4 * j] = 4 * i + j; }, dim<2>{4, 4}, data); } TEST_F(KernelLaunch, Runs2D) { run2d(exec, zero_array.get_data()); GKO_ASSERT_ARRAY_EQ(zero_array, iota_transp_array); } void run2d(std::shared_ptr<gko::CudaExecutor> exec, gko::Array<int>& data) { gko::kernels::cuda::run_kernel( exec, [] GKO_KERNEL(auto i, auto j, auto d, auto d_ptr) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(j), int64>::value, "index"); static_assert(is_same<decltype(d), int*>::value, "type"); static_assert(is_same<decltype(d_ptr), const int*>::value, "type"); if (d == d_ptr) { d[i + 4 * j] = 4 * i + j; } else { d[i + 4 * j] = 0; } }, dim<2>{4, 4}, data, data.get_const_data()); } TEST_F(KernelLaunch, Runs2DArray) { run2d(exec, zero_array); GKO_ASSERT_ARRAY_EQ(zero_array, iota_transp_array); } void run2d(std::shared_ptr<gko::CudaExecutor> exec, gko::matrix::Dense<>* m1, gko::matrix::Dense<>* m2, gko::matrix::Dense<>* m3) { gko::kernels::cuda::run_kernel_solver( exec, [] GKO_KERNEL(auto i, auto j, auto d, auto d2, auto d_ptr, auto d3, auto d4, auto d2_ptr, auto d3_ptr) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(d(0, 0)), double&>::value, "type"); static_assert(is_same<decltype(d2(0, 0)), const double&>::value, "type"); static_assert(is_same<decltype(d_ptr), const double*>::value, "type"); static_assert(is_same<decltype(d3(0, 0)), double&>::value, "type"); static_assert(is_same<decltype(d4), double*>::value, "type"); static_assert(is_same<decltype(d2_ptr), double*>::value, "type"); static_assert(is_same<decltype(d3_ptr), double*>::value, "type"); bool pointers_correct = d.data == d_ptr && d2.data == d_ptr && d3.data == d2_ptr && d4 == d3_ptr; bool strides_correct = d.stride == 5 && d2.stride == 5 && d3.stride == 6; bool accessors_2d_correct = &d(0, 0) == d_ptr && &d(1, 0) == d_ptr + d.stride && &d2(0, 0) == d_ptr && &d2(1, 0) == d_ptr + d2.stride && &d3(0, 0) == d2_ptr && &d3(1, 0) == d2_ptr + d3.stride; bool accessors_1d_correct = &d[0] == d_ptr && &d2[0] == d_ptr && &d3[0] == d2_ptr; if (pointers_correct && strides_correct && accessors_2d_correct && accessors_1d_correct) { d(i, j) = 4 * i + j; } else { d(i, j) = 0; } }, dim<2>{4, 4}, m2->get_stride(), m1, static_cast<const gko::matrix::Dense<>*>(m1), m1->get_const_values(), gko::kernels::cuda::default_stride(m2), gko::kernels::cuda::row_vector(m3), m2->get_values(), m3->get_values()); } TEST_F(KernelLaunch, Runs2DDense) { run2d(exec, zero_dense2.get(), zero_dense.get(), vec_dense.get()); GKO_ASSERT_MTX_NEAR(zero_dense2, iota_dense, 0.0); } void run1d_reduction(std::shared_ptr<gko::CudaExecutor> exec) { gko::Array<int64> output{exec, 1}; gko::kernels::cuda::run_kernel_reduction( exec, [] GKO_KERNEL(auto i, auto a) { 
static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(a), int64*>::value, "value"); return i + 1; }, [] GKO_KERNEL(auto i, auto j) { return i + j; }, [] GKO_KERNEL(auto j) { return j * 2; }, int64{}, output.get_data(), size_type{100000}, output); // 2 * sum i=0...99999 (i+1) ASSERT_EQ(exec->copy_val_to_host(output.get_const_data()), 10000100000LL); gko::kernels::cuda::run_kernel_reduction( exec, [] GKO_KERNEL(auto i, auto a) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(a), int64*>::value, "value"); return i + 1; }, [] GKO_KERNEL(auto i, auto j) { static_assert(is_same<decltype(i), int64>::value, "a"); static_assert(is_same<decltype(i), int64>::value, "b"); return i + j; }, [] GKO_KERNEL(auto j) { static_assert(is_same<decltype(j), int64>::value, "value"); return j * 2; }, int64{}, output.get_data(), size_type{100}, output); // 2 * sum i=0...99 (i+1) ASSERT_EQ(exec->copy_val_to_host(output.get_const_data()), 10100LL); } TEST_F(KernelLaunch, Reduction1D) { run1d_reduction(exec); } void run2d_reduction(std::shared_ptr<gko::CudaExecutor> exec) { gko::Array<int64> output{exec, 1}; gko::kernels::cuda::run_kernel_reduction( exec, [] GKO_KERNEL(auto i, auto j, auto a) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(j), int64>::value, "index"); static_assert(is_same<decltype(a), int64*>::value, "value"); return (i + 1) * (j + 1); }, [] GKO_KERNEL(auto i, auto j) { static_assert(is_same<decltype(i), int64>::value, "a"); static_assert(is_same<decltype(i), int64>::value, "b"); return i + j; }, [] GKO_KERNEL(auto j) { static_assert(is_same<decltype(j), int64>::value, "value"); return j * 4; }, int64{}, output.get_data(), gko::dim<2>{1000, 100}, output); // 4 * sum i=0...999 sum j=0...99 of (i+1)*(j+1) ASSERT_EQ(exec->copy_val_to_host(output.get_const_data()), 10110100000LL); gko::kernels::cuda::run_kernel_reduction( exec, [] GKO_KERNEL(auto i, auto j, auto a) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(j), int64>::value, "index"); static_assert(is_same<decltype(a), int64*>::value, "value"); return (i + 1) * (j + 1); }, [] GKO_KERNEL(auto i, auto j) { static_assert(is_same<decltype(i), int64>::value, "a"); static_assert(is_same<decltype(i), int64>::value, "b"); return i + j; }, [] GKO_KERNEL(auto j) { static_assert(is_same<decltype(j), int64>::value, "value"); return j * 4; }, int64{}, output.get_data(), gko::dim<2>{10, 10}, output); // 4 * sum i=0...9 sum j=0...9 of (i+1)*(j+1) ASSERT_EQ(exec->copy_val_to_host(output.get_const_data()), 12100LL); } TEST_F(KernelLaunch, Reduction2D) { run2d_reduction(exec); } void run2d_row_reduction(std::shared_ptr<gko::CudaExecutor> exec) { for (auto num_rows : {0, 100, 1000, 10000}) { for (auto num_cols : {0, 10, 100, 1000, 10000}) { SCOPED_TRACE(std::to_string(num_rows) + " rows, " + std::to_string(num_cols) + " cols"); gko::Array<int64> host_ref{exec->get_master(), static_cast<size_type>(2 * num_rows)}; std::fill_n(host_ref.get_data(), 2 * num_rows, 1234); gko::Array<int64> output{exec, host_ref}; for (int64 i = 0; i < num_rows; i++) { // we are computing 2 * sum {j=0, j<cols} (i+1)*(j+1) for each // row i and storing it with stride 2 host_ref.get_data()[2 * i] = static_cast<int64>(num_cols) * (num_cols + 1) * (i + 1); } gko::kernels::cuda::run_kernel_row_reduction( exec, [] GKO_KERNEL(auto i, auto j, auto a) { static_assert(is_same<decltype(i), int64>::value, "index"); 
static_assert(is_same<decltype(j), int64>::value, "index"); static_assert(is_same<decltype(a), int64*>::value, "value"); return (i + 1) * (j + 1); }, [] GKO_KERNEL(auto i, auto j) { static_assert(is_same<decltype(i), int64>::value, "a"); static_assert(is_same<decltype(i), int64>::value, "b"); return i + j; }, [] GKO_KERNEL(auto j) { static_assert(is_same<decltype(j), int64>::value, "value"); return j * 2; }, int64{}, output.get_data(), 2, gko::dim<2>{static_cast<size_type>(num_rows), static_cast<size_type>(num_cols)}, output); GKO_ASSERT_ARRAY_EQ(host_ref, output); } } } TEST_F(KernelLaunch, ReductionRow2D) { run2d_row_reduction(exec); } void run2d_col_reduction(std::shared_ptr<gko::CudaExecutor> exec) { // empty, most threads idle, most threads busy, multiple blocks for (auto num_rows : {0, 10, 100, 1000, 10000}) { // check different edge cases: subwarp sizes, blocked mode for (auto num_cols : {0, 1, 2, 3, 4, 5, 7, 8, 9, 16, 31, 32, 63, 127, 128, 129}) { SCOPED_TRACE(std::to_string(num_rows) + " rows, " + std::to_string(num_cols) + " cols"); gko::Array<int64> host_ref{exec->get_master(), static_cast<size_type>(num_cols)}; gko::Array<int64> output{exec, static_cast<size_type>(num_cols)}; for (int64 i = 0; i < num_cols; i++) { // we are computing 2 * sum {j=0, j<row} (i+1)*(j+1) for each // column i host_ref.get_data()[i] = static_cast<int64>(num_rows) * (num_rows + 1) * (i + 1); } gko::kernels::cuda::run_kernel_col_reduction( exec, [] GKO_KERNEL(auto i, auto j, auto a) { static_assert(is_same<decltype(i), int64>::value, "index"); static_assert(is_same<decltype(j), int64>::value, "index"); static_assert(is_same<decltype(a), int64*>::value, "value"); return (i + 1) * (j + 1); }, [] GKO_KERNEL(auto i, auto j) { static_assert(is_same<decltype(i), int64>::value, "a"); static_assert(is_same<decltype(i), int64>::value, "b"); return i + j; }, [] GKO_KERNEL(auto j) { static_assert(is_same<decltype(j), int64>::value, "value"); return j * 2; }, int64{}, output.get_data(), gko::dim<2>{static_cast<size_type>(num_rows), static_cast<size_type>(num_cols)}, output); GKO_ASSERT_ARRAY_EQ(host_ref, output); } } } TEST_F(KernelLaunch, ReductionCol2D) { run2d_col_reduction(exec); } } // namespace
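// Illustration only (an addition, not part of the original Ginkgo test file):
// the expected values hard-coded in the reduction tests above follow from the
// closed forms of the sums noted in their comments, e.g.
//   2 * sum_{i=0}^{n-1} (i+1)                 = n*(n+1)
//   4 * sum_{i<r} sum_{j<c} (i+1)*(j+1)       = r*(r+1) * c*(c+1)
// and, per row i of the row-reduction test,
//   2 * sum_{j=0}^{c-1} (i+1)*(j+1)           = c*(c+1)*(i+1),
// which is exactly the host_ref formula used above (and analogously for the
// column reduction). A minimal host-side sketch that double-checks the
// constants; the helper names below are ours, purely for illustration:
#include <cassert>
#include <cstdint>

inline void reduction_reference_values_sketch()
{
    // 2 * (1 + 2 + ... + n) == n * (n + 1)
    auto twice_triangle = [](std::int64_t n) { return n * (n + 1); };

    assert(twice_triangle(100000) == 10000100000LL);                  // Reduction1D, large
    assert(twice_triangle(100) == 10100LL);                           // Reduction1D, small
    assert(twice_triangle(1000) * twice_triangle(100) == 10110100000LL); // Reduction2D, large
    assert(twice_triangle(10) * twice_triangle(10) == 12100LL);       // Reduction2D, small

    // Row/column reductions: per row (resp. column) i the reference arrays
    // above store twice_triangle(extent) * (i + 1); spot-check for c == 3.
    for (std::int64_t i = 0; i < 4; ++i) {
        assert(2 * (i + 1) * (1 + 2 + 3) == twice_triangle(3) * (i + 1));
    }
}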
#include <cstdio> #include <utility_kernels.h> #include <convolution_kernels.h> namespace vision { texture<float, 2, cudaReadModeElementType> imageTexture; texture<float, 2, cudaReadModeElementType> floatTexture; texture<float2, 2, cudaReadModeElementType> float2Texture; // 24-bit multiplication is faster on G80, // but we must be sure to multiply integers // only within [-8M, 8M - 1] range #define IMUL(a, b) __mul24(a, b) // Maps to a single instruction on G8x / G9x / G10x #define IMAD(a, b, c) (__mul24((a), (b)) + (c)) // image resize kernel with border replication __global__ void resize_replicate_border_gpu(float *d_out, int pitch, int width, int height) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if (x >= width || y >= height) return; *((float *)((char *)d_out + y *pitch) + x) = tex2D(imageTexture, x, y); } // integrated (non-separable) low-pass filtering and subsampling // width and height refer to the subsampled imageTexture __global__ void lpfSubsampleTexture(float *d_Out, int pitch, int width, int height) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); // output const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); // output const float x = 2.0f * (float)ix + 0.5f; // input const float y = 2.0f * (float)iy + 0.5f; // input if (ix >= width || iy >= height) return; float sum = 0.0f; sum += tex2D(imageTexture, x + 2.0f, y + 2.0f) * 0.0039062500000000f; sum += tex2D(imageTexture, x + 1.0f, y + 2.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + 0.0f, y + 2.0f) * 0.0234375000000000f; sum += tex2D(imageTexture, x + -1.0f, y + 2.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + -2.0f, y + 2.0f) * 0.0039062500000000f; sum += tex2D(imageTexture, x + 2.0f, y + 1.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + 1.0f, y + 1.0f) * 0.0625000000000000f; sum += tex2D(imageTexture, x + 0.0f, y + 1.0f) * 0.0937500000000000f; sum += tex2D(imageTexture, x + -1.0f, y + 1.0f) * 0.0625000000000000f; sum += tex2D(imageTexture, x + -2.0f, y + 1.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + 2.0f, y + 0.0f) * 0.0234375000000000f; sum += tex2D(imageTexture, x + 1.0f, y + 0.0f) * 0.0937500000000000f; sum += tex2D(imageTexture, x + 0.0f, y + 0.0f) * 0.1406250000000000f; sum += tex2D(imageTexture, x + -1.0f, y + 0.0f) * 0.0937500000000000f; sum += tex2D(imageTexture, x + -2.0f, y + 0.0f) * 0.0234375000000000f; sum += tex2D(imageTexture, x + 2.0f, y + -1.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + 1.0f, y + -1.0f) * 0.0625000000000000f; sum += tex2D(imageTexture, x + 0.0f, y + -1.0f) * 0.0937500000000000f; sum += tex2D(imageTexture, x + -1.0f, y + -1.0f) * 0.0625000000000000f; sum += tex2D(imageTexture, x + -2.0f, y + -1.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + 2.0f, y + -2.0f) * 0.0039062500000000f; sum += tex2D(imageTexture, x + 1.0f, y + -2.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + 0.0f, y + -2.0f) * 0.0234375000000000f; sum += tex2D(imageTexture, x + -1.0f, y + -2.0f) * 0.0156250000000000f; sum += tex2D(imageTexture, x + -2.0f, y + -2.0f) * 0.0039062500000000f; *((float *)((char *)d_Out + iy *pitch) + ix) = sum; } // integrated (non-separable) low-pass filtering and subsampling // width and height refer to the subsampled imageTexture // this kernel deals with missing values __global__ void lpfSubsampleTextureNaN(float *d_Out, int pitch, int width, int height) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); // output const int iy = 
IMAD(blockDim.y, blockIdx.y, threadIdx.y); // output const float x = 2.0f * (float)ix + 0.5f; // input const float y = 2.0f * (float)iy + 0.5f; // input if (ix >= width || iy >= height) return; float sum = 0.0f, summask = 0.0f; float data; data = tex2D(imageTexture, x + 2.0f, y + 2.0f); if (isfinite(data)) { summask += 0.0039062500000000f; sum += data * 0.0039062500000000f; } data = tex2D(imageTexture, x + 1.0f, y + 2.0f); if (isfinite(data)) { summask += 0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + 0.0f, y + 2.0f); if (isfinite(data)) { summask += 0.0234375000000000f; sum += data * 0.0234375000000000f; } data = tex2D(imageTexture, x + -1.0f, y + 2.0f); if (isfinite(data)) { summask += 0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + -2.0f, y + 2.0f); if (isfinite(data)) { summask += 0.0039062500000000f; sum += data * 0.0039062500000000f; } data = tex2D(imageTexture, x + 2.0f, y + 1.0f); if (isfinite(data)) { summask += 0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + 1.0f, y + 1.0f); if (isfinite(data)) { summask += 0.0625000000000000f; sum += data * 0.0625000000000000f; } data = tex2D(imageTexture, x + 0.0f, y + 1.0f); if (isfinite(data)) { summask += 0.0937500000000000f; sum += data * 0.0937500000000000f; } data = tex2D(imageTexture, x + -1.0f, y + 1.0f); if (isfinite(data)) { summask += 0.0625000000000000f; sum += data * 0.0625000000000000f; } data = tex2D(imageTexture, x + -2.0f, y + 1.0f); if (isfinite(data)) { summask += 0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + 2.0f, y + 0.0f); if (isfinite(data)) { summask += 0.0234375000000000f; sum += data * 0.0234375000000000f; } data = tex2D(imageTexture, x + 1.0f, y + 0.0f); if (isfinite(data)) { summask += 0.0937500000000000f; sum += data * 0.0937500000000000f; } data = tex2D(imageTexture, x + 0.0f, y + 0.0f); if (isfinite(data)) { summask += 0.1406250000000000f; sum += data * 0.1406250000000000f; } data = tex2D(imageTexture, x + -1.0f, y + 0.0f); if (isfinite(data)) { summask += 0.0937500000000000f; sum += data * 0.0937500000000000f; } data = tex2D(imageTexture, x + -2.0f, y + 0.0f); if (isfinite(data)) { summask += 0.0234375000000000f; sum += data * 0.0234375000000000f; } data = tex2D(imageTexture, x + 2.0f, y + -1.0f); if (isfinite(data)) { summask += 0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + 1.0f, y + -1.0f); if (isfinite(data)) { summask += 0.0625000000000000f; sum += data * 0.0625000000000000f; } data = tex2D(imageTexture, x + 0.0f, y + -1.0f); if (isfinite(data)) { summask += 0.0937500000000000f; sum += data * 0.0937500000000000f; } data = tex2D(imageTexture, x + -1.0f, y + -1.0f); if (isfinite(data)) { summask += 0.0625000000000000f; sum += data * 0.0625000000000000f; } data = tex2D(imageTexture, x + -2.0f, y + -1.0f); if (isfinite(data)) { summask += 0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + 2.0f, y + -2.0f); if (isfinite(data)) { summask += 0.0039062500000000f; sum += data * 0.0039062500000000f; } data = tex2D(imageTexture, x + 1.0f, y + -2.0f); if (isfinite(data)) { summask += 0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + 0.0f, y + -2.0f); if (isfinite(data)) { summask += 0.0234375000000000f; sum += data * 0.0234375000000000f; } data = tex2D(imageTexture, x + -1.0f, y + -2.0f); if (isfinite(data)) { summask += 
0.0156250000000000f; sum += data * 0.0156250000000000f; } data = tex2D(imageTexture, x + -2.0f, y + -2.0f); if (isfinite(data)) { summask += 0.0039062500000000f; sum += data * 0.0039062500000000f; } *((float *)((char *)d_Out + iy *pitch) + ix) = sum / summask; } __global__ void convolutionRowTexture(float2 *d_out, int width, int height, int pitch) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; float y = (float)iy + 0.5f; if (ix >= width || iy >= height) return; //////////////////////////////////////////////// 0_1 float pixel; float2 sum0 = make_float2(0.0f, 0.0f); pixel = tex2D(floatTexture, x + 5.0f, y); sum0.x += pixel * -0.0059423308933587f; sum0.y += pixel * -0.0854403050734955f; pixel = tex2D(floatTexture, x + 4.0f, y); sum0.x += pixel * 0.1996709851458967f; sum0.y += pixel * 0.0005340226360081f; pixel = tex2D(floatTexture, x + 3.0f, y); sum0.x += pixel * -0.0064730167337227f; sum0.y += pixel * 0.4124452895315693f; pixel = tex2D(floatTexture, x + 2.0f, y); sum0.x += pixel * -0.6814284613758747f; sum0.y += pixel * -0.0005666721140656f; pixel = tex2D(floatTexture, x + 1.0f, y); sum0.x += pixel * -0.0058271761429405f; sum0.y += pixel * -0.9093473751107518f; pixel = tex2D(floatTexture, x + 0.0f, y); sum0.x += pixel * 1.0000000000000000f; sum0.y += pixel * 0.0000000000000000f; pixel = tex2D(floatTexture, x + -1.0f, y); sum0.x += pixel * -0.0058271761429405f; sum0.y += pixel * 0.9093473751107518f; pixel = tex2D(floatTexture, x + -2.0f, y); sum0.x += pixel * -0.6814284613758747f; sum0.y += pixel * 0.0005666721140656f; pixel = tex2D(floatTexture, x + -3.0f, y); sum0.x += pixel * -0.0064730167337227f; sum0.y += pixel * -0.4124452895315693f; pixel = tex2D(floatTexture, x + -4.0f, y); sum0.x += pixel * 0.1996709851458967f; sum0.y += pixel * -0.0005340226360081f; pixel = tex2D(floatTexture, x + -5.0f, y); sum0.x += pixel * -0.0059423308933587f; sum0.y += pixel * 0.0854403050734955f; *((float2 *)((char *)d_out + iy *pitch) + ix) = sum0; // 0 even and odd //////////////////////////////////////////////// 0_2 y += (float)height; float2 pixel2; sum0.x = 0.0f; sum0.y = 0.0f; pixel2 = tex2D(float2Texture, x + 5.0f, y); sum0.x += pixel2.x * 0.0824462481622174f; sum0.y += pixel2.y * 0.0824462481622174f; pixel2 = tex2D(float2Texture, x + 4.0f, y); sum0.x += pixel2.x * 0.2046904635506605f; sum0.y += pixel2.y * 0.2046904635506605f; pixel2 = tex2D(float2Texture, x + 3.0f, y); sum0.x += pixel2.x * 0.4107230390429492f; sum0.y += pixel2.y * 0.4107230390429492f; pixel2 = tex2D(float2Texture, x + 2.0f, y); sum0.x += pixel2.x * 0.6742558727374832f; sum0.y += pixel2.y * 0.6742558727374832f; pixel2 = tex2D(float2Texture, x + 1.0f, y); sum0.x += pixel2.x * 0.9070814926070788f; sum0.y += pixel2.y * 0.9070814926070788f; pixel2 = tex2D(float2Texture, x + 0.0f, y); sum0.x += pixel2.x * 0.9998288981228244f; sum0.y += pixel2.y * 0.9998288981228244f; pixel2 = tex2D(float2Texture, x + -1.0f, y); sum0.x += pixel2.x * 0.9070814926070788f; sum0.y += pixel2.y * 0.9070814926070788f; pixel2 = tex2D(float2Texture, x + -2.0f, y); sum0.x += pixel2.x * 0.6742558727374832f; sum0.y += pixel2.y * 0.6742558727374832f; pixel2 = tex2D(float2Texture, x + -3.0f, y); sum0.x += pixel2.x * 0.4107230390429492f; sum0.y += pixel2.y * 0.4107230390429492f; pixel2 = tex2D(float2Texture, x + -4.0f, y); sum0.x += pixel2.x * 0.2046904635506605f; sum0.y += pixel2.y * 0.2046904635506605f; pixel2 = tex2D(float2Texture, x + -5.0f, y); sum0.x += pixel2.x * 
0.0824462481622174f; sum0.y += pixel2.y * 0.0824462481622174f; *((float2 *)((char *)d_out + (iy + 4 *height) *pitch) + ix) = sum0; // 90 even and odd //////////////////////////////////////////////// 1 y += (float)height; sum0.x = 0.0f; sum0.y = 0.0f; float2 sum1 = make_float2(0.0f, 0.0f); pixel2 = tex2D(float2Texture, x + 5.0f, y); sum0.x += pixel2.x * 0.0448539697327717f; sum0.y += pixel2.x * 0.0563848093336092f; sum1.x += pixel2.y * 0.0563848093336092f; sum1.y += pixel2.y * 0.0448539697327717f; pixel2 = tex2D(float2Texture, x + 4.0f, y); sum0.x += pixel2.x * -0.0728676598565544f; sum0.y += pixel2.x * 0.1985363845521255f; sum1.x += pixel2.y * 0.1985363845521255f; sum1.y += pixel2.y * -0.0728676598565544f; pixel2 = tex2D(float2Texture, x + 3.0f, y); sum0.x += pixel2.x * -0.4218122479628296f; sum0.y += pixel2.x * 0.0779500055097176f; sum1.x += pixel2.y * 0.0779500055097176f; sum1.y += pixel2.y * -0.4218122479628296f; pixel2 = tex2D(float2Texture, x + 2.0f, y); sum0.x += pixel2.x * -0.4264028852345470f; sum0.y += pixel2.x * -0.5368628619030967f; sum1.x += pixel2.y * -0.5368628619030967f; sum1.y += pixel2.y * -0.4264028852345470f; pixel2 = tex2D(float2Texture, x + 1.0f, y); sum0.x += pixel2.x * 0.3845516108160854f; sum0.y += pixel2.x * -0.8133545314478231f; sum1.x += pixel2.y * -0.8133545314478231f; sum1.y += pixel2.y * 0.3845516108160854f; pixel2 = tex2D(float2Texture, x + 0.0f, y); sum0.x += pixel2.x * 0.9833544262984621f; sum0.y += pixel2.x * -0.0000000012323343f; sum1.x += pixel2.y * -0.0000000012323343f; sum1.y += pixel2.y * 0.9833544262984621f; pixel2 = tex2D(float2Texture, x + -1.0f, y); sum0.x += pixel2.x * 0.3845516108160854f; sum0.y += pixel2.x * 0.8133545314478231f; sum1.x += pixel2.y * 0.8133545314478231f; sum1.y += pixel2.y * 0.3845516108160854f; pixel2 = tex2D(float2Texture, x + -2.0f, y); sum0.x += pixel2.x * -0.4264028852345470f; sum0.y += pixel2.x * 0.5368628619030967f; sum1.x += pixel2.y * 0.5368628619030967f; sum1.y += pixel2.y * -0.4264028852345470f; pixel2 = tex2D(float2Texture, x + -3.0f, y); sum0.x += pixel2.x * -0.4218122479628296f; sum0.y += pixel2.x * -0.0779500055097176f; sum1.x += pixel2.y * -0.0779500055097176f; sum1.y += pixel2.y * -0.4218122479628296f; pixel2 = tex2D(float2Texture, x + -4.0f, y); sum0.x += pixel2.x * -0.0728676598565544f; sum0.y += pixel2.x * -0.1985363845521255f; sum1.x += pixel2.y * -0.1985363845521255f; sum1.y += pixel2.y * -0.0728676598565544f; pixel2 = tex2D(float2Texture, x + -5.0f, y); sum0.x += pixel2.x * 0.0448539697327717f; sum0.y += pixel2.x * -0.0563848093336092f; sum1.x += pixel2.y * -0.0563848093336092f; sum1.y += pixel2.y * 0.0448539697327717f; // combination stage pixel2.x = sum0.x - sum1.x; // 45 even F4YF4X-F5YF5X pixel2.y = sum1.y + sum0.y; // 45 odd F5YF4X+F4YF5X *((float2 *)((char *)d_out + (iy + 2 *height) *pitch) + ix) = pixel2; pixel2.x = sum0.x + sum1.x; // 135 even F4YF4X+F5YF5X pixel2.y = sum1.y - sum0.y; // 135 odd F5YF4X-F4YF5X *((float2 *)((char *)d_out + (iy + 6 *height) *pitch) + ix) = pixel2; //////////////////////////////////////////////// 2 y += (float)height; sum0.x = 0.0f; sum0.y = 0.0f; sum1.x = 0.0f; sum1.y = 0.0f; pixel2 = tex2D(float2Texture, x + 5.0f, y); sum0.x += pixel2.x * -0.0865021727156619f; sum0.y += pixel2.x * -0.0082494019300884f; sum1.x += pixel2.y * -0.0082494019300884f; sum1.y += pixel2.y * -0.0865021727156619f; pixel2 = tex2D(float2Texture, x + 4.0f, y); sum0.x += pixel2.x * -0.1544706682064459f; sum0.y += pixel2.x * -0.1387420567977273f; sum1.x += pixel2.y * -0.1387420567977273f; sum1.y += 
pixel2.y * -0.1544706682064459f; pixel2 = tex2D(float2Texture, x + 3.0f, y); sum0.x += pixel2.x * -0.0961909886276083f; sum0.y += pixel2.x * -0.4004431484945309f; sum1.x += pixel2.y * -0.4004431484945309f; sum1.y += pixel2.y * -0.0961909886276083f; pixel2 = tex2D(float2Texture, x + 2.0f, y); sum0.x += pixel2.x * 0.2425229792248418f; sum0.y += pixel2.x * -0.6316382348347102f; sum1.x += pixel2.y * -0.6316382348347102f; sum1.y += pixel2.y * 0.2425229792248418f; pixel2 = tex2D(float2Texture, x + 1.0f, y); sum0.x += pixel2.x * 0.7444812173872333f; sum0.y += pixel2.x * -0.5161793771775458f; sum1.x += pixel2.y * -0.5161793771775458f; sum1.y += pixel2.y * 0.7444812173872333f; pixel2 = tex2D(float2Texture, x + 0.0f, y); sum0.x += pixel2.x * 0.9999674491845810f; sum0.y += pixel2.x * 0.0000034368466824f; sum1.x += pixel2.y * 0.0000034368466824f; sum1.y += pixel2.y * 0.9999674491845810f; pixel2 = tex2D(float2Texture, x + -1.0f, y); sum0.x += pixel2.x * 0.7444812173872333f; sum0.y += pixel2.x * 0.5161793771775458f; sum1.x += pixel2.y * 0.5161793771775458f; sum1.y += pixel2.y * 0.7444812173872333f; pixel2 = tex2D(float2Texture, x + -2.0f, y); sum0.x += pixel2.x * 0.2425229792248418f; sum0.y += pixel2.x * 0.6316382348347102f; sum1.x += pixel2.y * 0.6316382348347102f; sum1.y += pixel2.y * 0.2425229792248418f; pixel2 = tex2D(float2Texture, x + -3.0f, y); sum0.x += pixel2.x * -0.0961909886276083f; sum0.y += pixel2.x * 0.4004431484945309f; sum1.x += pixel2.y * 0.4004431484945309f; sum1.y += pixel2.y * -0.0961909886276083f; pixel2 = tex2D(float2Texture, x + -4.0f, y); sum0.x += pixel2.x * -0.1544706682064459f; sum0.y += pixel2.x * 0.1387420567977273f; sum1.x += pixel2.y * 0.1387420567977273f; sum1.y += pixel2.y * -0.1544706682064459f; pixel2 = tex2D(float2Texture, x + -5.0f, y); sum0.x += pixel2.x * -0.0865021727156619f; sum0.y += pixel2.x * 0.0082494019300884f; sum1.x += pixel2.y * 0.0082494019300884f; sum1.y += pixel2.y * -0.0865021727156619f; // combination stage pixel2.x = sum0.x - sum1.x; // 67.5 even F6YF8X-F7YF9X pixel2.y = sum1.y + sum0.y; // 67.5 odd F7YF8X+F6YF9X *((float2 *)((char *)d_out + (iy + 3 *height) *pitch) + ix) = pixel2; pixel2.x = sum0.x + sum1.x; // 112.5 even F6YF8X+F7YF9X pixel2.y = sum1.y - sum0.y; // 112.5 odd F7YF8X-F6YF9X *((float2 *)((char *)d_out + (iy + 5 *height) *pitch) + ix) = pixel2; //////////////////////////////////////////////// 3 y += (float)height; sum0.x = 0.0f; sum0.y = 0.0f; sum1.x = 0.0f; sum1.y = 0.0f; pixel2 = tex2D(float2Texture, x + 5.0f, y); sum0.x += pixel2.x * 0.0422085900963304f; sum0.y += pixel2.x * -0.0702919919517587f; sum1.x += pixel2.y * -0.0702919919517587f; sum1.y += pixel2.y * 0.0422085900963304f; pixel2 = tex2D(float2Texture, x + 4.0f, y); sum0.x += pixel2.x * 0.1753604466059522f; sum0.y += pixel2.x * 0.0985418840494423f; sum1.x += pixel2.y * 0.0985418840494423f; sum1.y += pixel2.y * 0.1753604466059522f; pixel2 = tex2D(float2Texture, x + 3.0f, y); sum0.x += pixel2.x * -0.1499117207828439f; sum0.y += pixel2.x * 0.3900078591931931f; sum1.x += pixel2.y * 0.3900078591931931f; sum1.y += pixel2.y * -0.1499117207828439f; pixel2 = tex2D(float2Texture, x + 2.0f, y); sum0.x += pixel2.x * -0.6656505296765876f; sum0.y += pixel2.x * -0.1608071493187968f; sum1.x += pixel2.y * -0.1608071493187968f; sum1.y += pixel2.y * -0.6656505296765876f; pixel2 = tex2D(float2Texture, x + 1.0f, y); sum0.x += pixel2.x * 0.0996765973979726f; sum0.y += pixel2.x * -0.9011408273947247f; sum1.x += pixel2.y * -0.9011408273947247f; sum1.y += pixel2.y * 0.0996765973979726f; pixel2 = 
tex2D(float2Texture, x + 0.0f, y); sum0.x += pixel2.x * 0.9966332327183527f; sum0.y += pixel2.x * -0.0000000000000001f; sum1.x += pixel2.y * -0.0000000000000001f; sum1.y += pixel2.y * 0.9966332327183527f; pixel2 = tex2D(float2Texture, x + -1.0f, y); sum0.x += pixel2.x * 0.0996765973979726f; sum0.y += pixel2.x * 0.9011408273947247f; sum1.x += pixel2.y * 0.9011408273947247f; sum1.y += pixel2.y * 0.0996765973979726f; pixel2 = tex2D(float2Texture, x + -2.0f, y); sum0.x += pixel2.x * -0.6656505296765876f; sum0.y += pixel2.x * 0.1608071493187968f; sum1.x += pixel2.y * 0.1608071493187968f; sum1.y += pixel2.y * -0.6656505296765876f; pixel2 = tex2D(float2Texture, x + -3.0f, y); sum0.x += pixel2.x * -0.1499117207828439f; sum0.y += pixel2.x * -0.3900078591931931f; sum1.x += pixel2.y * -0.3900078591931931f; sum1.y += pixel2.y * -0.1499117207828439f; pixel2 = tex2D(float2Texture, x + -4.0f, y); sum0.x += pixel2.x * 0.1753604466059522f; sum0.y += pixel2.x * -0.0985418840494423f; sum1.x += pixel2.y * -0.0985418840494423f; sum1.y += pixel2.y * 0.1753604466059522f; pixel2 = tex2D(float2Texture, x + -5.0f, y); sum0.x += pixel2.x * 0.0422085900963304f; sum0.y += pixel2.x * 0.0702919919517587f; sum1.x += pixel2.y * 0.0702919919517587f; sum1.y += pixel2.y * 0.0422085900963304f; // combination stage pixel2.x = sum0.x - sum1.x; // 22.5 even F8YF6X-F9YF7X pixel2.y = sum1.y + sum0.y; // 22.5 odd F9YF6X+F8YF7X *((float2 *)((char *)d_out + (iy + height) *pitch) + ix) = pixel2; pixel2.x = sum0.x + sum1.x; // 157.5 even F8YF6X+F9YF7X pixel2.y = sum1.y - sum0.y; // 157.5 odd F9YF6X-F8YF7X *((float2 *)((char *)d_out + (iy + 7 *height) *pitch) + ix) = pixel2; } __global__ void convolutionColumnTexture(float *d_out, int width, int height, int pitch) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if (ix >= width || iy >= height) return; float pixel; float sum01 = 0.0f; float2 sum02 = make_float2(0.0f, 0.0f); float2 sum1 = make_float2(0.0f, 0.0f); float2 sum2 = make_float2(0.0f, 0.0f); float2 sum3 = make_float2(0.0f, 0.0f); pixel = tex2D(imageTexture, x, y + 5.0f); sum01 += pixel * 0.0824462481622174f; sum02.x += pixel * -0.0059423308933587f; sum02.y += pixel * -0.0854403050734955f; sum1.x += pixel * 0.0448539697327717f; sum1.y += pixel * 0.0563848093336092f; sum2.x += pixel * 0.0422085900963304f; sum2.y += pixel * -0.0702919919517587f; sum3.x += pixel * -0.0865021727156619f; sum3.y += pixel * -0.0082494019300884f; pixel = tex2D(imageTexture, x, y + 4.0f); sum01 += pixel * 0.2046904635506605f; sum02.x += pixel * 0.1996709851458967f; sum02.y += pixel * 0.0005340226360081f; sum1.x += pixel * -0.0728676598565544f; sum1.y += pixel * 0.1985363845521255f; sum2.x += pixel * 0.1753604466059522f; sum2.y += pixel * 0.0985418840494423f; sum3.x += pixel * -0.1544706682064459f; sum3.y += pixel * -0.1387420567977273f; pixel = tex2D(imageTexture, x, y + 3.0f); sum01 += pixel * 0.4107230390429492f; sum02.x += pixel * -0.0064730167337227f; sum02.y += pixel * 0.4124452895315693f; sum1.x += pixel * -0.4218122479628296f; sum1.y += pixel * 0.0779500055097176f; sum2.x += pixel * -0.1499117207828439f; sum2.y += pixel * 0.3900078591931931f; sum3.x += pixel * -0.0961909886276083f; sum3.y += pixel * -0.4004431484945309f; pixel = tex2D(imageTexture, x, y + 2.0f); sum01 += pixel * 0.6742558727374832f; sum02.x += pixel * -0.6814284613758747f; sum02.y += pixel * -0.0005666721140656f; sum1.x += pixel * 
-0.4264028852345470f; sum1.y += pixel * -0.5368628619030967f; sum2.x += pixel * -0.6656505296765876f; sum2.y += pixel * -0.1608071493187968f; sum3.x += pixel * 0.2425229792248418f; sum3.y += pixel * -0.6316382348347102f; pixel = tex2D(imageTexture, x, y + 1.0f); sum01 += pixel * 0.9070814926070788f; sum02.x += pixel * -0.0058271761429405f; sum02.y += pixel * -0.9093473751107518f; sum1.x += pixel * 0.3845516108160854f; sum1.y += pixel * -0.8133545314478231f; sum2.x += pixel * 0.0996765973979726f; sum2.y += pixel * -0.9011408273947247f; sum3.x += pixel * 0.7444812173872333f; sum3.y += pixel * -0.5161793771775458f; pixel = tex2D(imageTexture, x, y + 0.0f); sum01 += pixel * 0.9998288981228244f; sum02.x += pixel * 1.0000000000000000f; sum02.y += pixel * 0.0000000000000000f; sum1.x += pixel * 0.9833544262984621f; sum1.y += pixel * -0.0000000012323343f; sum2.x += pixel * 0.9966332327183527f; sum2.y += pixel * -0.0000000000000001f; sum3.x += pixel * 0.9999674491845810f; sum3.y += pixel * 0.0000034368466824f; pixel = tex2D(imageTexture, x, y + -1.0f); sum01 += pixel * 0.9070814926070788f; sum02.x += pixel * -0.0058271761429405f; sum02.y += pixel * 0.9093473751107518f; sum1.x += pixel * 0.3845516108160854f; sum1.y += pixel * 0.8133545314478231f; sum2.x += pixel * 0.0996765973979726f; sum2.y += pixel * 0.9011408273947247f; sum3.x += pixel * 0.7444812173872333f; sum3.y += pixel * 0.5161793771775458f; pixel = tex2D(imageTexture, x, y + -2.0f); sum01 += pixel * 0.6742558727374832f; sum02.x += pixel * -0.6814284613758747f; sum02.y += pixel * 0.0005666721140656f; sum1.x += pixel * -0.4264028852345470f; sum1.y += pixel * 0.5368628619030967f; sum2.x += pixel * -0.6656505296765876f; sum2.y += pixel * 0.1608071493187968f; sum3.x += pixel * 0.2425229792248418f; sum3.y += pixel * 0.6316382348347102f; pixel = tex2D(imageTexture, x, y + -3.0f); sum01 += pixel * 0.4107230390429492f; sum02.x += pixel * -0.0064730167337227f; sum02.y += pixel * -0.4124452895315693f; sum1.x += pixel * -0.4218122479628296f; sum1.y += pixel * -0.0779500055097176f; sum2.x += pixel * -0.1499117207828439f; sum2.y += pixel * -0.3900078591931931f; sum3.x += pixel * -0.0961909886276083f; sum3.y += pixel * 0.4004431484945309f; pixel = tex2D(imageTexture, x, y + -4.0f); sum01 += pixel * 0.2046904635506605f; sum02.x += pixel * 0.1996709851458967f; sum02.y += pixel * -0.0005340226360081f; sum1.x += pixel * -0.0728676598565544f; sum1.y += pixel * -0.1985363845521255f; sum2.x += pixel * 0.1753604466059522f; sum2.y += pixel * -0.0985418840494423f; sum3.x += pixel * -0.1544706682064459f; sum3.y += pixel * 0.1387420567977273f; pixel = tex2D(imageTexture, x, y + -5.0f); sum01 += pixel * 0.0824462481622174f; sum02.x += pixel * -0.0059423308933587f; sum02.y += pixel * 0.0854403050734955f; sum1.x += pixel * 0.0448539697327717f; sum1.y += pixel * -0.0563848093336092f; sum2.x += pixel * 0.0422085900963304f; sum2.y += pixel * 0.0702919919517587f; sum3.x += pixel * -0.0865021727156619f; sum3.y += pixel * 0.0082494019300884f; *((float *)((char *)d_out + iy *pitch) + ix) = sum01; *((float2 *)((char *)d_out + (iy + height) *pitch) + ix) = sum02; *((float2 *)((char *)d_out + (iy + 2 *height) *pitch) + ix) = sum1; *((float2 *)((char *)d_out + (iy + 3 *height) *pitch) + ix) = sum2; *((float2 *)((char *)d_out + (iy + 4 *height) *pitch) + ix) = sum3; } __global__ void convolutionRowTexture4(float2 *d_out, int width, int height, int pitch) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float 
x = (float)ix + 0.5f; float y = (float)iy + 0.5f; if (ix >= width || iy >= height) return; //////////////////////////////////////////////// 0_1 float pixel; float2 sum0 = make_float2(0.0f, 0.0f); pixel = tex2D(floatTexture, x + 5.0f, y); sum0.x += pixel * -0.0059423308933587f; sum0.y += pixel * -0.0854403050734955f; pixel = tex2D(floatTexture, x + 4.0f, y); sum0.x += pixel * 0.1996709851458967f; sum0.y += pixel * 0.0005340226360081f; pixel = tex2D(floatTexture, x + 3.0f, y); sum0.x += pixel * -0.0064730167337227f; sum0.y += pixel * 0.4124452895315693f; pixel = tex2D(floatTexture, x + 2.0f, y); sum0.x += pixel * -0.6814284613758747f; sum0.y += pixel * -0.0005666721140656f; pixel = tex2D(floatTexture, x + 1.0f, y); sum0.x += pixel * -0.0058271761429405f; sum0.y += pixel * -0.9093473751107518f; pixel = tex2D(floatTexture, x + 0.0f, y); sum0.x += pixel * 1.0000000000000000f; sum0.y += pixel * 0.0000000000000000f; pixel = tex2D(floatTexture, x + -1.0f, y); sum0.x += pixel * -0.0058271761429405f; sum0.y += pixel * 0.9093473751107518f; pixel = tex2D(floatTexture, x + -2.0f, y); sum0.x += pixel * -0.6814284613758747f; sum0.y += pixel * 0.0005666721140656f; pixel = tex2D(floatTexture, x + -3.0f, y); sum0.x += pixel * -0.0064730167337227f; sum0.y += pixel * -0.4124452895315693f; pixel = tex2D(floatTexture, x + -4.0f, y); sum0.x += pixel * 0.1996709851458967f; sum0.y += pixel * -0.0005340226360081f; pixel = tex2D(floatTexture, x + -5.0f, y); sum0.x += pixel * -0.0059423308933587f; sum0.y += pixel * 0.0854403050734955f; *((float2 *)((char *)d_out + iy *pitch) + ix) = sum0; // 0 even and odd //////////////////////////////////////////////// 0_2 y += (float)height; float2 pixel2; sum0.x = 0.0f; sum0.y = 0.0f; pixel2 = tex2D(float2Texture, x + 5.0f, y); sum0.x += pixel2.x * 0.0824462481622174f; sum0.y += pixel2.y * 0.0824462481622174f; pixel2 = tex2D(float2Texture, x + 4.0f, y); sum0.x += pixel2.x * 0.2046904635506605f; sum0.y += pixel2.y * 0.2046904635506605f; pixel2 = tex2D(float2Texture, x + 3.0f, y); sum0.x += pixel2.x * 0.4107230390429492f; sum0.y += pixel2.y * 0.4107230390429492f; pixel2 = tex2D(float2Texture, x + 2.0f, y); sum0.x += pixel2.x * 0.6742558727374832f; sum0.y += pixel2.y * 0.6742558727374832f; pixel2 = tex2D(float2Texture, x + 1.0f, y); sum0.x += pixel2.x * 0.9070814926070788f; sum0.y += pixel2.y * 0.9070814926070788f; pixel2 = tex2D(float2Texture, x + 0.0f, y); sum0.x += pixel2.x * 0.9998288981228244f; sum0.y += pixel2.y * 0.9998288981228244f; pixel2 = tex2D(float2Texture, x + -1.0f, y); sum0.x += pixel2.x * 0.9070814926070788f; sum0.y += pixel2.y * 0.9070814926070788f; pixel2 = tex2D(float2Texture, x + -2.0f, y); sum0.x += pixel2.x * 0.6742558727374832f; sum0.y += pixel2.y * 0.6742558727374832f; pixel2 = tex2D(float2Texture, x + -3.0f, y); sum0.x += pixel2.x * 0.4107230390429492f; sum0.y += pixel2.y * 0.4107230390429492f; pixel2 = tex2D(float2Texture, x + -4.0f, y); sum0.x += pixel2.x * 0.2046904635506605f; sum0.y += pixel2.y * 0.2046904635506605f; pixel2 = tex2D(float2Texture, x + -5.0f, y); sum0.x += pixel2.x * 0.0824462481622174f; sum0.y += pixel2.y * 0.0824462481622174f; *((float2 *)((char *)d_out + (iy + 2 *height) *pitch) + ix) = sum0; // 90 even and odd //////////////////////////////////////////////// 1 y += (float)height; sum0.x = 0.0f; sum0.y = 0.0f; float2 sum1 = make_float2(0.0f, 0.0f); pixel2 = tex2D(float2Texture, x + 5.0f, y); sum0.x += pixel2.x * 0.0448539697327717f; sum0.y += pixel2.x * 0.0563848093336092f; sum1.x += pixel2.y * 0.0563848093336092f; sum1.y += pixel2.y * 
0.0448539697327717f; pixel2 = tex2D(float2Texture, x + 4.0f, y); sum0.x += pixel2.x * -0.0728676598565544f; sum0.y += pixel2.x * 0.1985363845521255f; sum1.x += pixel2.y * 0.1985363845521255f; sum1.y += pixel2.y * -0.0728676598565544f; pixel2 = tex2D(float2Texture, x + 3.0f, y); sum0.x += pixel2.x * -0.4218122479628296f; sum0.y += pixel2.x * 0.0779500055097176f; sum1.x += pixel2.y * 0.0779500055097176f; sum1.y += pixel2.y * -0.4218122479628296f; pixel2 = tex2D(float2Texture, x + 2.0f, y); sum0.x += pixel2.x * -0.4264028852345470f; sum0.y += pixel2.x * -0.5368628619030967f; sum1.x += pixel2.y * -0.5368628619030967f; sum1.y += pixel2.y * -0.4264028852345470f; pixel2 = tex2D(float2Texture, x + 1.0f, y); sum0.x += pixel2.x * 0.3845516108160854f; sum0.y += pixel2.x * -0.8133545314478231f; sum1.x += pixel2.y * -0.8133545314478231f; sum1.y += pixel2.y * 0.3845516108160854f; pixel2 = tex2D(float2Texture, x + 0.0f, y); sum0.x += pixel2.x * 0.9833544262984621f; sum0.y += pixel2.x * -0.0000000012323343f; sum1.x += pixel2.y * -0.0000000012323343f; sum1.y += pixel2.y * 0.9833544262984621f; pixel2 = tex2D(float2Texture, x + -1.0f, y); sum0.x += pixel2.x * 0.3845516108160854f; sum0.y += pixel2.x * 0.8133545314478231f; sum1.x += pixel2.y * 0.8133545314478231f; sum1.y += pixel2.y * 0.3845516108160854f; pixel2 = tex2D(float2Texture, x + -2.0f, y); sum0.x += pixel2.x * -0.4264028852345470f; sum0.y += pixel2.x * 0.5368628619030967f; sum1.x += pixel2.y * 0.5368628619030967f; sum1.y += pixel2.y * -0.4264028852345470f; pixel2 = tex2D(float2Texture, x + -3.0f, y); sum0.x += pixel2.x * -0.4218122479628296f; sum0.y += pixel2.x * -0.0779500055097176f; sum1.x += pixel2.y * -0.0779500055097176f; sum1.y += pixel2.y * -0.4218122479628296f; pixel2 = tex2D(float2Texture, x + -4.0f, y); sum0.x += pixel2.x * -0.0728676598565544f; sum0.y += pixel2.x * -0.1985363845521255f; sum1.x += pixel2.y * -0.1985363845521255f; sum1.y += pixel2.y * -0.0728676598565544f; pixel2 = tex2D(float2Texture, x + -5.0f, y); sum0.x += pixel2.x * 0.0448539697327717f; sum0.y += pixel2.x * -0.0563848093336092f; sum1.x += pixel2.y * -0.0563848093336092f; sum1.y += pixel2.y * 0.0448539697327717f; // combination stage pixel2.x = sum0.x - sum1.x; // 45 even F4YF4X-F5YF5X pixel2.y = sum1.y + sum0.y; // 45 odd F5YF4X+F4YF5X *((float2 *)((char *)d_out + (iy + 1 *height) *pitch) + ix) = pixel2; pixel2.x = sum0.x + sum1.x; // 135 even F4YF4X+F5YF5X pixel2.y = sum1.y - sum0.y; // 135 odd F5YF4X-F4YF5X *((float2 *)((char *)d_out + (iy + 3 *height) *pitch) + ix) = pixel2; } __global__ void convolutionColumnTexture4(float *d_out, int width, int height, int pitch) { const int ix = IMAD(blockDim.x, blockIdx.x, threadIdx.x); const int iy = IMAD(blockDim.y, blockIdx.y, threadIdx.y); const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; if (ix >= width || iy >= height) return; float pixel; float sum01 = 0.0f; float2 sum02 = make_float2(0.0f, 0.0f); float2 sum1 = make_float2(0.0f, 0.0f); pixel = tex2D(imageTexture, x, y + 5.0f); sum01 += pixel * 0.0824462481622174f; sum02.x += pixel * -0.0059423308933587f; sum02.y += pixel * -0.0854403050734955f; sum1.x += pixel * 0.0448539697327717f; sum1.y += pixel * 0.0563848093336092f; pixel = tex2D(imageTexture, x, y + 4.0f); sum01 += pixel * 0.2046904635506605f; sum02.x += pixel * 0.1996709851458967f; sum02.y += pixel * 0.0005340226360081f; sum1.x += pixel * -0.0728676598565544f; sum1.y += pixel * 0.1985363845521255f; pixel = tex2D(imageTexture, x, y + 3.0f); sum01 += pixel * 0.4107230390429492f; sum02.x += pixel * 
-0.0064730167337227f; sum02.y += pixel * 0.4124452895315693f; sum1.x += pixel * -0.4218122479628296f; sum1.y += pixel * 0.0779500055097176f; pixel = tex2D(imageTexture, x, y + 2.0f); sum01 += pixel * 0.6742558727374832f; sum02.x += pixel * -0.6814284613758747f; sum02.y += pixel * -0.0005666721140656f; sum1.x += pixel * -0.4264028852345470f; sum1.y += pixel * -0.5368628619030967f; pixel = tex2D(imageTexture, x, y + 1.0f); sum01 += pixel * 0.9070814926070788f; sum02.x += pixel * -0.0058271761429405f; sum02.y += pixel * -0.9093473751107518f; sum1.x += pixel * 0.3845516108160854f; sum1.y += pixel * -0.8133545314478231f; pixel = tex2D(imageTexture, x, y + 0.0f); sum01 += pixel * 0.9998288981228244f; sum02.x += pixel * 1.0000000000000000f; sum02.y += pixel * 0.0000000000000000f; sum1.x += pixel * 0.9833544262984621f; sum1.y += pixel * -0.0000000012323343f; pixel = tex2D(imageTexture, x, y + -1.0f); sum01 += pixel * 0.9070814926070788f; sum02.x += pixel * -0.0058271761429405f; sum02.y += pixel * 0.9093473751107518f; sum1.x += pixel * 0.3845516108160854f; sum1.y += pixel * 0.8133545314478231f; pixel = tex2D(imageTexture, x, y + -2.0f); sum01 += pixel * 0.6742558727374832f; sum02.x += pixel * -0.6814284613758747f; sum02.y += pixel * 0.0005666721140656f; sum1.x += pixel * -0.4264028852345470f; sum1.y += pixel * 0.5368628619030967f; pixel = tex2D(imageTexture, x, y + -3.0f); sum01 += pixel * 0.4107230390429492f; sum02.x += pixel * -0.0064730167337227f; sum02.y += pixel * -0.4124452895315693f; sum1.x += pixel * -0.4218122479628296f; sum1.y += pixel * -0.0779500055097176f; pixel = tex2D(imageTexture, x, y + -4.0f); sum01 += pixel * 0.2046904635506605f; sum02.x += pixel * 0.1996709851458967f; sum02.y += pixel * -0.0005340226360081f; sum1.x += pixel * -0.0728676598565544f; sum1.y += pixel * -0.1985363845521255f; pixel = tex2D(imageTexture, x, y + -5.0f); sum01 += pixel * 0.0824462481622174f; sum02.x += pixel * -0.0059423308933587f; sum02.y += pixel * 0.0854403050734955f; sum1.x += pixel * 0.0448539697327717f; sum1.y += pixel * -0.0563848093336092f; *((float *)((char *)d_out + iy *pitch) + ix) = sum01; *((float2 *)((char *)d_out + (iy + height) *pitch) + ix) = sum02; *((float2 *)((char *)d_out + (iy + 2 *height) *pitch) + ix) = sum1; } ////////////////////////////////////////////////////////////// // Calling functions /////////////////////////////////////////////////////////////// void resize_replicate_border(const float *d_PixelsIn, int d_PixelsInPitch, float *d_PixelsOut, int d_PixelsOutPitch, int width_in, int height_in, int width_out, int height_out) { dim3 threads(16, 8); dim3 blocks(iDivUp(width_out, threads.x), iDivUp(height_out, threads.y)); imageTexture.normalized = 0; imageTexture.filterMode = cudaFilterModePoint; imageTexture.addressMode[0] = cudaAddressModeClamp; imageTexture.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, &imageTexture, d_PixelsIn, &channelFloat, width_in, height_in, d_PixelsInPitch); resize_replicate_border_gpu << <blocks, threads>>> (d_PixelsOut, d_PixelsOutPitch, width_out, height_out); cudaUnbindTexture(imageTexture); } void downSample(const float *d_PixelsIn, int d_PixelsInPitch, float *d_PixelsOut, int d_PixelsOutPitch, int width, int height) { int width_out = width / 2; int height_out = height / 2; dim3 threads(16, 8); dim3 blocks(iDivUp(width_out, threads.x), iDivUp(height_out, threads.y)); imageTexture.normalized = 0; imageTexture.filterMode = cudaFilterModePoint; 
imageTexture.addressMode[0] = cudaAddressModeClamp; imageTexture.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, &imageTexture, d_PixelsIn, &channelFloat, width, height, d_PixelsInPitch); lpfSubsampleTexture << <blocks, threads>>> (d_PixelsOut, d_PixelsOutPitch, width_out, height_out); cudaUnbindTexture(imageTexture); } void gaborFilterItl(const float *d_Image, int d_ImagePitch, float2 *d_GabItl, int d_GabItlPitch, char *d_TEMP, int d_TEMPPitch, int width, int height, bool fourOrientations) { // printf("%p %d %p %d %p %d %d %d %d\n",d_Image, d_ImagePitch, d_GabItl, // d_GabItlPitch, d_TEMP, d_TEMPPitch, width, height, fourOrientations); // setup execution parameters dim3 threads(16, 8); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); imageTexture.normalized = 0; imageTexture.filterMode = cudaFilterModePoint; imageTexture.addressMode[0] = cudaAddressModeClamp; imageTexture.addressMode[1] = cudaAddressModeClamp; floatTexture.normalized = 0; floatTexture.filterMode = cudaFilterModePoint; floatTexture.addressMode[0] = cudaAddressModeClamp; floatTexture.addressMode[1] = cudaAddressModeClamp; float2Texture.normalized = 0; float2Texture.filterMode = cudaFilterModePoint; float2Texture.addressMode[0] = cudaAddressModeClamp; float2Texture.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>(); cudaChannelFormatDesc channelFloat2 = cudaCreateChannelDesc<float2>(); cudaBindTexture2D(0, &imageTexture, d_Image, &channelFloat, width, height, d_ImagePitch); if (fourOrientations) { convolutionColumnTexture4 << <blocks, threads>>> ((float *)d_TEMP, width, height, d_TEMPPitch); cudaUnbindTexture(imageTexture); cudaBindTexture2D(0, &floatTexture, d_TEMP, &channelFloat, width, height, d_TEMPPitch); cudaBindTexture2D(0, &float2Texture, d_TEMP, &channelFloat2, width, 3 * height, d_TEMPPitch); // the use of this big texture with the responses tiled on top of each other // is fine here since we do column filtering first, in the row filter we // only leave the image on the left and right side convolutionRowTexture4 << <blocks, threads>>> (d_GabItl, width, height, d_GabItlPitch); } else { convolutionColumnTexture << <blocks, threads>>> ((float *)d_TEMP, width, height, d_TEMPPitch); cudaUnbindTexture(imageTexture); cudaBindTexture2D(0, &floatTexture, d_TEMP, &channelFloat, width, height, d_TEMPPitch); cudaBindTexture2D(0, &float2Texture, d_TEMP, &channelFloat2, width, 5 * height, d_TEMPPitch); // the use of this big texture with the responses tiled on top of each other // is fine here since we do column filtering first, in the row filter we // only leave the image on the left and right side convolutionRowTexture << <blocks, threads>>> (d_GabItl, width, height, d_GabItlPitch); } cudaUnbindTexture(floatTexture); cudaUnbindTexture(float2Texture); } void downSampleNaN(const float *d_PixelsIn, int d_PixelsInPitch, float *d_PixelsOut, int d_PixelsOutPitch, int width, int height) { int width_out = width / 2; int height_out = height / 2; dim3 threads(16, 8); dim3 blocks(iDivUp(width_out, threads.x), iDivUp(height_out, threads.y)); imageTexture.normalized = 0; imageTexture.filterMode = cudaFilterModePoint; imageTexture.addressMode[0] = cudaAddressModeClamp; imageTexture.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, &imageTexture, d_PixelsIn, &channelFloat, width, height, d_PixelsInPitch); 
lpfSubsampleTextureNaN << <blocks, threads>>> (d_PixelsOut, d_PixelsOutPitch, width_out, height_out); cudaUnbindTexture(imageTexture); } } // end namespace vision
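// --------------------------------------------------------------------------
// Hedged sketch (not part of the original file): every wrapper above follows
// the same legacy texture-reference pattern -- configure the reference for
// unnormalized, point-sampled, clamped ("replicate border") access, bind a
// pitched device image with cudaBindTexture2D, launch a kernel that samples
// at texel centers (ix + 0.5f, iy + 0.5f) via tex2D, then unbind. The minimal
// standalone example below repeats that pattern with a trivial copy kernel.
// The names demoTexture, demoCopyKernel and demoLaunch are illustrative only,
// and the texture-reference API itself is deprecated in recent CUDA toolkits.
#include <cuda_runtime.h>

texture<float, 2, cudaReadModeElementType> demoTexture;

__global__ void demoCopyKernel(float *d_out, int width, int height, size_t pitch)
{
  const int ix = blockIdx.x * blockDim.x + threadIdx.x;
  const int iy = blockIdx.y * blockDim.y + threadIdx.y;
  if (ix >= width || iy >= height) return;
  // Sample at the texel center, exactly as the convolution kernels do.
  const float v = tex2D(demoTexture, (float)ix + 0.5f, (float)iy + 0.5f);
  *((float *)((char *)d_out + iy * pitch) + ix) = v;
}

void demoLaunch(const float *d_in, size_t in_pitch,
                float *d_out, size_t out_pitch, int width, int height)
{
  demoTexture.normalized = 0;
  demoTexture.filterMode = cudaFilterModePoint;
  demoTexture.addressMode[0] = cudaAddressModeClamp;   // replicate border in x
  demoTexture.addressMode[1] = cudaAddressModeClamp;   // replicate border in y
  cudaChannelFormatDesc channelFloat = cudaCreateChannelDesc<float>();
  cudaBindTexture2D(0, &demoTexture, d_in, &channelFloat, width, height, in_pitch);

  dim3 threads(16, 8);
  dim3 blocks((width + threads.x - 1) / threads.x,
              (height + threads.y - 1) / threads.y);
  demoCopyKernel<<<blocks, threads>>>(d_out, width, height, out_pitch);

  cudaUnbindTexture(demoTexture);
}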
#define CUDA_KERNEL #include "fluid_system_cuda.cuh" #include "cutil_math.h" // cutil32.lib #include <string.h> #include <assert.h> #include <curand.h> #include <curand_kernel.h> __constant__ FParams fparam; // CPU Fluid params __constant__ FBufs fbuf; // GPU Particle buffers (unsorted) __constant__ FBufs ftemp; // GPU Particle buffers (sorted) __constant__ uint gridActive; #define SCAN_BLOCKSIZE 512 extern "C" __global__ void insertParticles ( int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; //-- debugging (pointers should match CUdeviceptrs on host side) // printf ( " pos: %012llx, gcell: %012llx, gndx: %012llx, gridcnt: %012llx\n", fbuf.bufC(FPOS), fbuf.bufC(FGCELL), fbuf.bufC(FGNDX), fbuf.bufC(FGRIDCNT) ); register float3 gridMin = fparam.gridMin; register float3 gridDelta = fparam.gridDelta; register int3 gridRes = fparam.gridRes; register int3 gridScan = fparam.gridScanMax; register int gs; register float3 gcf; register int3 gc; gcf = (fbuf.bufF3(FPOS)[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) { fbuf.bufI(FGCELL)[i] = gs; // Grid cell insert. fbuf.bufI(FGNDX)[i] = atomicAdd ( &fbuf.bufI(FGRIDCNT)[ gs ], 1 ); // Grid counts. //gcf = (-make_float3(poff,poff,poff) + fbuf.bufF3(FPOS)[i] - gridMin) * gridDelta; //gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); //gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; } else { fbuf.bufI(FGCELL)[i] = GRID_UNDEF; } } // Counting Sort - Full (deep copy) extern "C" __global__ void countingSortFull ( int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Copy particle from original, unsorted buffer (msortbuf), // into sorted memory location on device (mpos/mvel) uint icell = ftemp.bufI(FGCELL) [ i ]; if ( icell != GRID_UNDEF ) { // Determine the sort_ndx, location of the particle after sort uint indx = ftemp.bufI(FGNDX) [ i ]; int sort_ndx = fbuf.bufI(FGRIDOFF) [ icell ] + indx ; // global_ndx = grid_cell_offet + particle_offset //printf ( "%d: cell: %d, off: %d, ndx: %d\n", i, icell, fbuf.bufI(FGRIDOFF)[icell], indx ); // Transfer data to sort location fbuf.bufI (FGRID) [ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity fbuf.bufF3(FPOS) [sort_ndx] = ftemp.bufF3(FPOS) [i]; fbuf.bufF3(FVEL) [sort_ndx] = ftemp.bufF3(FVEL) [i]; fbuf.bufF3(FVEVAL)[sort_ndx] = ftemp.bufF3(FVEVAL) [i]; fbuf.bufF3(FFORCE)[sort_ndx] = ftemp.bufF3(FFORCE) [i]; fbuf.bufF (FPRESS)[sort_ndx] = ftemp.bufF(FPRESS) [i]; fbuf.bufF (FDENSITY)[sort_ndx] = ftemp.bufF(FDENSITY) [i]; fbuf.bufI (FCLR) [sort_ndx] = ftemp.bufI(FCLR) [i]; fbuf.bufI (FGCELL) [sort_ndx] = icell; fbuf.bufI (FGNDX) [sort_ndx] = indx; } } extern "C" __device__ float contributePressure ( int i, float3 p, int cell ) { if ( fbuf.bufI(FGRIDCNT)[cell] == 0 ) return 0.0; float3 dist; float dsq, c, sum = 0.0; register float d2 = fparam.psimscale * fparam.psimscale; register float r2 = fparam.r2 / d2; int clast = fbuf.bufI(FGRIDOFF)[cell] + fbuf.bufI(FGRIDCNT)[cell]; for ( int cndx = fbuf.bufI(FGRIDOFF)[cell]; cndx < clast; cndx++ ) { int pndx = fbuf.bufI(FGRID) [cndx]; dist = p - fbuf.bufF3(FPOS) [pndx]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 && dsq > 0.0) { c = (r2 - dsq)*d2; sum += c * c * c; } } return sum; } extern "C" __global__ void 
computePressure ( int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*fparam.gridRes.z + 1)*fparam.gridRes.x + 1; uint gc = fbuf.bufI(FGCELL) [i]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = fbuf.bufF3(FPOS) [i]; float sum = 0.0; for (int c=0; c < fparam.gridAdjCnt; c++) { sum += contributePressure ( i, pos, gc + fparam.gridAdj[c] ); } __syncthreads(); // Compute Density & Pressure sum = sum * fparam.pmass * fparam.poly6kern; if ( sum == 0.0 ) sum = 1.0; fbuf.bufF(FPRESS) [ i ] = ( sum - fparam.prest_dens ) * fparam.pintstiff; fbuf.bufF(FDENSITY)[ i ] = 1.0f / sum; } extern "C" __device__ float3 contributeForce ( int i, float3 ipos, float3 iveleval, float ipress, float idens, int cell) { if ( fbuf.bufI(FGRIDCNT)[cell] == 0 ) return make_float3(0,0,0); float dsq, c, pterm; float3 dist, force = make_float3(0,0,0); int j; int clast = fbuf.bufI(FGRIDOFF)[cell] + fbuf.bufI(FGRIDCNT)[cell]; for ( int cndx = fbuf.bufI(FGRIDOFF)[cell]; cndx < clast; cndx++ ) { j = fbuf.bufI(FGRID)[ cndx ]; dist = ( ipos - fbuf.bufF3(FPOS)[ j ] ); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < fparam.rd2 && dsq > 0) { dsq = sqrt(dsq * fparam.d2); c = ( fparam.psmoothradius - dsq ); pterm = fparam.psimscale * -0.5f * c * fparam.spikykern * ( ipress + fbuf.bufF(FPRESS)[ j ] ) / dsq; force += ( pterm * dist + fparam.vterm * ( fbuf.bufF3(FVEVAL)[ j ] - iveleval )) * c * idens * (fbuf.bufF(FDENSITY)[ j ] ); } } return force; } extern "C" __global__ void computeForce ( int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell uint gc = fbuf.bufI(FGCELL)[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= (1*fparam.gridRes.z + 1)*fparam.gridRes.x + 1; // Sum Pressures register float3 force; force = make_float3(0,0,0); for (int c=0; c < fparam.gridAdjCnt; c++) { force += contributeForce ( i, fbuf.bufF3(FPOS)[ i ], fbuf.bufF3(FVEVAL)[ i ], fbuf.bufF(FPRESS)[ i ], fbuf.bufF(FDENSITY)[ i ], gc + fparam.gridAdj[c] ); } fbuf.bufF3(FFORCE)[ i ] = force; } extern "C" __global__ void randomInit ( int seed, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; // Initialize particle random generator curandState_t* st = (curandState_t*) (fbuf.bufC(FSTATE) + i*sizeof(curandState_t)); curand_init ( seed + i, 0, 0, st ); } #define CURANDMAX 2147483647 extern "C" __global__ void emitParticles ( float frame, int emit, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= emit ) return; curandState_t* st = (curandState_t*) (fbuf.bufC(FSTATE) + i*sizeof(curandState_t)); uint v = curand( st); uint j = v & (numPnts-1); float3 bmin = make_float3(-170,10,-20); float3 bmax = make_float3(-190,60, 20); float3 pos = make_float3(0,0,0); pos.x = float( v & 0xFF ) / 256.0; pos.y = float((v>>8) & 0xFF ) / 256.0; pos.z = float((v>>16) & 0xFF ) / 256.0; pos = bmin + pos*(bmax-bmin); fbuf.bufF3(FPOS)[j] = pos; fbuf.bufF3(FVEVAL)[j] = make_float3(0,0,0); fbuf.bufF3(FVEL)[j] = make_float3(5,-2,0); fbuf.bufF3(FFORCE)[j] = make_float3(0,0,0); } __device__ uint getGridCell ( float3 pos, uint3& gc ) { gc.x = (int)( (pos.x - fparam.gridMin.x) * fparam.gridDelta.x); // Cell in which particle is located gc.y = (int)( (pos.y - fparam.gridMin.y) * fparam.gridDelta.y); gc.z = (int)( (pos.z - 
fparam.gridMin.z) * fparam.gridDelta.z); return (int) ( (gc.y*fparam.gridRes.z + gc.z)*fparam.gridRes.x + gc.x); } extern "C" __global__ void sampleParticles ( float* brick, uint3 res, float3 bmin, float3 bmax, int numPnts, float scalar ) { float3 dist; float dsq; int j, cell; register float r2 = fparam.r2; register float h2 = 2.0*r2 / 8.0; // 8.0=smoothing. higher values are sharper uint3 i = blockIdx * make_uint3(blockDim.x, blockDim.y, blockDim.z) + threadIdx; if ( i.x >= res.x || i.y >= res.y || i.z >= res.z ) return; float3 p = bmin + make_float3(float(i.x)/res.x, float(i.y)/res.y, float(i.z)/res.z) * (bmax-bmin); //float3 v = make_float3(0,0,0); float v = 0.0; // Get search cell int nadj = (1*fparam.gridRes.z + 1)*fparam.gridRes.x + 1; uint3 gc; uint gs = getGridCell ( p, gc ); if ( gc.x < 1 || gc.x > fparam.gridRes.x-fparam.gridSrch || gc.y < 1 || gc.y > fparam.gridRes.y-fparam.gridSrch || gc.z < 1 || gc.z > fparam.gridRes.z-fparam.gridSrch ) { brick[ (i.y*int(res.z) + i.z)*int(res.x) + i.x ] = 0.0; return; } gs -= nadj; for (int c=0; c < fparam.gridAdjCnt; c++) { cell = gs + fparam.gridAdj[c]; if ( fbuf.bufI(FGRIDCNT)[cell] != 0 ) { for ( int cndx = fbuf.bufI(FGRIDOFF)[cell]; cndx < fbuf.bufI(FGRIDOFF)[cell] + fbuf.bufI(FGRIDCNT)[cell]; cndx++ ) { j = fbuf.bufI(FGRID)[cndx]; dist = p - fbuf.bufF3(FPOS)[ j ]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < fparam.rd2 && dsq > 0 ) { dsq = sqrt(dsq * fparam.d2); //v += fbuf.mvel[j] * (fparam.gausskern * exp ( -(dsq*dsq)/h2 ) / fbuf.mdensity[ j ]); v += fparam.gausskern * exp ( -(dsq*dsq)/h2 ); } } } } __syncthreads(); brick[ (i.z*int(res.y) + i.y)*int(res.x) + i.x ] = v * scalar; //brick[ (i.z*int(res.y) + i.y)*int(res.x) + i.x ] = length(v) * scalar; } extern "C" __global__ void computeQuery ( int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*fparam.gridRes.z + 1)*fparam.gridRes.x + 1; uint gc = fbuf.bufI(FGCELL) [i]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float sum = 0.0; for (int c=0; c < fparam.gridAdjCnt; c++) { sum += 1.0; } __syncthreads(); } extern "C" __global__ void advanceParticles ( float time, float dt, float ss, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; if ( fbuf.bufI(FGCELL)[i] == GRID_UNDEF ) { fbuf.bufF3(FPOS)[i] = make_float3(-1000,-1000,-1000); fbuf.bufF3(FVEL)[i] = make_float3(0,0,0); return; } // Get particle vars register float3 accel, norm; register float diff, adj, speed; register float3 pos = fbuf.bufF3(FPOS)[i]; register float3 veval = fbuf.bufF3(FVEVAL)[i]; // Leapfrog integration accel = fbuf.bufF3(FFORCE)[i]; accel *= fparam.pmass; // Boundaries // Y-axis diff = fparam.pradius - (pos.y - (fparam.pboundmin.y + (pos.x-fparam.pboundmin.x)*fparam.pground_slope )) * ss; if ( diff > EPSILON ) { norm = make_float3( -fparam.pground_slope, 1.0 - fparam.pground_slope, 0); adj = fparam.pextstiff * diff - fparam.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = fparam.pradius - ( fparam.pboundmax.y - pos.y )*ss; if ( diff > EPSILON ) { norm = make_float3(0, -1, 0); adj = fparam.pextstiff * diff - fparam.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // X-axis diff = fparam.pradius - (pos.x - (fparam.pboundmin.x + (sin(time*fparam.pforce_freq)+1)*0.5 * fparam.pforce_min))*ss; if ( diff > EPSILON ) { norm = make_float3( 1, 0, 0); adj = (fparam.pforce_min+1) * 
fparam.pextstiff * diff - fparam.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = fparam.pradius - ( (fparam.pboundmax.x - (sin(time*fparam.pforce_freq)+1)*0.5*fparam.pforce_max) - pos.x)*ss; if ( diff > EPSILON ) { norm = make_float3(-1, 0, 0); adj = (fparam.pforce_max+1) * fparam.pextstiff * diff - fparam.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Z-axis diff = fparam.pradius - (pos.z - fparam.pboundmin.z ) * ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, 1 ); adj = fparam.pextstiff * diff - fparam.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = fparam.pradius - ( fparam.pboundmax.z - pos.z )*ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, -1 ); adj = fparam.pextstiff * diff - fparam.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Gravity accel += fparam.pgravity; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( speed > fparam.AL2 ) { accel *= fparam.AL / sqrt(speed); } // Velocity Limit float3 vel = fbuf.bufF3(FVEL)[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if ( speed > fparam.VL2 ) { speed = fparam.VL2; vel *= fparam.VL / sqrt(speed); } // Ocean colors /*uint clr = fbuf.bufI(FCLR)[i]; if ( speed > fparam.VL2*0.2) { adj = fparam.VL2*0.2; clr += (( clr & 0xFF) < 0xFD ) ? +0x00000002 : 0; // decrement R by one clr += (( (clr>>8) & 0xFF) < 0xFD ) ? +0x00000200 : 0; // decrement G by one clr += (( (clr>>16) & 0xFF) < 0xFD ) ? +0x00020000 : 0; // decrement G by one fbuf.bufI(FCLR)[i] = clr; } if ( speed < 0.03 ) { int v = int(speed/.01)+1; clr += (( clr & 0xFF) > 0x80 ) ? -0x00000001 * v : 0; // decrement R by one clr += (( (clr>>8) & 0xFF) > 0x80 ) ? -0x00000100 * v : 0; // decrement G by one fbuf.bufI(FCLR)[i] = clr; }*/ //-- surface particle density //fbuf.mclr[i] = fbuf.mclr[i] & 0x00FFFFFF; //if ( fbuf.mdensity[i] > 0.0014 ) fbuf.mclr[i] += 0xAA000000; // Leap-frog Integration float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt fbuf.bufF3(FVEVAL)[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 fbuf.bufF3(FVEL)[i] = vnext; fbuf.bufF3(FPOS)[i] += vnext * (dt/ss); // p(t+1) = p(t) + v(t+1/2) dt } extern "C" __global__ void prefixFixup(uint *input, uint *aux, int len) { unsigned int t = threadIdx.x; unsigned int start = t + 2 * blockIdx.x * SCAN_BLOCKSIZE; if (start < len) input[start] += aux[blockIdx.x]; if (start + SCAN_BLOCKSIZE < len) input[start + SCAN_BLOCKSIZE] += aux[blockIdx.x]; } extern "C" __global__ void prefixSum(uint* input, uint* output, uint* aux, int len, int zeroff) { __shared__ uint scan_array[SCAN_BLOCKSIZE << 1]; unsigned int t1 = threadIdx.x + 2 * blockIdx.x * SCAN_BLOCKSIZE; unsigned int t2 = t1 + SCAN_BLOCKSIZE; // Pre-load into shared memory scan_array[threadIdx.x] = (t1<len) ? input[t1] : 0.0f; scan_array[threadIdx.x + SCAN_BLOCKSIZE] = (t2<len) ? 
input[t2] : 0.0f; __syncthreads(); // Reduction int stride; for (stride = 1; stride <= SCAN_BLOCKSIZE; stride <<= 1) { int index = (threadIdx.x + 1) * stride * 2 - 1; if (index < 2 * SCAN_BLOCKSIZE) scan_array[index] += scan_array[index - stride]; __syncthreads(); } // Post reduction for (stride = SCAN_BLOCKSIZE >> 1; stride > 0; stride >>= 1) { int index = (threadIdx.x + 1) * stride * 2 - 1; if (index + stride < 2 * SCAN_BLOCKSIZE) scan_array[index + stride] += scan_array[index]; __syncthreads(); } __syncthreads(); // Output values & aux if (t1 + zeroff < len) output[t1 + zeroff] = scan_array[threadIdx.x]; if (t2 + zeroff < len) output[t2 + zeroff] = (threadIdx.x == SCAN_BLOCKSIZE - 1 && zeroff) ? 0 : scan_array[threadIdx.x + SCAN_BLOCKSIZE]; if (threadIdx.x == 0) { if (zeroff) output[0] = 0; if (aux) aux[blockIdx.x] = scan_array[2 * SCAN_BLOCKSIZE - 1]; } }
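// --------------------------------------------------------------------------
// Hedged sketch (not part of the original file): prefixSum above is a
// work-efficient (Blelloch) scan that covers 2*SCAN_BLOCKSIZE elements per
// thread block and stores each block's total in `aux`; prefixFixup then adds
// the scanned block totals back into every block's partial results. The host
// driver below is a hypothetical illustration of how the two kernels could be
// chained -- presumably to turn the per-cell counts in FGRIDCNT into the
// FGRIDOFF offsets consumed by countingSortFull. It assumes the block-total
// array itself fits in a single scan block (numElem <= (2*SCAN_BLOCKSIZE)^2),
// launches on the default stream, and is not the project's actual host code;
// prefixSumCellsSketch, d_aux and d_auxScan are names invented here.
void prefixSumCellsSketch(uint* d_in, uint* d_out,
                          uint* d_aux, uint* d_auxScan, int numElem)
{
  const int elemsPerBlock = 2 * SCAN_BLOCKSIZE;
  const int numBlocks = (numElem + elemsPerBlock - 1) / elemsPerBlock;

  // 1. Exclusive scan (zeroff = 1) of each block; per-block totals go to d_aux.
  prefixSum<<<numBlocks, SCAN_BLOCKSIZE>>>(d_in, d_out, d_aux, numElem, 1);
  // 2. Exclusive scan of the block totals; under the single-level assumption
  //    no further aux array is needed, so a null pointer is passed.
  prefixSum<<<1, SCAN_BLOCKSIZE>>>(d_aux, d_auxScan, nullptr, numBlocks, 1);
  // 3. Add each block's scanned total back into its partial results.
  prefixFixup<<<numBlocks, SCAN_BLOCKSIZE>>>(d_out, d_auxScan, numElem);
}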
#include "common.cuh" #include <kat/on_device/collaboration/grid.cuh> #include <kat/on_device/collaboration/block.cuh> #include <kat/on_device/collaboration/warp.cuh> #include <kat/on_device/atomics.cuh> using std::size_t; #if __cplusplus < 201701L #include <experimental/optional> template <typename T> using optional = std::experimental::optional<T>; #else template <typename T> #include <optional> using optional = std::optional<T>; #endif template <typename T> const auto make_exact_comparison { optional<T>{} }; namespace klcg = kat::linear_grid::collaborative::grid; namespace klcb = kat::linear_grid::collaborative::block; // namespace kcg = kat::collaborative::grid; namespace kcb = kat::collaborative::block; namespace kcw = kat::collaborative::warp; namespace kernels { template <typename F, typename T, typename... Is> __global__ void execute_testcase( F testcase_device_function, size_t num_values_to_populate, T* __restrict__ values_to_populate, const Is* __restrict__ ... inputs ) { testcase_device_function(num_values_to_populate, values_to_populate, inputs...); } } // namespace kernels template <typename T> std::size_t set_width_for_up_to(T max) { // assert(std::is_integral<I>::value, "Only integer types supported for now"); std::stringstream ss; ss << std::dec << max; return ss.str().length(); } namespace detail { template <typename T> auto tolerance_gadget(std::true_type, T x, optional<T> tolerance) { auto eps = tolerance.value_or(0); return doctest::Approx(x).epsilon(eps); } template <typename T> T tolerance_gadget(std::false_type, T x, optional<T>) { return x; } } // namespace detail template <typename T> auto tolerance_gadget(T x, optional<T> tolerance) { constexpr const auto is_arithmetic = std::is_arithmetic< std::decay_t<T> >::value; return detail::tolerance_gadget(std::integral_constant<bool, is_arithmetic>{}, x, tolerance); } // TODO: Take iterator templates rather than pointers template <typename T, typename F, typename... Is> void check_results( size_t num_values_to_check, // perhaps add another parameter for specific individual-check details? const T* __restrict__ actual_values, F expected_value_retriever, optional<T> comparison_tolerance_fraction, const Is* __restrict__... inputs) { std::stringstream ss; auto index_width = set_width_for_up_to(num_values_to_check); // TODO: Consider using the maximum/minimum result values to set field widths. for(size_t i = 0; i < num_values_to_check; i++) { ss.str(""); ss << "Assertion " << std::setw(index_width) << (i+1) << " for testcase " << doctest::current_test_name() // << " :\n" << "(" << std::make_tuple(inputs[i]...) << ")" ; auto mismatch_message { ss.str() }; if (comparison_tolerance_fraction) { CHECK_MESSAGE(actual_values[i] == tolerance_gadget(expected_value_retriever(i), comparison_tolerance_fraction), mismatch_message); } else { CHECK_MESSAGE(actual_values[i] == expected_value_retriever(i), mismatch_message); } } } template <typename T> struct tag {}; /** * @brief Executes a testcase intended to make certain checks using a GPU kernel * which produces the values to check for. * * @note The actual checks are eventually conducted on the host side, since doctest * code can't actually do anything useful on the GPU. So on the GPU side we "merely" * compute the values to check and let the test logic peform the actual comparison later * on. */ template <typename F, typename K, typename T, typename... Is, size_t... 
Indices> auto execute_testcase_on_gpu( tag<T>, std::index_sequence<Indices...>, K testcase_kernel, F testcase_device_function, cuda::launch_configuration_t launch_config, size_t num_values_to_populate, Is* __restrict__ ... inputs) { cuda::device_t device { cuda::device::current::get() }; auto device_side_results { cuda::memory::device::make_unique<T[]>(device, num_values_to_populate) }; cuda::memory::device::zero(device_side_results.get(), num_values_to_populate * sizeof(T)); // just to be on the safe side auto host_side_results { std::vector<T>(num_values_to_populate) }; auto make_device_side_input = [&device, num_values_to_populate](auto input, size_t n) { using input_type = std::remove_reference_t<decltype(*input)>; auto device_side_input = cuda::memory::device::make_unique<input_type[]>(device, n); cuda::memory::copy(device_side_input.get(), input, num_values_to_populate * sizeof(input_type)); return std::move(device_side_input); }; auto device_side_inputs = std::make_tuple( make_device_side_input(inputs, num_values_to_populate)... ); ignore(device_side_inputs); // for the case of no inputs cuda::launch( testcase_kernel, launch_config, testcase_device_function, num_values_to_populate, device_side_results.get(), std::get<Indices>(device_side_inputs).get()... ); cuda::memory::copy(host_side_results.data(), device_side_results.get(), sizeof(T) * num_values_to_populate); return host_side_results; } template <typename F, typename ExpectedResultRetriever, typename T, typename... Is> void execute_non_uniform_builtin_testcase_on_gpu_and_check( F testcase_device_function, ExpectedResultRetriever expected_value_retriever, size_t num_values_to_populate, cuda::grid::dimensions_t grid_dimensions, cuda::grid::block_dimensions_t block_dimensions, optional<T> comparison_tolerance_fraction, Is* __restrict__ ... inputs) { auto launch_config { cuda::make_launch_config(grid_dimensions, block_dimensions) }; // TODO: Should we check that num_values_to_populate is equal to the number of grid threads? auto host_side_results = execute_testcase_on_gpu( tag<T>{}, typename std::make_index_sequence<sizeof...(Is)> {}, kernels::execute_testcase<F, T, Is...>, testcase_device_function, launch_config, num_values_to_populate, inputs... ); check_results ( num_values_to_populate, // perhaps add another parameter for specific testcase details? host_side_results.data(), expected_value_retriever, comparison_tolerance_fraction, inputs...); } TEST_SUITE("block-level - linear grid") { TEST_CASE("at_block_stride") { using checked_value_type = uint32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 2 }; // auto total_num_threads = num_grid_blocks * num_threads_per_block; size_t length_to_cover_per_block { 271 }; // We don't actually create input data, we just need each element in the // range 0 ... length_to_cover-1 to be attended to. // // In this test case - it's 0 ... length_to_cover-1 attended to by _each_ of the blocks. // In a real life kernel it might be, say num_grid_blocks * length_to_cover elements to // process. 
auto num_values_to_populate = length_to_cover_per_block * num_grid_blocks; auto testcase_device_function = [length_to_cover_per_block] KAT_DEV ( size_t num_grid_threads, checked_value_type* pos_attendent_thread_indices ) { namespace gi = kat::linear_grid::grid_info; auto offset_into_attendant_array = length_to_cover_per_block * gi::block::id(); auto f_inner = [&] (size_t pos) { pos_attendent_thread_indices[offset_into_attendant_array + pos] = gi::thread::index_in_grid(); }; klcb::at_block_stride(length_to_cover_per_block, f_inner); }; auto expected_value_retriever = [=] (size_t pos) { // Which thread processes position pos? auto intra_block_pos = pos % length_to_cover_per_block; auto processing_block_id = pos / length_to_cover_per_block; auto processing_thread_index = intra_block_pos % num_threads_per_block; return checked_value_type(processing_thread_index + processing_block_id * num_threads_per_block); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<checked_value_type> ); } TEST_CASE("share_per_warp_data - specific writer lane") { using datum_type = uint32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 7 }; auto num_warps_per_block = num_threads_per_block / kat::warp_size; auto num_values_to_populate = num_warps_per_block * num_grid_blocks; auto make_warp_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t warp_id_within_block) { return datum_type{(warp_id_within_block + 1) + (block_id + 1) * 10000}; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* warp_data_for_all_blocks ) { namespace gi = kat::linear_grid::grid_info; datum_type thread_datum { make_warp_datum(gi::block::id(), gi::warp::id()) }; // same for all threads in warp! constexpr auto max_possible_num_warps_per_block = 32; // Note: Important assumption here... __shared__ datum_type warp_data [max_possible_num_warps_per_block]; constexpr const auto writing_lane_index = 3u; // just for kicks klcb::share_per_warp_data(thread_datum, warp_data, writing_lane_index); // We've run the synchronized variant, so no need for extra sync if (gi::thread::is_first_in_block()) { // Now we're populating what's going to be checked outside the kernel. 
auto warp_data_for_this_block = warp_data_for_all_blocks + gi::block::id() * num_warps_per_block; for(int i = 0; i < num_warps_per_block; i++) { warp_data_for_this_block[i] = warp_data[i]; } } }; auto expected_value_retriever = [=] (size_t i) { auto relevant_block_id = i / num_warps_per_block; auto warp_id_within_block = i % num_warps_per_block; return make_warp_datum(relevant_block_id, warp_id_within_block); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<datum_type> ); } TEST_CASE("share_per_warp_data - inspecific writer lane") { using datum_type = uint32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 7 }; auto num_warps_per_block = num_threads_per_block / kat::warp_size; auto num_values_to_populate = num_warps_per_block * num_grid_blocks; auto make_warp_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t warp_id_within_block) { return datum_type{(warp_id_within_block + 1) + (block_id + 1) * 10000}; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* warp_data_for_all_blocks ) { namespace gi = kat::linear_grid::grid_info; datum_type thread_datum { make_warp_datum(gi::block::id(), gi::warp::id()) }; // same for all threads in warp! constexpr auto max_possible_num_warps_per_block = 32; // Note: Important assumption here... __shared__ datum_type warp_data [max_possible_num_warps_per_block]; klcb::share_per_warp_data(thread_datum, warp_data); // We've run the synchronized variant, so no need for extra sync if (gi::thread::is_first_in_block()) { // Now we're populating what's going to be checked outside the kernel. 
auto warp_data_for_this_block = warp_data_for_all_blocks + gi::block::id() * num_warps_per_block; for(int i = 0; i < num_warps_per_block; i++) { warp_data_for_this_block[i] = warp_data[i]; } } }; auto expected_value_retriever = [=] (size_t i) { auto relevant_block_id = i / num_warps_per_block; auto warp_id_within_block = i % num_warps_per_block; return make_warp_datum(relevant_block_id, warp_id_within_block); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<datum_type> ); } TEST_CASE("get_from_thread") { using datum_type = uint32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 7 }; auto num_total_threads = num_threads_per_block * num_grid_blocks; auto num_values_to_populate = num_total_threads; auto make_thread_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_index) { return datum_type{(thread_index + 1) + (block_id + 1) * 10000}; }; auto make_source_thread_index = [=] KAT_HD (cuda::grid::dimension_t block_id) { return unsigned{(block_id + 1) * 11 % num_threads_per_block}; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* thread_obtained_values ) { namespace gi = kat::linear_grid::grid_info; datum_type thread_datum { make_thread_datum(gi::block::id(), gi::thread::id()) }; auto source_thread_index { make_source_thread_index(gi::block::id()) }; auto obtained_value { klcb::get_from_thread(thread_datum, source_thread_index) }; // We've run the synchronized variant, so no need for extra sync thread_obtained_values[gi::thread::global_id()] = obtained_value; }; auto expected_value_retriever = [=] (size_t global_thread_index) { auto block_id { global_thread_index / num_threads_per_block }; auto source_thread_index { make_source_thread_index(block_id) }; return make_thread_datum(block_id, source_thread_index); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, make_exact_comparison<datum_type> ); } TEST_CASE("get_from_first_thread") { using datum_type = uint32_t; cuda::grid::dimension_t num_grid_blocks { 2 }; cuda::grid::block_dimension_t num_threads_per_block { kat::warp_size * 7 }; auto num_total_threads = num_threads_per_block * num_grid_blocks; auto num_values_to_populate = num_total_threads; auto make_thread_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_index) { return datum_type{(thread_index + 1) + (block_id + 1) * 10000}; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* thread_obtained_values ) { namespace gi = kat::linear_grid::grid_info; datum_type thread_datum { make_thread_datum(gi::block::id(), gi::thread::id()) }; auto obtained_value { klcb::get_from_first_thread(thread_datum) }; // We've run the synchronized variant, so no need for extra sync thread_obtained_values[gi::thread::global_id()] = obtained_value; }; auto expected_value_retriever = [=] (size_t global_thread_index) { auto block_id { global_thread_index / num_threads_per_block }; auto source_thread_index { 0 }; return make_thread_datum(block_id, source_thread_index); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, num_grid_blocks, num_threads_per_block, 
make_exact_comparison<datum_type> ); } TEST_CASE("barrier") { // Note: Not much to test with the barrier() collaboration primitive, seeing how there could be "natural barrier'ing" even without the call } } // TEST_SUITE("block-level - linear grid") TEST_SUITE("block-level - general") { TEST_CASE("share_per_warp_data - specific writer lane") { using datum_type = uint32_t; cuda::grid::dimensions_t grid_dimensions { 2, 2 }; cuda::grid::block_dimensions_t block_dimensions { kat::warp_size, 3, 3 }; auto num_warps_per_block = block_dimensions.volume() / kat::warp_size; auto num_values_to_populate = num_warps_per_block * grid_dimensions.volume(); auto make_warp_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t warp_id_within_block) { return datum_type{(warp_id_within_block + 1) + (block_id + 1) * 1000}; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* warp_data_for_all_blocks ) { namespace gi = kat::grid_info; datum_type thread_datum { make_warp_datum(gi::block::id(), gi::warp::id()) }; constexpr auto max_possible_num_warps_per_block = 32; // Note: Important assumption here... __shared__ datum_type warp_data [max_possible_num_warps_per_block]; constexpr const auto writing_lane_index = 3u; kcb::share_per_warp_data(thread_datum, warp_data, writing_lane_index); // We've run the synchronized variant, so no need for extra sync if (gi::thread::is_first_in_block()) { // Now we're populating what's going to be checked outside the kernel. auto warp_data_for_this_block = warp_data_for_all_blocks + gi::block::id() * num_warps_per_block; for(int i = 0; i < num_warps_per_block; i++) { warp_data_for_this_block[i] = warp_data[i]; } } }; auto expected_value_retriever = [=] (size_t i) { auto relevant_block_id = i / num_warps_per_block; auto warp_id_within_block = i % num_warps_per_block; return make_warp_datum(relevant_block_id, warp_id_within_block); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, grid_dimensions, block_dimensions, make_exact_comparison<datum_type> ); } TEST_CASE("share_per_warp_data - inspecific writer lane") { using datum_type = uint32_t; cuda::grid::dimensions_t grid_dimensions { 2, 2 }; cuda::grid::block_dimensions_t block_dimensions { kat::warp_size * 2, 5, 3 }; auto num_warps_per_block = block_dimensions.volume() / kat::warp_size; auto num_values_to_populate = num_warps_per_block * grid_dimensions.volume(); auto make_warp_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t warp_id_within_block) { return datum_type{(warp_id_within_block + 1) + (block_id + 1) * 1000}; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* warp_data_for_all_blocks ) { namespace gi = kat::grid_info; datum_type thread_datum { make_warp_datum(gi::block::id(), gi::warp::id()) }; constexpr auto max_possible_num_warps_per_block = 32; // Note: Important assumption here... __shared__ datum_type warp_data [max_possible_num_warps_per_block]; kcb::share_per_warp_data(thread_datum, warp_data); // We've run the synchronized variant, so no need for extra sync if (gi::thread::is_first_in_block()) { // Now we're populating what's going to be checked outside the kernel. 
auto warp_data_for_this_block = warp_data_for_all_blocks + gi::block::id() * num_warps_per_block; for(int i = 0; i < num_warps_per_block; i++) { warp_data_for_this_block[i] = warp_data[i]; } } }; auto expected_value_retriever = [=] (size_t i) { auto relevant_block_id = i / num_warps_per_block; auto warp_id_within_block = i % num_warps_per_block; return make_warp_datum(relevant_block_id, warp_id_within_block); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, grid_dimensions, block_dimensions, make_exact_comparison<datum_type> ); } TEST_CASE("get_from_thread") { using datum_type = uint32_t; cuda::grid::dimensions_t grid_dimensions { 1 }; cuda::grid::block_dimensions_t block_dimensions { kat::warp_size, 2, 1 }; auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume(); auto num_values_to_populate = num_total_threads; auto make_thread_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) { return datum_type{(thread_id + 1) + (block_id + 1) * 10000}; }; auto make_source_thread_index = [=] KAT_HD (cuda::grid::dimension_t block_id) { return kat::position_t { block_id, 1, 0 }; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* thread_obtained_values ) { namespace gi = kat::grid_info; datum_type thread_datum { make_thread_datum(gi::block::id(), gi::thread::id()) }; auto source_thread_index { make_source_thread_index(gi::block::id()) }; auto obtained_value { kcb::get_from_thread(thread_datum, source_thread_index) }; // We've run the synchronized variant, so no need for extra sync // printf("Thread (%2u %2u %2u) = %4u in block %4u had datum %5d, used source thread index %2u,%2u,%2u and got value %4u\n", // threadIdx.x,threadIdx.y,threadIdx.z, // (unsigned) gi::thread::id(), (unsigned) gi::block::id(), thread_datum, source_thread_index.x, source_thread_index.y, source_thread_index.z, obtained_value ); thread_obtained_values[gi::thread::global_id()] = obtained_value; }; auto expected_value_retriever = [=] (size_t global_thread_index) { auto block_id { global_thread_index / block_dimensions.volume() }; auto source_thread_index { make_source_thread_index(block_id) }; auto source_thread_id = kat::detail::row_major_linearization(source_thread_index, uint3(block_dimensions)); return make_thread_datum(block_id, source_thread_id); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, grid_dimensions, block_dimensions, make_exact_comparison<datum_type> ); } TEST_CASE("get_from_first_thread") { using datum_type = uint32_t; cuda::grid::dimensions_t grid_dimensions { 2, 2 }; cuda::grid::block_dimensions_t block_dimensions { kat::warp_size * 3, 3 }; auto num_total_threads = block_dimensions.volume() * grid_dimensions.volume(); auto num_values_to_populate = num_total_threads; auto make_thread_datum = [] KAT_HD ( cuda::grid::dimension_t block_id, cuda::grid::block_dimension_t thread_id) { return datum_type{(thread_id + 1) + (block_id + 1) * 10000}; }; auto testcase_device_function = [=] KAT_DEV ( size_t, datum_type* thread_obtained_values ) { namespace gi = kat::grid_info; datum_type thread_datum { make_thread_datum(gi::block::id(), gi::thread::id()) }; auto obtained_value { kcb::get_from_first_thread(thread_datum) }; // We've run the synchronized variant, so no need for extra sync thread_obtained_values[gi::thread::global_id()] = obtained_value; }; auto 
expected_value_retriever = [=] (size_t global_thread_index) { auto block_id { global_thread_index / block_dimensions.volume() }; auto source_thread_id = 0; // the first one... return make_thread_datum(block_id, source_thread_id); }; execute_non_uniform_builtin_testcase_on_gpu_and_check( testcase_device_function, expected_value_retriever, num_values_to_populate, grid_dimensions, block_dimensions, make_exact_comparison<datum_type> ); } TEST_CASE("barrier") { // Note: Not much to test with the barrier() collaboration primitive, seeing how there could be "natural barrier'ing" even without the call } } // TEST_SUITE("block-level - general")
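// --------------------------------------------------------------------------
// Hedged sketch (not part of the original test file): the "at_block_stride"
// expected_value_retriever above encodes the layout being tested -- position
// `pos` of a block's covered range is attended by thread (pos % block size)
// of that block, i.e. each block walks its range at block stride. The
// host-only helper below restates that mapping with a couple of spot checks;
// expected_attending_thread and at_block_stride_mapping_smoke_check are
// names invented here purely as documentation.
#include <cassert>
#include <cstddef>

inline std::size_t expected_attending_thread(
    std::size_t pos,
    std::size_t length_to_cover_per_block,
    std::size_t num_threads_per_block)
{
    const auto intra_block_pos = pos % length_to_cover_per_block;
    const auto processing_block_id = pos / length_to_cover_per_block;
    return processing_block_id * num_threads_per_block
        + intra_block_pos % num_threads_per_block;
}

inline void at_block_stride_mapping_smoke_check()
{
    constexpr std::size_t threads_per_block = 64;  // kat::warp_size * 2, as in the test
    constexpr std::size_t length_per_block = 271;
    // Position 0 of block 0 is handled by global thread 0.
    assert(expected_attending_thread(0, length_per_block, threads_per_block) == 0);
    // Position 70 of block 0 wraps around to thread 70 % 64 == 6.
    assert(expected_attending_thread(70, length_per_block, threads_per_block) == 6);
    // The first position covered by block 1 maps to that block's thread 0,
    // i.e. global thread index 64.
    assert(expected_attending_thread(271, length_per_block, threads_per_block) == 64);
}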
template <typename T, typename TN> __inline__ __device__ void welfordCombine( T& a_avg, T& a_M2, TN& a_N, const T& b_avg, const T& b_M2, TN b_N) { if (b_N == 0) { return; } TN ab_N = a_N + b_N; T b_N_div_ab_N = ((T)(nvfuser_index_t)(b_N)) / ((T)(nvfuser_index_t)(ab_N)); T delta = b_avg - a_avg; a_avg += delta * b_N_div_ab_N; a_M2 += b_M2 + delta * delta * ((T)(nvfuser_index_t)(a_N)) * b_N_div_ab_N; a_N = ab_N; } // [Z,Y,X]_THREADS is the number of participating threads in the z, y, x // dimension of the block. template < bool X_REDUCE, bool Y_REDUCE, bool Z_REDUCE, typename T, typename TN, typename _dim3ti, typename _dim3bd> __inline__ __device__ void blockWelford( T& out_avg, T& out_M2, TN& out_N, const T& in_avg, const T& in_M2, const TN& in_N, const _dim3ti& thread_idx, const _dim3bd& block_dim, T* shared_mem_avg, T* shared_mem_M2, TN* shared_mem_N, bool read_pred, bool write_pred, T init_val) { unsigned int reduction_size = (X_REDUCE ? block_dim.x : 1) * (Y_REDUCE ? block_dim.y : 1) * (Z_REDUCE ? block_dim.z : 1); // If this thread will output a final result bool should_write = true; if (X_REDUCE) should_write = should_write && thread_idx.x == 0; if (Y_REDUCE) should_write = should_write && thread_idx.y == 0; if (Z_REDUCE) should_write = should_write && thread_idx.z == 0; unsigned int reduction_stride; unsigned int reduction_tid; unsigned int linear_tid; if (X_REDUCE && !Y_REDUCE && Z_REDUCE) { // Transpose Z and Y in the shared memory so Z and X dims are contiguous in // smem reduction_stride = 1; linear_tid = threadIdx.y * blockDim.z * blockDim.x + threadIdx.z * blockDim.x + threadIdx.x; reduction_tid = threadIdx.z * blockDim.x + threadIdx.x; } else { // Normal reduction in order reduction_stride = (X_REDUCE ? 1 : (Y_REDUCE ? block_dim.x : (Z_REDUCE ? block_dim.x * block_dim.y : 0))); linear_tid = thread_idx.z * block_dim.y * block_dim.x + thread_idx.y * block_dim.x + thread_idx.x; reduction_tid = (Z_REDUCE ? thread_idx.z : 0) * (Y_REDUCE ? block_dim.y : 1) * (X_REDUCE ? block_dim.x : 1) + (Y_REDUCE ? thread_idx.y : 0) * (X_REDUCE ? block_dim.x : 1) + (X_REDUCE ? 
thread_idx.x : 0); } assert(reduction_stride != 0); if (read_pred) { shared_mem_avg[linear_tid] = in_avg; shared_mem_M2[linear_tid] = in_M2; shared_mem_N[linear_tid] = in_N; } else { shared_mem_avg[linear_tid] = init_val; shared_mem_M2[linear_tid] = init_val; shared_mem_N[linear_tid] = 0; } block_sync::sync(); // Reduce down to nearest power of 2: int np2 = 1 << (31 - __clz(reduction_size)); if (reduction_tid < np2) { if (reduction_tid + np2 < reduction_size) { welfordCombine( shared_mem_avg[linear_tid], shared_mem_M2[linear_tid], shared_mem_N[linear_tid], shared_mem_avg[linear_tid + np2 * reduction_stride], shared_mem_M2[linear_tid + np2 * reduction_stride], shared_mem_N[linear_tid + np2 * reduction_stride]); } } block_sync::sync(); // loop peel the final iteration to save one syncthread for the end for (int factor = np2 / 2; factor > 1; factor >>= 1) { if (reduction_tid < factor) { welfordCombine( shared_mem_avg[linear_tid], shared_mem_M2[linear_tid], shared_mem_N[linear_tid], shared_mem_avg[linear_tid + factor * reduction_stride], shared_mem_M2[linear_tid + factor * reduction_stride], shared_mem_N[linear_tid + factor * reduction_stride]); } block_sync::sync(); } if (should_write && write_pred) { T res_avg = out_avg; T res_M2 = out_M2; TN res_N = out_N; welfordCombine( res_avg, res_M2, res_N, shared_mem_avg[linear_tid], shared_mem_M2[linear_tid], shared_mem_N[linear_tid]); if (reduction_size > 1) { welfordCombine( res_avg, res_M2, res_N, shared_mem_avg[linear_tid + reduction_stride], shared_mem_M2[linear_tid + reduction_stride], shared_mem_N[linear_tid + reduction_stride]); } out_avg = res_avg; out_M2 = res_M2; out_N = res_N; } block_sync::sync(); } // Use the same pred for both reads and writes template < bool X_REDUCE, bool Y_REDUCE, bool Z_REDUCE, typename T, typename TN, typename _dim3ti, typename _dim3bd> __inline__ __device__ void blockWelford( T& out_avg, T& out_M2, TN& out_N, const T& in_avg, const T& in_M2, const TN& in_N, const _dim3ti& thread_idx, const _dim3bd& block_dim, T* shared_mem_avg, T* shared_mem_M2, TN* shared_mem_N, bool read_write_pred, T init_val) { blockWelford<X_REDUCE, Y_REDUCE, Z_REDUCE, T, TN, _dim3ti, _dim3bd>( out_avg, out_M2, out_N, in_avg, in_M2, in_N, thread_idx, block_dim, shared_mem_avg, shared_mem_M2, shared_mem_N, read_write_pred, read_write_pred, init_val); } // ----------------------------------------------------------------------------------------------- // Grid Welford Prototype // ----------------------------------------------------------------------------------------------- namespace welford { // Utility functions template <typename _dim3> __host__ __device__ __forceinline__ nvfuser_index_t size(const _dim3& d) { return (nvfuser_index_t)d.x * (nvfuser_index_t)d.y * (nvfuser_index_t)d.z; } #define isize(d) ((d).x * (d).y * (d).z) template <typename _dim3pos, typename _dim3dim> __host__ __device__ __forceinline__ nvfuser_index_t offset(const _dim3pos& pos, const _dim3dim& dim) { return (nvfuser_index_t)pos.x + (nvfuser_index_t)pos.y * (nvfuser_index_t)dim.x + (nvfuser_index_t)pos.z * (nvfuser_index_t)dim.x * (nvfuser_index_t)dim.y; } #define ioffset(pos, dim) \ ((pos).x + (pos).y * (dim).x + (pos).z * (dim).x * (dim).y) // Returns dim3 of each reduction segment. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __host__ __device__ dim3 dimension_of_reduction_segment(const _dim3& grid_dim) { return dim3{ X_BLOCK ? grid_dim.x : 1, Y_BLOCK ? grid_dim.y : 1, Z_BLOCK ? 
grid_dim.z : 1}; } // Returns the number of blocks in each reduction segment. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __host__ __device__ nvfuser_index_t size_of_reduction_segment(const _dim3& grid_dim) { return size( dimension_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(grid_dim)); } // Returns the total number of reduction segments. template <bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3> __host__ __device__ nvfuser_index_t number_of_reduction_segments(const _dim3& grid_dim) { return (X_BLOCK ? 1 : grid_dim.x) * (Y_BLOCK ? 1 : grid_dim.y) * (Z_BLOCK ? 1 : grid_dim.z); } // Returns the 1-D index of the segment of thread block of block_idx. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __host__ __device__ nvfuser_index_t index_of_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { nvfuser_index_t seg_idx = 0; if (!Z_BLOCK) seg_idx += block_idx.z; if (!Y_BLOCK) seg_idx = seg_idx * grid_dim.y + block_idx.y; if (!X_BLOCK) seg_idx = seg_idx * grid_dim.x + block_idx.x; return seg_idx; } // Returns the offset of thread block in its reduction segment. template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, typename _dim3bi, typename _dim3gd> __host__ __device__ nvfuser_index_t offset_in_reduction_segment(const _dim3bi& block_idx, const _dim3gd& grid_dim) { nvfuser_index_t offset = 0; if (Z_BLOCK) offset = offset * grid_dim.z + block_idx.z; if (Y_BLOCK) offset = offset * grid_dim.y + block_idx.y; if (X_BLOCK) offset = offset * grid_dim.x + block_idx.x; return offset; } // Returns dim3 of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __host__ __device__ dim3 dimension_of_reduction_block(const _dim3& block_dim) { return dim3{ X_THREAD ? block_dim.x : 1, Y_THREAD ? block_dim.y : 1, Z_THREAD ? block_dim.z : 1}; } // Returns the number of threads of each reduction block. template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3> __host__ __device__ int size_of_reduction_block(const _dim3& block_dim) { auto tmp_dim = dimension_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(block_dim); return isize(tmp_dim); } // Returns the linear offset of a thread in a reduction block. 
template < bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename _dim3ti, typename _dim3bd> __host__ __device__ int offset_in_reduction_block( const _dim3ti& thread_idx, const _dim3bd& block_dim) { int offset = 0; if (Z_THREAD) offset += thread_idx.z; if (Y_THREAD) offset = offset * block_dim.y + thread_idx.y; if (X_THREAD) offset = offset * block_dim.x + thread_idx.x; return offset; } template <bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename TN> __device__ void gridWelfordLastBlock( T& out_avg, T& out_M2, TN& out_N, const T* in_avg, const T* in_M2, const TN* in_N, const nvfuser_index_t in_size, T* shared_buf_avg, T* shared_buf_M2, TN* shared_buf_N, bool write_pred, T init_val) { const int tid = ioffset(threadIdx, blockDim); const int block_size = isize(blockDim); const int rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); T inp_avg = init_val; T inp_M2 = init_val; TN inp_N = 0; if (tid < in_size) { inp_avg = in_avg[tid]; inp_M2 = in_M2[tid]; inp_N = in_N[tid]; } for (nvfuser_index_t i = tid + block_size; i < in_size; i += block_size) { welfordCombine(inp_avg, inp_M2, inp_N, in_avg[i], in_M2[i], in_N[i]); } const auto should_write = (X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0); auto rem_size = block_size / rblock_size; if (rem_size > 1) { const int rblock_offset = tid % rblock_size; const int rblock_idx = tid / rblock_size; T inp_avg_tmp = init_val; T inp_M2_tmp = init_val; TN inp_N_tmp = 0; blockWelford<false, true, false>( inp_avg_tmp, inp_M2_tmp, inp_N_tmp, inp_avg, inp_M2, inp_N, dim3{(unsigned)rblock_offset, (unsigned)rblock_idx, 0}, dim3{(unsigned)rblock_size, (unsigned)rem_size}, shared_buf_avg, shared_buf_M2, shared_buf_N, true, init_val); block_sync::sync(); if (tid < rblock_size) { shared_buf_avg[tid] = inp_avg_tmp; shared_buf_M2[tid] = inp_M2_tmp; shared_buf_N[tid] = inp_N_tmp; } block_sync::sync(); if (should_write) { nvfuser_index_t offset_write = offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim); inp_avg = shared_buf_avg[offset_write]; inp_M2 = shared_buf_M2[offset_write]; inp_N = shared_buf_N[offset_write]; } } if (should_write && write_pred) { welfordCombine(out_avg, out_M2, out_N, inp_avg, inp_M2, inp_N); } } // Grid welford combine template < bool X_BLOCK, bool Y_BLOCK, bool Z_BLOCK, bool X_THREAD, bool Y_THREAD, bool Z_THREAD, typename T, typename TN> __device__ bool gridWelford( T& out_avg, T& out_M2, TN& out_N, const T& inp_avg, const T& inp_M2, const TN& inp_N, volatile T* work_buf_avg, volatile T* work_buf_M2, volatile TN* work_buf_N, Tensor<int64_t, 1> sync_flags, T* shared_buf_avg, T* shared_buf_M2, TN* shared_buf_N, bool read_pred, bool write_pred, T init_val) { // Number of values to reduce in the grid dimensions const auto seg_size = size_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(gridDim); // Index of the reduction we're performing out of the seg_size const auto seg_idx = index_of_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>(blockIdx, gridDim); // Number of threads we can use in final reduction, Seems to assume all // threads in the block participate const auto rblock_size = size_of_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>(blockDim); work_buf_avg += seg_idx * seg_size * rblock_size; work_buf_M2 += seg_idx * seg_size * rblock_size; work_buf_N += seg_idx * seg_size * rblock_size; if ((X_THREAD || threadIdx.x == 0) && (Y_THREAD || threadIdx.y == 0) && (Z_THREAD || threadIdx.z == 0)) { auto rblock_offset = 
offset_in_reduction_segment<X_BLOCK, Y_BLOCK, Z_BLOCK>( blockIdx, gridDim); auto thread_offset = offset_in_reduction_block<X_THREAD, Y_THREAD, Z_THREAD>( threadIdx, blockDim); auto work_buf_offset = rblock_size * rblock_offset + thread_offset; if (read_pred) { work_buf_avg[work_buf_offset] = inp_avg; work_buf_M2[work_buf_offset] = inp_M2; work_buf_N[work_buf_offset] = inp_N; } else { work_buf_avg[work_buf_offset] = init_val; work_buf_M2[work_buf_offset] = init_val; work_buf_N[work_buf_offset] = 0; } } block_sync::sync(); __shared__ bool last_block; if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { __threadfence(); auto old = (int64_t)atomicAdd((unsigned long long*)&sync_flags[seg_idx], 1); last_block = old + 1 == seg_size; } block_sync::sync(); if (last_block) { // final reduction gridWelfordLastBlock<X_THREAD, Y_THREAD, Z_THREAD>( out_avg, out_M2, out_N, (T*)work_buf_avg, (T*)work_buf_M2, (TN*)work_buf_N, seg_size * rblock_size, shared_buf_avg, shared_buf_M2, shared_buf_N, write_pred, init_val); return true; } else { return false; } } } // namespace welford #undef isize #undef ioffset
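// -----------------------------------------------------------------------------------------------
// Hedged sketch (not part of the original runtime file): welfordCombine above is the pairwise
// (Chan et al.) update -- merging partials (avg_a, M2_a, N_a) and (avg_b, M2_b, N_b) shifts the
// mean by delta * N_b / (N_a + N_b) and grows M2 by M2_b + delta^2 * N_a * N_b / (N_a + N_b),
// with delta = avg_b - avg_a. The host-only code below mirrors that update and checks it against
// a naive two-pass mean / sum-of-squared-deviations on a small array; WelfordAgg,
// welford_combine_host and welford_combine_smoke_check are names invented for this sketch and
// are not used by the generated kernels.
#include <cassert>
#include <cmath>
#include <vector>

struct WelfordAgg {
  double avg;
  double M2;
  long long N;
};

inline void welford_combine_host(WelfordAgg& a, const WelfordAgg& b) {
  if (b.N == 0) {
    return;
  }
  const long long ab_N = a.N + b.N;
  const double b_frac = double(b.N) / double(ab_N);
  const double delta = b.avg - a.avg;
  a.avg += delta * b_frac;
  a.M2 += b.M2 + delta * delta * double(a.N) * b_frac;
  a.N = ab_N;
}

inline void welford_combine_smoke_check() {
  const std::vector<double> xs = {1.0, 2.0, 4.0, 8.0, 16.0, 32.0};
  // Fold the elements in one at a time, each as a trivial (x, 0, 1) aggregate.
  WelfordAgg agg{0.0, 0.0, 0};
  for (double x : xs) {
    welford_combine_host(agg, WelfordAgg{x, 0.0, 1});
  }
  // Naive two-pass reference: mean, then sum of squared deviations (== M2).
  double mean = 0.0;
  for (double x : xs) {
    mean += x;
  }
  mean /= double(xs.size());
  double m2 = 0.0;
  for (double x : xs) {
    m2 += (x - mean) * (x - mean);
  }
  assert(std::fabs(agg.avg - mean) < 1e-12);
  assert(std::fabs(agg.M2 - m2) < 1e-9);
  assert(agg.N == (long long)xs.size());
}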
#include "dali/operators/ssd/box_encoder.cuh" #include <cuda.h> #include <vector> #include <utility> namespace dali { __host__ __device__ inline float4 ToCenterWidthHeight(const float4 &box) { return { 0.5f * (box.x + box.z), 0.5f * (box.y + box.w), box.z - box.x, box.w - box.y}; } void BoxEncoder<GPUBackend>::PrepareAnchors(const vector<float> &anchors) { DALI_ENFORCE( (anchors.size() % BoundingBox::size) == 0, "Anchors size must be divisible by 4, actual value = " + std::to_string(anchors.size())); anchor_count_ = anchors.size() / BoundingBox::size; anchors_.Resize({anchor_count_, static_cast<int64_t>(BoundingBox::size)}, DALI_FLOAT); anchors_as_center_wh_.Resize({anchor_count_, static_cast<int64_t>(BoundingBox::size)}, DALI_FLOAT); auto anchors_data_cpu = reinterpret_cast<const float4 *>(anchors.data()); vector<float4> anchors_as_center_wh(anchor_count_); for (unsigned int anchor = 0; anchor < anchor_count_; ++anchor) anchors_as_center_wh[anchor] = ToCenterWidthHeight(anchors_data_cpu[anchor]); auto anchors_data = anchors_.mutable_data<float>(); auto anchors_as_center_wh_data = anchors_as_center_wh_.mutable_data<float>(); MemCopy(anchors_data, anchors.data(), anchor_count_ * BoundingBox::size * sizeof(float)); MemCopy( anchors_as_center_wh_data, anchors_as_center_wh.data(), anchor_count_ * BoundingBox::size * sizeof(float)); } __device__ __forceinline__ float CalculateIou(const float4 &b1, const float4 &b2) { float l = cuda_max(b1.x, b2.x); float t = cuda_max(b1.y, b2.y); float r = cuda_min(b1.z, b2.z); float b = cuda_min(b1.w, b2.w); float first = cuda_max(r - l, 0.0f); float second = cuda_max(b - t, 0.0f); volatile float intersection = first * second; volatile float area1 = (b1.w - b1.y) * (b1.z - b1.x); volatile float area2 = (b2.w - b2.y) * (b2.z - b2.x); return intersection / (area1 + area2 - intersection); } __device__ inline void FindBestMatch(const int N, volatile float *vals, volatile int *idx) { for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) { if (threadIdx.x < stride) { if (vals[threadIdx.x] <= vals[threadIdx.x + stride]) { if (vals[threadIdx.x] == vals[threadIdx.x + stride]) { idx[threadIdx.x] = cuda_max(idx[threadIdx.x], idx[threadIdx.x + stride]); } else { vals[threadIdx.x] = vals[threadIdx.x + stride]; idx[threadIdx.x] = idx[threadIdx.x + stride]; } } } __syncthreads(); } } __device__ float4 MatchOffsets( float4 box, float4 anchor, const float *means, const float *stds, float scale) { box.x *= scale; box.y *= scale; box.z *= scale; box.w *= scale; anchor.x *= scale; anchor.y *= scale; anchor.z *= scale; anchor.w *= scale; float x = ((box.x - anchor.x) / anchor.z - means[0]) / stds[0]; float y = ((box.y - anchor.y) / anchor.w - means[1]) / stds[1]; float z = (log(box.z / anchor.z) - means[2]) / stds[2]; float w = (log(box.w / anchor.w) - means[3]) / stds[3]; return {x, y, z, w}; } __device__ void WriteMatchesToOutput( unsigned int anchor_count, float criteria, int *labels_out, const int *labels_in, float4 *boxes_out, const float4 *boxes_in, volatile int *best_box_idx, volatile float *best_box_iou, bool offset, const float* means, const float* stds, float scale, const float4 *anchors_as_cwh) { for (unsigned int anchor = threadIdx.x; anchor < anchor_count; anchor += blockDim.x) { if (best_box_iou[anchor] > criteria) { int box_idx = best_box_idx[anchor]; labels_out[anchor] = labels_in[box_idx]; float4 box = boxes_in[box_idx]; if (!offset) boxes_out[anchor] = ToCenterWidthHeight(box); else boxes_out[anchor] = MatchOffsets( ToCenterWidthHeight(box), 
anchors_as_cwh[anchor], means, stds, scale); } } } __device__ void MatchBoxWithAnchors( const float4 &box, const int box_idx, unsigned int anchor_count, const float4 *anchors, volatile int *best_anchor_idx_tmp, volatile float *best_anchor_iou_tmp, volatile int *best_box_idx, volatile float *best_box_iou) { float best_anchor_iou = -1.0f; int best_anchor_idx = -1; for (unsigned int anchor = threadIdx.x; anchor < anchor_count; anchor += blockDim.x) { float new_val = CalculateIou(box, anchors[anchor]); if (new_val >= best_anchor_iou) { best_anchor_iou = new_val; best_anchor_idx = anchor; } if (new_val >= best_box_iou[anchor]) { best_box_iou[anchor] = new_val; best_box_idx[anchor] = box_idx; } } best_anchor_iou_tmp[threadIdx.x] = best_anchor_iou; best_anchor_idx_tmp[threadIdx.x] = best_anchor_idx; } template <int BLOCK_SIZE> __global__ void Encode(const BoxEncoderSampleDesc *samples, const int anchor_count, const float4 *anchors, const float criteria, int *box_idx_buffer, float *box_iou_buffer, bool offset, const float *means, const float *stds, float scale, const float4 *anchors_as_cwh) { const int sample_idx = blockIdx.x; const auto &sample = samples[sample_idx]; // Remark: This algorithm is very fragile to floating point arithmetic effects. // For now, excessive use of volatile in this code, // makes it conform to reference solution in terms of resulting encoding. __shared__ volatile int best_anchor_idx_tmp[BLOCK_SIZE]; __shared__ volatile float best_anchor_iou_tmp[BLOCK_SIZE]; volatile int *best_box_idx = box_idx_buffer + sample_idx * anchor_count; volatile float *best_box_iou = box_iou_buffer + sample_idx * anchor_count; for (int box_idx = 0; box_idx < sample.in_box_count; ++box_idx) { MatchBoxWithAnchors( sample.boxes_in[box_idx], box_idx, anchor_count, anchors, best_anchor_idx_tmp, best_anchor_iou_tmp, best_box_idx, best_box_iou); __syncthreads(); FindBestMatch(blockDim.x, best_anchor_iou_tmp, best_anchor_idx_tmp); __syncthreads(); if (threadIdx.x == 0) { int idx = best_anchor_idx_tmp[0]; best_box_idx[idx] = box_idx; best_box_iou[idx] = 2.f; } __syncthreads(); } __syncthreads(); WriteMatchesToOutput( anchor_count, criteria, sample.labels_out, sample.labels_in, sample.boxes_out, sample.boxes_in, best_box_idx, best_box_iou, offset, means, stds, scale, anchors_as_cwh); } std::pair<int *, float *> BoxEncoder<GPUBackend>::ClearBuffers(const cudaStream_t &stream) { auto best_box_idx_data = best_box_idx_.mutable_data<int>(); auto best_box_iou_data = best_box_iou_.mutable_data<float>(); CUDA_CALL(cudaMemsetAsync( best_box_idx_data, 0, curr_batch_size_ * anchor_count_ * sizeof(int), stream)); CUDA_CALL(cudaMemsetAsync( best_box_iou_data, 0, curr_batch_size_ * anchor_count_ * sizeof(float), stream)); return {best_box_idx_data, best_box_iou_data}; } void BoxEncoder<GPUBackend>::ClearLabels(TensorList<GPUBackend> &labels_out, const cudaStream_t &stream) { for (int sample = 0; sample < curr_batch_size_; ++sample) { CUDA_CALL(cudaMemsetAsync(labels_out.mutable_tensor<int>(sample), 0, anchor_count_ * sizeof(int), stream)); } } void BoxEncoder<GPUBackend>::WriteAnchorsToOutput(TensorList<GPUBackend> &boxes_out, const cudaStream_t &stream) { if (!curr_batch_size_) return; const auto *anchors_to_copy = anchors_as_center_wh_.data<float>(); auto *first_sample_boxes_out = boxes_out.mutable_tensor<float>(0); // Host -> device copy of anchors for first sample MemCopy(first_sample_boxes_out, anchors_to_copy, anchor_count_ * BoundingBox::size * sizeof(float), stream); // Device -> device copy for the rest for 
(int sample = 1; sample < curr_batch_size_; ++sample) { auto *boxes_out_data = boxes_out.mutable_tensor<float>(sample); MemCopy(boxes_out_data, first_sample_boxes_out, anchor_count_ * BoundingBox::size * sizeof(float), stream); } } void BoxEncoder<GPUBackend>::ClearOutput(TensorList<GPUBackend> &boxes_out, const cudaStream_t &stream) { for (int sample = 0; sample < curr_batch_size_; ++sample) { auto *boxes_out_data = boxes_out.mutable_tensor<float>(sample); CUDA_CALL(cudaMemsetAsync(boxes_out_data, 0, anchor_count_ * BoundingBox::size * sizeof(float), stream)); } } std::pair<TensorListShape<>, TensorListShape<>> BoxEncoder<GPUBackend>::CalculateDims( const TensorList<GPUBackend> &boxes_input) { TensorListShape<> boxes_output_shape(boxes_input.num_samples(), kBoxesOutputDim); TensorListShape<> labels_output_shape(boxes_input.num_samples(), kLabelsOutputDim); for (size_t i = 0; i < boxes_input.num_samples(); i++) { boxes_output_shape.set_tensor_shape(i, {anchor_count_, static_cast<int64_t>(BoundingBox::size)}); labels_output_shape.set_tensor_shape(i, {anchor_count_}); } return {boxes_output_shape, labels_output_shape}; } void BoxEncoder<GPUBackend>::RunImpl(Workspace<GPUBackend> &ws) { const auto &boxes_input = ws.Input<GPUBackend>(kBoxesInId); const auto &labels_input = ws.Input<GPUBackend>(kLabelsInId); assert(ws.GetInputBatchSize(kBoxesInId) == ws.GetInputBatchSize(kLabelsInId)); auto curr_batch_size = ws.GetInputBatchSize(kBoxesInId); const auto anchors_data = reinterpret_cast<const float4 *>(anchors_.data<float>()); const auto anchors_as_cwh_data = reinterpret_cast<const float4 *>(anchors_as_center_wh_.data<float>()); const auto buffers = ClearBuffers(ws.stream()); auto dims = CalculateDims(boxes_input); auto &boxes_output = ws.Output<GPUBackend>(kBoxesOutId); boxes_output.Resize(dims.first, boxes_input.type()); auto &labels_output = ws.Output<GPUBackend>(kLabelsOutId); labels_output.Resize(dims.second, labels_input.type()); samples.resize(curr_batch_size_); for (int sample_idx = 0; sample_idx < curr_batch_size_; sample_idx++) { auto &sample = samples[sample_idx]; sample.boxes_out = reinterpret_cast<float4 *>(boxes_output.mutable_tensor<float>(sample_idx)); sample.labels_out = labels_output.mutable_tensor<int>(sample_idx); sample.boxes_in = reinterpret_cast<const float4 *>(boxes_input.tensor<float>(sample_idx)); sample.labels_in = labels_input.tensor<int>(sample_idx); sample.in_box_count = boxes_input.shape().tensor_shape_span(sample_idx)[0]; } const auto means_data = means_.data<float>(); const auto stds_data = stds_.data<float>(); ClearLabels(labels_output, ws.stream()); if (!offset_) WriteAnchorsToOutput(boxes_output, ws.stream()); else ClearOutput(boxes_output, ws.stream()); samples_dev.from_host(samples, ws.stream()); Encode<BlockSize><<<curr_batch_size, BlockSize, 0, ws.stream()>>>( samples_dev.data(), anchor_count_, anchors_data, criteria_, buffers.first, buffers.second, offset_, means_data, stds_data, scale_, anchors_as_cwh_data); } DALI_REGISTER_OPERATOR(BoxEncoder, BoxEncoder<GPUBackend>, GPU); } // namespace dali
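
Two points about the encoder above are worth spelling out. First, MatchOffsets produces normalized SSD regression targets from a matched box/anchor pair. Writing cx/cy/w/h for the x/y/z/w components of the float4 after ToCenterWidthHeight (and after both box and anchor have been multiplied by scale), it computes:

  out.x = ((cx_box - cx_anchor) / w_anchor - means[0]) / stds[0]
  out.y = ((cy_box - cy_anchor) / h_anchor - means[1]) / stds[1]
  out.z = (log(w_box / w_anchor) - means[2]) / stds[2]
  out.w = (log(h_box / h_anchor) - means[3]) / stds[3]

The cx_box/w_anchor-style names are editorial shorthand, not identifiers from the file; means and stds are the operator's four-element normalization arrays.

Second, the matching itself is easier to follow without the CUDA plumbing. The following is a minimal host-side sketch of what one block of the Encode kernel computes per sample, under stated assumptions: Box, IouRef and MatchReference are made-up names, the background convention is simplified to -1 (the kernel instead relies on the zero-initialized label and IoU buffers), and the volatile/tie-breaking details are only approximated. It is an illustration, not DALI's implementation.

// Host-side reference for the per-sample matching performed by one block of Encode.
#include <algorithm>
#include <cstddef>
#include <vector>

struct Box { float l, t, r, b; };  // ltrb corners, same layout as the float4 boxes above

static float IouRef(const Box &b1, const Box &b2) {
  float l = std::max(b1.l, b2.l), t = std::max(b1.t, b2.t);
  float r = std::min(b1.r, b2.r), b = std::min(b1.b, b2.b);
  float inter = std::max(r - l, 0.f) * std::max(b - t, 0.f);
  float area1 = (b1.b - b1.t) * (b1.r - b1.l);
  float area2 = (b2.b - b2.t) * (b2.r - b2.l);
  return inter / (area1 + area2 - inter);
}

// Returns, for every anchor, the index of the matched ground-truth box, or -1 for background.
static std::vector<int> MatchReference(const std::vector<Box> &boxes,
                                       const std::vector<Box> &anchors, float criteria) {
  std::vector<int> best_box(anchors.size(), -1);
  std::vector<float> best_iou(anchors.size(), 0.f);
  for (std::size_t bi = 0; bi < boxes.size(); ++bi) {
    int best_anchor = -1;
    float best_anchor_iou = -1.f;
    for (std::size_t ai = 0; ai < anchors.size(); ++ai) {
      float iou = IouRef(boxes[bi], anchors[ai]);
      // Track the anchor that fits this box best (FindBestMatch's job on the GPU) ...
      if (iou >= best_anchor_iou) { best_anchor_iou = iou; best_anchor = static_cast<int>(ai); }
      // ... and the box that fits each anchor best (MatchBoxWithAnchors's per-anchor update).
      if (iou >= best_iou[ai]) { best_iou[ai] = iou; best_box[ai] = static_cast<int>(bi); }
    }
    // Force-match the box's best anchor, mirroring best_box_iou[idx] = 2.f in the kernel.
    if (best_anchor >= 0) {
      best_iou[best_anchor] = 2.f;
      best_box[best_anchor] = static_cast<int>(bi);
    }
  }
  // WriteMatchesToOutput only writes anchors whose IoU exceeds the criteria threshold.
  for (std::size_t ai = 0; ai < anchors.size(); ++ai)
    if (best_iou[ai] <= criteria) best_box[ai] = -1;
  return best_box;
}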
the_stack
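The next sample is a <math.h> overload-resolution test adapted from libc++ for CUDA device code. Two idioms it leans on throughout are worth isolating before reading it: decltype with std::is_same to check at compile time which overload (float vs. double) is selected, and a volatile round-trip helper to keep the compiler from constant-folding the runtime asserts. Below is a stripped-down sketch of both; it reuses the test's V() name and facts it asserts (sqrt of an int yields double, sqrt of a float yields float), but overload_check_sketch is an illustrative name and not part of the test file.

#include <math.h>
#include <type_traits>
#include <cassert>

// Volatile round-trip: defeats constant folding so the assert below exercises the
// device-side math function instead of a compile-time constant.
template <typename T>
__device__ T V(T input) {
  volatile T tmp = input;
  return tmp;
}

__device__ void overload_check_sketch() {
  // Compile-time: integral arguments promote to the double overload; float stays float.
  static_assert(std::is_same<decltype(sqrt(0)), double>::value, "");
  static_assert(std::is_same<decltype(sqrt(0.f)), float>::value, "");
  // Run-time: evaluated on the device rather than folded away.
  assert(sqrt(V(4.f)) == 2.f);
}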
// <math.h> // This file was copied from libc++'s test suite, then modified to test CUDA. // For the most part, this consists of adding __device__ attributes and // deleting long double. // This test requires C++11 (it's mostly decltype checks). #if __cplusplus >= 201103L #include <math.h> #include <type_traits> #include <cassert> #include <stdio.h> // See PR21083 // Ambiguous is a user-defined type that defines its own overloads of cmath // functions. When the std overloads are candidates too (by using or adl), // they should not interfere. struct Ambiguous : std::true_type { // ADL __device__ operator float () { return 0.f; } __device__ operator double () { return 0.; } }; __device__ Ambiguous abs(Ambiguous){ return Ambiguous(); } __device__ Ambiguous acos(Ambiguous){ return Ambiguous(); } __device__ Ambiguous asin(Ambiguous){ return Ambiguous(); } __device__ Ambiguous atan(Ambiguous){ return Ambiguous(); } __device__ Ambiguous atan2(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous ceil(Ambiguous){ return Ambiguous(); } __device__ Ambiguous cos(Ambiguous){ return Ambiguous(); } __device__ Ambiguous cosh(Ambiguous){ return Ambiguous(); } __device__ Ambiguous exp(Ambiguous){ return Ambiguous(); } __device__ Ambiguous fabs(Ambiguous){ return Ambiguous(); } __device__ Ambiguous floor(Ambiguous){ return Ambiguous(); } __device__ Ambiguous fmod(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous frexp(Ambiguous, int*){ return Ambiguous(); } __device__ Ambiguous ldexp(Ambiguous, int){ return Ambiguous(); } __device__ Ambiguous log(Ambiguous){ return Ambiguous(); } __device__ Ambiguous log10(Ambiguous){ return Ambiguous(); } __device__ Ambiguous modf(Ambiguous, Ambiguous*){ return Ambiguous(); } __device__ Ambiguous pow(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous sin(Ambiguous){ return Ambiguous(); } __device__ Ambiguous sinh(Ambiguous){ return Ambiguous(); } __device__ Ambiguous sqrt(Ambiguous){ return Ambiguous(); } __device__ Ambiguous tan(Ambiguous){ return Ambiguous(); } __device__ Ambiguous tanh(Ambiguous){ return Ambiguous(); } __device__ Ambiguous signbit(Ambiguous){ return Ambiguous(); } __device__ Ambiguous fpclassify(Ambiguous){ return Ambiguous(); } __device__ Ambiguous isfinite(Ambiguous){ return Ambiguous(); } __device__ Ambiguous isnormal(Ambiguous){ return Ambiguous(); } __device__ Ambiguous isgreater(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous isgreaterequal(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous isless(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous islessequal(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous islessgreater(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous isunordered(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous acosh(Ambiguous){ return Ambiguous(); } __device__ Ambiguous asinh(Ambiguous){ return Ambiguous(); } __device__ Ambiguous atanh(Ambiguous){ return Ambiguous(); } __device__ Ambiguous cbrt(Ambiguous){ return Ambiguous(); } __device__ Ambiguous copysign(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous erf(Ambiguous){ return Ambiguous(); } __device__ Ambiguous erfc(Ambiguous){ return Ambiguous(); } __device__ Ambiguous exp2(Ambiguous){ return Ambiguous(); } __device__ Ambiguous expm1(Ambiguous){ return Ambiguous(); } __device__ Ambiguous fdim(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous fma(Ambiguous, Ambiguous, Ambiguous){ return Ambiguous(); } 
__device__ Ambiguous fmax(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous fmin(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous hypot(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous ilogb(Ambiguous){ return Ambiguous(); } __device__ Ambiguous lgamma(Ambiguous){ return Ambiguous(); } __device__ Ambiguous llrint(Ambiguous){ return Ambiguous(); } __device__ Ambiguous llround(Ambiguous){ return Ambiguous(); } __device__ Ambiguous log1p(Ambiguous){ return Ambiguous(); } __device__ Ambiguous log2(Ambiguous){ return Ambiguous(); } __device__ Ambiguous logb(Ambiguous){ return Ambiguous(); } __device__ Ambiguous lrint(Ambiguous){ return Ambiguous(); } __device__ Ambiguous lround(Ambiguous){ return Ambiguous(); } __device__ Ambiguous nearbyint(Ambiguous){ return Ambiguous(); } __device__ Ambiguous nextafter(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous remainder(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous remquo(Ambiguous, Ambiguous, int*){ return Ambiguous(); } __device__ Ambiguous rint(Ambiguous){ return Ambiguous(); } __device__ Ambiguous round(Ambiguous){ return Ambiguous(); } __device__ Ambiguous scalbln(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous scalbn(Ambiguous, Ambiguous){ return Ambiguous(); } __device__ Ambiguous tgamma(Ambiguous){ return Ambiguous(); } __device__ Ambiguous trunc(Ambiguous){ return Ambiguous(); } // helper function to prevent compiler constant-folding test inputs. template <typename T> __device__ T V(T input) { volatile T tmp = input; return tmp; } __device__ void test_abs() { static_assert((std::is_same<decltype(abs((float)0)), float>::value), ""); static_assert((std::is_same<decltype(abs((double)0)), double>::value), ""); static_assert((std::is_same<decltype(abs(Ambiguous())), Ambiguous>::value), ""); assert(abs(V(-1)) == 1); assert(abs(V(-1.)) == 1); assert(abs(V(-1.f)) == 1); } __device__ void test_acos() { static_assert((std::is_same<decltype(acos((float)0)), float>::value), ""); static_assert((std::is_same<decltype(acos((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((int)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((long)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(acos((double)0)), double>::value), ""); static_assert((std::is_same<decltype(acosf(0)), float>::value), ""); static_assert((std::is_same<decltype(acos(Ambiguous())), Ambiguous>::value), ""); assert(acos(V(1)) == 0); assert(acos(V(1.)) == 0); assert(acos(V(1.f)) == 0); } __device__ void test_asin() { static_assert((std::is_same<decltype(asin((float)0)), float>::value), ""); static_assert((std::is_same<decltype(asin((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((int)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((long)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((unsigned 
long)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(asin((double)0)), double>::value), ""); static_assert((std::is_same<decltype(asinf(0)), float>::value), ""); static_assert((std::is_same<decltype(asin(Ambiguous())), Ambiguous>::value), ""); assert(asin(V(0)) == 0); assert(asin(V(0.)) == 0); assert(asin(V(0.f)) == 0); } __device__ void test_atan() { static_assert((std::is_same<decltype(atan((float)0)), float>::value), ""); static_assert((std::is_same<decltype(atan((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((int)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((long)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(atan((double)0)), double>::value), ""); static_assert((std::is_same<decltype(atanf(0)), float>::value), ""); static_assert((std::is_same<decltype(atan(Ambiguous())), Ambiguous>::value), ""); assert(atan(V(0)) == 0); assert(atan(V(0.)) == 0); assert(atan(V(0.f)) == 0); } __device__ void test_atan2() { static_assert((std::is_same<decltype(atan2((float)0, (float)0)), float>::value), ""); static_assert((std::is_same<decltype(atan2((bool)0, (float)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2((unsigned short)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2((float)0, (unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2((double)0, (long)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2((int)0, (long long)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2((int)0, (unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2((double)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2((float)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2f(0,0)), float>::value), ""); static_assert((std::is_same<decltype(atan2((int)0, (int)0)), double>::value), ""); static_assert((std::is_same<decltype(atan2(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(atan2(V(0), 1) == 0); assert(atan2(V(0), 1.) == 0); assert(atan2(V(0), 1.f) == 0); assert(atan2(V(0.), 1) == 0); assert(atan2(V(0.), 1.) == 0); assert(atan2(V(0.), 1.f) == 0); assert(atan2(V(0.f), 1) == 0); assert(atan2(V(0.f), 1.) 
== 0); assert(atan2(V(0.f), 1.f) == 0); } __device__ void test_ceil() { static_assert((std::is_same<decltype(ceil((float)0)), float>::value), ""); static_assert((std::is_same<decltype(ceil((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((int)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((long)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(ceil((double)0)), double>::value), ""); static_assert((std::is_same<decltype(ceilf(0)), float>::value), ""); static_assert((std::is_same<decltype(ceil(Ambiguous())), Ambiguous>::value), ""); assert(ceil(V(0)) == 0); assert(ceil(V(0.)) == 0); assert(ceil(V(0.f)) == 0); } __device__ void test_cos() { static_assert((std::is_same<decltype(cos((float)0)), float>::value), ""); static_assert((std::is_same<decltype(cos((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((int)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((long)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(cos((double)0)), double>::value), ""); static_assert((std::is_same<decltype(cosf(0)), float>::value), ""); static_assert((std::is_same<decltype(cos(Ambiguous())), Ambiguous>::value), ""); assert(cos(V(0)) == 1); assert(cos(V(0.)) == 1); assert(cos(V(0.f)) == 1); } __device__ void test_cosh() { static_assert((std::is_same<decltype(cosh((float)0)), float>::value), ""); static_assert((std::is_same<decltype(cosh((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((int)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((long)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(cosh((double)0)), double>::value), ""); static_assert((std::is_same<decltype(coshf(0)), float>::value), ""); static_assert((std::is_same<decltype(cosh(Ambiguous())), Ambiguous>::value), ""); assert(cosh(V(0)) == 1); assert(cosh(V(0.)) == 1); assert(cosh(V(0.f)) == 1); } __device__ void test_exp() { static_assert((std::is_same<decltype(exp((float)0)), float>::value), ""); static_assert((std::is_same<decltype(exp((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(exp((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(exp((int)0)), double>::value), ""); 
static_assert((std::is_same<decltype(exp((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(exp((long)0)), double>::value), ""); static_assert((std::is_same<decltype(exp((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(exp((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(exp((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(exp((double)0)), double>::value), ""); static_assert((std::is_same<decltype(expf(0)), float>::value), ""); static_assert((std::is_same<decltype(exp(Ambiguous())), Ambiguous>::value), ""); assert(exp(V(0)) == 1); assert(exp(V(0.)) == 1); assert(exp(V(0.f)) == 1); } __device__ void test_fabs() { static_assert((std::is_same<decltype(fabs((float)0)), float>::value), ""); static_assert((std::is_same<decltype(fabs((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((int)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((long)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(fabs((double)0)), double>::value), ""); static_assert((std::is_same<decltype(fabsf(0.0f)), float>::value), ""); static_assert((std::is_same<decltype(fabs(Ambiguous())), Ambiguous>::value), ""); assert(fabs(V(-1)) == 1); assert(fabs(V(-1.)) == 1); assert(fabs(V(-1.f)) == 1); } __device__ void test_floor() { static_assert((std::is_same<decltype(floor((float)0)), float>::value), ""); static_assert((std::is_same<decltype(floor((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((int)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((long)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(floor((double)0)), double>::value), ""); static_assert((std::is_same<decltype(floorf(0)), float>::value), ""); static_assert((std::is_same<decltype(floor(Ambiguous())), Ambiguous>::value), ""); assert(floor(V(1)) == 1); assert(floor(V(1.)) == 1); assert(floor(V(1.f)) == 1); } __device__ void test_fmod() { static_assert((std::is_same<decltype(fmod((float)0, (float)0)), float>::value), ""); static_assert((std::is_same<decltype(fmod((bool)0, (float)0)), double>::value), ""); static_assert((std::is_same<decltype(fmod((unsigned short)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(fmod((float)0, (unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(fmod((double)0, (long)0)), double>::value), ""); static_assert((std::is_same<decltype(fmod((int)0, (long long)0)), double>::value), ""); static_assert((std::is_same<decltype(fmod((int)0, (unsigned long long)0)), double>::value), ""); 
static_assert((std::is_same<decltype(fmod((double)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(fmod((float)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(fmodf(0,0)), float>::value), ""); static_assert((std::is_same<decltype(fmod((int)0, (int)0)), double>::value), ""); static_assert((std::is_same<decltype(fmod(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(fmod(V(1.5), 1) == .5); assert(fmod(V(1.5), 1.) == .5); assert(fmod(V(1.5), 1.f) == .5); assert(fmod(V(1.5f), 1) == .5); assert(fmod(V(1.5f), 1.) == .5); assert(fmod(V(1.5f), 1.f) == .5); assert(fmod(V(2), 1) == 0); assert(fmod(V(2), 1.) == 0); assert(fmod(V(2), 1.f) == 0); } __device__ void test_frexp() { int ip; static_assert((std::is_same<decltype(frexp((float)0, &ip)), float>::value), ""); static_assert((std::is_same<decltype(frexp((bool)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((unsigned short)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((int)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((unsigned int)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((long)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((unsigned long)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((long long)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((unsigned long long)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexp((double)0, &ip)), double>::value), ""); static_assert((std::is_same<decltype(frexpf(0, &ip)), float>::value), ""); static_assert((std::is_same<decltype(frexp(Ambiguous(), &ip)), Ambiguous>::value), ""); assert(frexp(V(0), &ip) == 0); assert(frexp(V(0.), &ip) == 0); assert(frexp(V(0.f), &ip) == 0); } __device__ void test_ldexp() { int ip = 1; static_assert((std::is_same<decltype(ldexp((float)0, ip)), float>::value), ""); static_assert((std::is_same<decltype(ldexp((bool)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((unsigned short)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((int)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((unsigned int)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((long)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((unsigned long)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((long long)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((unsigned long long)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexp((double)0, ip)), double>::value), ""); static_assert((std::is_same<decltype(ldexpf(0, ip)), float>::value), ""); static_assert((std::is_same<decltype(ldexp(Ambiguous(), ip)), Ambiguous>::value), ""); assert(ldexp(V(1), ip) == 2); assert(ldexp(V(1.), ip) == 2); assert(ldexp(V(1.f), ip) == 2); } __device__ void test_log() { static_assert((std::is_same<decltype(log((float)0)), float>::value), ""); static_assert((std::is_same<decltype(log((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(log((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(log((int)0)), double>::value), ""); static_assert((std::is_same<decltype(log((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(log((long)0)), double>::value), ""); static_assert((std::is_same<decltype(log((unsigned 
long)0)), double>::value), ""); static_assert((std::is_same<decltype(log((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(log((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(log((double)0)), double>::value), ""); static_assert((std::is_same<decltype(logf(0)), float>::value), ""); static_assert((std::is_same<decltype(log(Ambiguous())), Ambiguous>::value), ""); assert(log(V(1)) == 0); assert(log(V(1.)) == 0); assert(log(V(1.f)) == 0); } __device__ void test_log10() { static_assert((std::is_same<decltype(log10((float)0)), float>::value), ""); static_assert((std::is_same<decltype(log10((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((int)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((long)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(log10((double)0)), double>::value), ""); static_assert((std::is_same<decltype(log10f(0)), float>::value), ""); static_assert((std::is_same<decltype(log10(Ambiguous())), Ambiguous>::value), ""); assert(log10(V(1)) == 0); assert(log10(V(1.)) == 0); assert(log10(V(1.f)) == 0); } __device__ void test_modf() { static_assert((std::is_same<decltype(modf((float)0, (float*)0)), float>::value), ""); static_assert((std::is_same<decltype(modf((double)0, (double*)0)), double>::value), ""); static_assert((std::is_same<decltype(modff(0, (float*)0)), float>::value), ""); static_assert((std::is_same<decltype(modf(Ambiguous(), (Ambiguous*)0)), Ambiguous>::value), ""); double i; assert(modf(V(1), &i) == 0); assert(modf(V(1.), &i) == 0); assert(modf(V(1.f), &i) == 0); } __device__ void test_pow() { static_assert((std::is_same<decltype(pow((float)0, (float)0)), float>::value), ""); static_assert((std::is_same<decltype(pow((bool)0, (float)0)), double>::value), ""); static_assert((std::is_same<decltype(pow((unsigned short)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(pow((float)0, (unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(pow((double)0, (long)0)), double>::value), ""); static_assert((std::is_same<decltype(pow((int)0, (long long)0)), double>::value), ""); static_assert((std::is_same<decltype(pow((int)0, (unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(pow((double)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(pow((float)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(powf(0,0)), float>::value), ""); static_assert((std::is_same<decltype(pow((int)0, (int)0)), double>::value), ""); static_assert((std::is_same<decltype(pow(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(pow(V(1), 1) == 1); assert(pow(V(1.), 1) == 1); assert(pow(V(1.f), 1) == 1); assert(pow(V(1), 1.) == 1); assert(pow(V(1.), 1.) == 1); assert(pow(V(1.f), 1.) 
== 1); assert(pow(V(1), 1.f) == 1); assert(pow(V(1.), 1.f) == 1); assert(pow(V(1.f), 1.f) == 1); } __device__ void test_sin() { static_assert((std::is_same<decltype(sin((float)0)), float>::value), ""); static_assert((std::is_same<decltype(sin((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((int)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((long)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(sin((double)0)), double>::value), ""); static_assert((std::is_same<decltype(sinf(0)), float>::value), ""); static_assert((std::is_same<decltype(sin(Ambiguous())), Ambiguous>::value), ""); assert(sin(0) == 0); assert(sin(0.) == 0); assert(sin(0.f) == 0); } __device__ void test_sinh() { static_assert((std::is_same<decltype(sinh((float)0)), float>::value), ""); static_assert((std::is_same<decltype(sinh((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((int)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((long)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(sinh((double)0)), double>::value), ""); static_assert((std::is_same<decltype(sinhf(0)), float>::value), ""); static_assert((std::is_same<decltype(sinh(Ambiguous())), Ambiguous>::value), ""); assert(sinh(V(0)) == 0); assert(sinh(V(0.)) == 0); assert(sinh(V(0.f)) == 0); } __device__ void test_sqrt() { static_assert((std::is_same<decltype(sqrt((float)0)), float>::value), ""); static_assert((std::is_same<decltype(sqrt((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((int)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((long)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrt((double)0)), double>::value), ""); static_assert((std::is_same<decltype(sqrtf(0)), float>::value), ""); static_assert((std::is_same<decltype(sqrt(Ambiguous())), Ambiguous>::value), ""); assert(sqrt(V(4)) == 2); assert(sqrt(V(4.)) == 2); assert(sqrt(V(4.f)) == 2); } __device__ void test_tan() { static_assert((std::is_same<decltype(tan((float)0)), float>::value), ""); static_assert((std::is_same<decltype(tan((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(tan((unsigned short)0)), double>::value), ""); 
static_assert((std::is_same<decltype(tan((int)0)), double>::value), ""); static_assert((std::is_same<decltype(tan((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(tan((long)0)), double>::value), ""); static_assert((std::is_same<decltype(tan((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(tan((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(tan((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(tan((double)0)), double>::value), ""); static_assert((std::is_same<decltype(tanf(0)), float>::value), ""); static_assert((std::is_same<decltype(tan(Ambiguous())), Ambiguous>::value), ""); assert(tan(V(0)) == 0); assert(tan(V(0.)) == 0); assert(tan(V(0.f)) == 0); } __device__ void test_tanh() { static_assert((std::is_same<decltype(tanh((float)0)), float>::value), ""); static_assert((std::is_same<decltype(tanh((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((int)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((long)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(tanh((double)0)), double>::value), ""); static_assert((std::is_same<decltype(tanhf(0)), float>::value), ""); static_assert((std::is_same<decltype(tanh(Ambiguous())), Ambiguous>::value), ""); assert(tanh(V(0)) == 0); assert(tanh(V(0.)) == 0); assert(tanh(V(0.f)) == 0); } __device__ void test_signbit() { #ifdef signbit #error signbit defined #endif static_assert((std::is_same<decltype(signbit((float)0)), bool>::value), ""); static_assert((std::is_same<decltype(signbit((double)0)), bool>::value), ""); static_assert((std::is_same<decltype(signbit(0)), bool>::value), ""); static_assert((std::is_same<decltype(signbit(Ambiguous())), Ambiguous>::value), ""); assert(signbit(V(-1)) == true); assert(signbit(V(-1.)) == true); assert(signbit(V(-1.f)) == true); } __device__ void test_fpclassify() { #ifdef fpclassify #error fpclassify defined #endif static_assert((std::is_same<decltype(fpclassify((float)0)), int>::value), ""); static_assert((std::is_same<decltype(fpclassify((double)0)), int>::value), ""); static_assert((std::is_same<decltype(fpclassify(0)), int>::value), ""); static_assert((std::is_same<decltype(fpclassify(Ambiguous())), Ambiguous>::value), ""); assert(fpclassify(V(-1)) == FP_NORMAL); assert(fpclassify(V(-1.)) == FP_NORMAL); assert(fpclassify(V(-1.f)) == FP_NORMAL); } __device__ void test_isfinite() { #ifdef isfinite #error isfinite defined #endif static_assert((std::is_same<decltype(isfinite((float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isfinite((double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isfinite(0)), bool>::value), ""); static_assert((std::is_same<decltype(isfinite(Ambiguous())), Ambiguous>::value), ""); assert(isfinite(V(-1)) == true); assert(isfinite(V(-1.)) == true); assert(isfinite(V(-1.f)) == true); } __device__ void test_isnormal() { #ifdef isnormal #error isnormal defined #endif static_assert((std::is_same<decltype(isnormal((float)0)), bool>::value), ""); 
static_assert((std::is_same<decltype(isnormal((double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isnormal(0)), bool>::value), ""); static_assert((std::is_same<decltype(isnormal(Ambiguous())), Ambiguous>::value), ""); assert(std::isnormal(V(-1)) == true); assert(std::isnormal(V(-1.)) == true); assert(std::isnormal(V(-1.f)) == true); } __device__ void test_isgreater() { #ifdef isgreater #error isgreater defined #endif static_assert((std::is_same<decltype(isgreater((float)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreater((float)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreater((double)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreater((double)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreater(0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(std::isgreater(V(-1), 0) == false); assert(std::isgreater(V(-1), 0.) == false); assert(std::isgreater(V(-1), 0.f) == false); assert(std::isgreater(V(-1.), 0) == false); assert(std::isgreater(V(-1.), 0.) == false); assert(std::isgreater(V(-1.), 0.f) == false); assert(std::isgreater(V(-1.f), 0) == false); assert(std::isgreater(V(-1.f), 0.) == false); assert(std::isgreater(V(-1.f), 0.f) == false); } __device__ void test_isgreaterequal() { #ifdef isgreaterequal #error isgreaterequal defined #endif static_assert((std::is_same<decltype(isgreaterequal((float)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreaterequal((float)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreaterequal((double)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreaterequal((double)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreaterequal(0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isgreaterequal(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(std::isgreaterequal(V(-1), 0) == false); assert(std::isgreaterequal(V(-1), 0.) == false); assert(std::isgreaterequal(V(-1), 0.f) == false); assert(std::isgreaterequal(V(-1.), 0) == false); assert(std::isgreaterequal(V(-1.), 0.) == false); assert(std::isgreaterequal(V(-1.), 0.f) == false); assert(std::isgreaterequal(V(-1.f), 0) == false); assert(std::isgreaterequal(V(-1.f), 0.) == false); assert(std::isgreaterequal(V(-1.f), 0.f) == false); } __device__ void test_isinf() { #ifdef isinf #error isinf defined #endif static_assert((std::is_same<decltype(isinf((float)0)), bool>::value), ""); typedef decltype(isinf((double)0)) DoubleRetType; #ifndef __linux__ static_assert((std::is_same<DoubleRetType, bool>::value), ""); #else // GLIBC < 2.26 defines 'isinf(double)' with a return type of 'int' in // all C++ dialects. The test should tolerate this. 
// See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439 static_assert((std::is_same<DoubleRetType, bool>::value || std::is_same<DoubleRetType, int>::value), ""); #endif static_assert((std::is_same<decltype(isinf(0)), bool>::value), ""); assert(std::isinf(V(-1)) == false); assert(std::isinf(V(-1.)) == false); assert(std::isinf(V(-1.f)) == false); } __device__ void test_isless() { #ifdef isless #error isless defined #endif static_assert((std::is_same<decltype(isless((float)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isless((float)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isless((double)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isless((double)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isless(0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isless(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(std::isless(V(-1), 0) == true); assert(std::isless(V(-1), 0.) == true); assert(std::isless(V(-1), 0.f) == true); assert(std::isless(V(-1.), 0) == true); assert(std::isless(V(-1.), 0.) == true); assert(std::isless(V(-1.), 0.f) == true); assert(std::isless(V(-1.f), 0) == true); assert(std::isless(V(-1.f), 0.) == true); assert(std::isless(V(-1.f), 0.f) == true); } __device__ void test_islessequal() { #ifdef islessequal #error islessequal defined #endif static_assert((std::is_same<decltype(islessequal((float)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessequal((float)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessequal((double)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessequal((double)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessequal(0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessequal(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(std::islessequal(V(-1), 0) == true); assert(std::islessequal(V(-1), 0.) == true); assert(std::islessequal(V(-1), 0.f) == true); assert(std::islessequal(V(-1.), 0) == true); assert(std::islessequal(V(-1.), 0.) == true); assert(std::islessequal(V(-1.), 0.f) == true); assert(std::islessequal(V(-1.f), 0) == true); assert(std::islessequal(V(-1.f), 0.) == true); assert(std::islessequal(V(-1.f), 0.f) == true); } __device__ void test_islessgreater() { #ifdef islessgreater #error islessgreater defined #endif static_assert((std::is_same<decltype(islessgreater((float)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessgreater((float)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessgreater((double)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessgreater((double)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessgreater(0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(islessgreater(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(std::islessgreater(V(-1), 0) == true); assert(std::islessgreater(V(-1), 0.) == true); assert(std::islessgreater(V(-1), 0.f) == true); assert(std::islessgreater(V(-1.), 0) == true); assert(std::islessgreater(V(-1.), 0.) == true); assert(std::islessgreater(V(-1.), 0.f) == true); assert(std::islessgreater(V(-1.f), 0) == true); assert(std::islessgreater(V(-1.f), 0.) 
== true); assert(std::islessgreater(V(-1.f), 0.f) == true); } __device__ void test_isnan() { #ifdef isnan #error isnan defined #endif static_assert((std::is_same<decltype(isnan((float)0)), bool>::value), ""); typedef decltype(isnan((double)0)) DoubleRetType; #ifndef __linux__ static_assert((std::is_same<DoubleRetType, bool>::value), ""); #else // GLIBC < 2.26 defines 'isnan(double)' with a return type of 'int' in // all C++ dialects. The test should tolerate this. // See: https://sourceware.org/bugzilla/show_bug.cgi?id=19439 static_assert((std::is_same<DoubleRetType, bool>::value || std::is_same<DoubleRetType, int>::value), ""); #endif static_assert((std::is_same<decltype(isnan(0)), bool>::value), ""); assert(std::isnan(V(-1)) == false); assert(std::isnan(V(-1.)) == false); assert(std::isnan(V(-1.f)) == false); } __device__ void test_isunordered() { #ifdef isunordered #error isunordered defined #endif static_assert((std::is_same<decltype(isunordered((float)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isunordered((float)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isunordered((double)0, (float)0)), bool>::value), ""); static_assert((std::is_same<decltype(isunordered((double)0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isunordered(0, (double)0)), bool>::value), ""); static_assert((std::is_same<decltype(isunordered(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(std::isunordered(V(-1), 0) == false); assert(std::isunordered(V(-1), 0.) == false); assert(std::isunordered(V(-1), 0.f) == false); assert(std::isunordered(V(-1.), 0) == false); assert(std::isunordered(V(-1.), 0.) == false); assert(std::isunordered(V(-1.), 0.f) == false); assert(std::isunordered(V(-1.f), 0) == false); assert(std::isunordered(V(-1.f), 0.) 
== false); assert(std::isunordered(V(-1.f), 0.f) == false); } __device__ void test_acosh() { static_assert((std::is_same<decltype(acosh((float)0)), float>::value), ""); static_assert((std::is_same<decltype(acosh((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((int)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((long)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(acosh((double)0)), double>::value), ""); static_assert((std::is_same<decltype(acoshf(0)), float>::value), ""); static_assert((std::is_same<decltype(acosh(Ambiguous())), Ambiguous>::value), ""); assert(std::acosh(V(1)) == 0); assert(std::acosh(V(1.)) == 0); assert(std::acosh(V(1.f)) == 0); } __device__ void test_asinh() { static_assert((std::is_same<decltype(asinh((float)0)), float>::value), ""); static_assert((std::is_same<decltype(asinh((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((int)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((long)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(asinh((double)0)), double>::value), ""); static_assert((std::is_same<decltype(asinhf(0)), float>::value), ""); static_assert((std::is_same<decltype(asinh(Ambiguous())), Ambiguous>::value), ""); assert(asinh(V(0)) == 0); assert(asinh(V(0.)) == 0); assert(asinh(V(0.f)) == 0); } __device__ void test_atanh() { static_assert((std::is_same<decltype(atanh((float)0)), float>::value), ""); static_assert((std::is_same<decltype(atanh((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((unsigned short)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((int)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((long)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(atanh((double)0)), double>::value), ""); static_assert((std::is_same<decltype(atanhf(0)), float>::value), ""); static_assert((std::is_same<decltype(atanh(Ambiguous())), Ambiguous>::value), ""); assert(atanh(V(0)) == 0); assert(atanh(V(0.)) == 0); assert(atanh(V(0.f)) == 0); } __device__ void test_cbrt() { static_assert((std::is_same<decltype(cbrt((float)0)), float>::value), ""); static_assert((std::is_same<decltype(cbrt((bool)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrt((unsigned short)0)), 
double>::value), ""); static_assert((std::is_same<decltype(cbrt((int)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrt((unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrt((long)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrt((unsigned long)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrt((long long)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrt((unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrt((double)0)), double>::value), ""); static_assert((std::is_same<decltype(cbrtf(0)), float>::value), ""); static_assert((std::is_same<decltype(cbrt(Ambiguous())), Ambiguous>::value), ""); assert(cbrt(V(1)) == 1); assert(cbrt(V(1.)) == 1); assert(cbrt(V(1.f)) == 1); } __device__ void test_copysign() { static_assert((std::is_same<decltype(copysign((float)0, (float)0)), float>::value), ""); static_assert((std::is_same<decltype(copysign((bool)0, (float)0)), double>::value), ""); static_assert((std::is_same<decltype(copysign((unsigned short)0, (double)0)), double>::value), ""); static_assert((std::is_same<decltype(copysign((float)0, (unsigned int)0)), double>::value), ""); static_assert((std::is_same<decltype(copysign((double)0, (long)0)), double>::value), ""); static_assert((std::is_same<decltype(copysign((int)0, (long long)0)), double>::value), ""); static_assert((std::is_same<decltype(copysign((int)0, (unsigned long long)0)), double>::value), ""); static_assert((std::is_same<decltype(copysign((double)0, (double)0)), double>::value), ""); // CUDA's copysign(float, double) currently returns a float, in violation // of the spec. We can't easily change this, so accept either one. static_assert( (std::is_same<decltype(copysign((float)0, (double)0)), double>::value || std::is_same<decltype(copysign((float)0, (double)0)), float>::value), ""); static_assert((std::is_same<decltype(copysignf(0,0)), float>::value), ""); static_assert((std::is_same<decltype(copysign((int)0, (int)0)), double>::value), ""); static_assert((std::is_same<decltype(copysign(Ambiguous(), Ambiguous())), Ambiguous>::value), ""); assert(std::copysign(V(1), 1) == 1); assert(std::copysign(V(1.), 1) == 1); assert(std::copysign(V(1.f), 1) == 1); assert(std::copysign(V(1), 1.) == 1); assert(std::copysign(V(1.), 1.) == 1); assert(std::copysign(V(1.f), 1.) 
== 1);
  assert(std::copysign(V(1), 1.f) == 1);
  assert(std::copysign(V(1.), 1.f) == 1);
  assert(std::copysign(V(1.f), 1.f) == 1);
}

__device__ void test_erf() {
  static_assert((std::is_same<decltype(erf((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(erf((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erf((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(erff(0)), float>::value), "");
  static_assert((std::is_same<decltype(erf(Ambiguous())), Ambiguous>::value), "");
  assert(erf(V(0)) == 0);
  assert(erf(V(0.)) == 0);
  assert(erf(V(0.f)) == 0);
}

__device__ void test_erfc() {
  static_assert((std::is_same<decltype(erfc((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(erfc((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfc((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(erfcf(0)), float>::value), "");
  static_assert((std::is_same<decltype(erfc(Ambiguous())), Ambiguous>::value), "");
  assert(erfc(V(0)) == 1);
  assert(erfc(V(0.)) == 1);
  assert(erfc(V(0.f)) == 1);
}

__device__ void test_exp2() {
  static_assert((std::is_same<decltype(exp2((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(exp2((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(exp2f(0)), float>::value), "");
  static_assert((std::is_same<decltype(exp2(Ambiguous())), Ambiguous>::value), "");
  assert(exp2(V(1)) == 2);
  assert(exp2(V(1.)) == 2);
  assert(exp2(V(1.f)) == 2);
}

__device__ void test_expm1() {
  static_assert((std::is_same<decltype(expm1((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(expm1((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(expm1f(0)), float>::value), "");
  static_assert((std::is_same<decltype(expm1(Ambiguous())), Ambiguous>::value), "");
  assert(expm1(V(0)) == 0);
  assert(expm1(V(0.)) == 0);
  assert(expm1(V(0.f)) == 0);
}

__device__ void test_fdim() {
  static_assert((std::is_same<decltype(fdim((float)0, (float)0)), float>::value), "");
  static_assert((std::is_same<decltype(fdim((bool)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim((unsigned short)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim((float)0, (unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim((double)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim((int)0, (long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim((int)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim((double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim((float)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdimf(0,0)), float>::value), "");
  static_assert((std::is_same<decltype(fdim((int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(fdim(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::fdim(V(1), 0) == 1);
  assert(std::fdim(V(1.), 0) == 1);
  assert(std::fdim(V(1.f), 0) == 1);
  assert(std::fdim(V(1), 0.) == 1);
  assert(std::fdim(V(1.), 0.) == 1);
  assert(std::fdim(V(1.f), 0.) == 1);
  assert(std::fdim(V(1), 0.f) == 1);
  assert(std::fdim(V(1.), 0.f) == 1);
  assert(std::fdim(V(1.f), 0.f) == 1);
}

__device__ void test_fma() {
  static_assert((std::is_same<decltype(fma((bool)0, (float)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((char)0, (float)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((unsigned)0, (float)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((float)0, (int)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((float)0, (long)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((float)0, (float)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((float)0, (float)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((float)0, (float)0, (float)0)), float>::value), "");
  static_assert((std::is_same<decltype(fma((bool)0, (double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((char)0, (double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((unsigned)0, (double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((double)0, (int)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((double)0, (long)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((double)0, (double)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((double)0, (double)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fma((double)0, (double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmaf(0,0,0)), float>::value), "");
  static_assert((std::is_same<decltype(fma(Ambiguous(), Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::fma(V(1), 1, 1) == 2);
  assert(std::fma(V(1.), 1, 1) == 2);
  assert(std::fma(V(1.f), 1, 1) == 2);
  assert(std::fma(V(1), 1., 1) == 2);
  assert(std::fma(V(1.), 1., 1) == 2);
  assert(std::fma(V(1.f), 1., 1) == 2);
  assert(std::fma(V(1), 1.f, 1) == 2);
  assert(std::fma(V(1.), 1.f, 1) == 2);
  assert(std::fma(V(1.f), 1.f, 1) == 2);
  assert(std::fma(V(1), 1, 1.) == 2);
  assert(std::fma(V(1.), 1, 1.) == 2);
  assert(std::fma(V(1.f), 1, 1.) == 2);
  assert(std::fma(V(1), 1., 1.) == 2);
  assert(std::fma(V(1.), 1., 1.) == 2);
  assert(std::fma(V(1.f), 1., 1.) == 2);
  assert(std::fma(V(1), 1.f, 1.) == 2);
  assert(std::fma(V(1.), 1.f, 1.) == 2);
  assert(std::fma(V(1.f), 1.f, 1.) == 2);
  assert(std::fma(V(1), 1, 1.f) == 2);
  assert(std::fma(V(1.), 1, 1.f) == 2);
  assert(std::fma(V(1.f), 1, 1.f) == 2);
  assert(std::fma(V(1), 1., 1.f) == 2);
  assert(std::fma(V(1.), 1., 1.f) == 2);
  assert(std::fma(V(1.f), 1., 1.f) == 2);
  assert(std::fma(V(1), 1.f, 1.f) == 2);
  assert(std::fma(V(1.), 1.f, 1.f) == 2);
  assert(std::fma(V(1.f), 1.f, 1.f) == 2);
}

__device__ void test_fmax() {
  static_assert((std::is_same<decltype(fmax((float)0, (float)0)), float>::value), "");
  static_assert((std::is_same<decltype(fmax((bool)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax((unsigned short)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax((float)0, (unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax((double)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax((int)0, (long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax((int)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax((double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax((float)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmaxf(0,0)), float>::value), "");
  static_assert((std::is_same<decltype(fmax((int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmax(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::fmax(V(1), 0) == 1);
  assert(std::fmax(V(1.), 0) == 1);
  assert(std::fmax(V(1.f), 0) == 1);
  assert(std::fmax(V(1), 0.) == 1);
  assert(std::fmax(V(1.), 0.) == 1);
  assert(std::fmax(V(1.f), 0.) == 1);
  assert(std::fmax(V(1), 0.f) == 1);
  assert(std::fmax(V(1.), 0.f) == 1);
  assert(std::fmax(V(1.f), 0.f) == 1);
}

__device__ void test_fmin() {
  static_assert((std::is_same<decltype(fmin((float)0, (float)0)), float>::value), "");
  static_assert((std::is_same<decltype(fmin((bool)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin((unsigned short)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin((float)0, (unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin((double)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin((int)0, (long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin((int)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin((double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin((float)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(fminf(0,0)), float>::value), "");
  static_assert((std::is_same<decltype(fmin((int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(fmin(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::fmin(V(1), 0) == 0);
  assert(std::fmin(V(1.), 0) == 0);
  assert(std::fmin(V(1.f), 0) == 0);
  assert(std::fmin(V(1), 0.) == 0);
  assert(std::fmin(V(1.), 0.) == 0);
  assert(std::fmin(V(1.f), 0.) == 0);
  assert(std::fmin(V(1), 0.f) == 0);
  assert(std::fmin(V(1.), 0.f) == 0);
  assert(std::fmin(V(1.f), 0.f) == 0);
}

__device__ void test_hypot() {
  static_assert((std::is_same<decltype(hypot((float)0, (float)0)), float>::value), "");
  static_assert((std::is_same<decltype(hypot((bool)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot((unsigned short)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot((float)0, (unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot((double)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot((int)0, (long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot((int)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot((double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot((float)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypotf(0,0)), float>::value), "");
  static_assert((std::is_same<decltype(hypot((int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(hypot(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::hypot(V(3), 4) == 5);
  assert(std::hypot(V(3), 4.) == 5);
  assert(std::hypot(V(3), 4.f) == 5);
  assert(std::hypot(V(3.), 4) == 5);
  assert(std::hypot(V(3.), 4.) == 5);
  assert(std::hypot(V(3.), 4.f) == 5);
  assert(std::hypot(V(3.f), 4) == 5);
  assert(std::hypot(V(3.f), 4.) == 5);
  assert(std::hypot(V(3.f), 4.f) == 5);
}

__device__ void test_ilogb() {
  static_assert((std::is_same<decltype(ilogb((float)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((bool)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((unsigned short)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((int)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((unsigned int)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((long)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((unsigned long)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((long long)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((unsigned long long)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb((double)0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogbf(0)), int>::value), "");
  static_assert((std::is_same<decltype(ilogb(Ambiguous())), Ambiguous>::value), "");
  assert(ilogb(V(1)) == 0);
  assert(ilogb(V(1.)) == 0);
  assert(ilogb(V(1.f)) == 0);
}

__device__ void test_lgamma() {
  static_assert((std::is_same<decltype(lgamma((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(lgamma((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgamma((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(lgammaf(0)), float>::value), "");
  static_assert((std::is_same<decltype(lgamma(Ambiguous())), Ambiguous>::value), "");
  assert(lgamma(V(1)) == 0);
  assert(lgamma(V(1.)) == 0);
  assert(lgamma(V(1.f)) == 0);
}

__device__ void test_llrint() {
  static_assert((std::is_same<decltype(llrint((float)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((bool)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((unsigned short)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((int)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((unsigned int)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((unsigned long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((long long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((unsigned long long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint((double)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrintf(0)), long long>::value), "");
  static_assert((std::is_same<decltype(llrint(Ambiguous())), Ambiguous>::value), "");
  assert(llrint(V(1)) == 1LL);
  assert(llrint(V(1.)) == 1LL);
#if CUDA_VERSION > 7050
  assert(llrint(V(1.f)) == 1LL);
#endif
}

__device__ void test_llround() {
  static_assert((std::is_same<decltype(llround((float)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((bool)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((unsigned short)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((int)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((unsigned int)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((unsigned long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((long long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((unsigned long long)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround((double)0)), long long>::value), "");
  static_assert((std::is_same<decltype(llroundf(0)), long long>::value), "");
  static_assert((std::is_same<decltype(llround(Ambiguous())), Ambiguous>::value), "");
  assert(llround(V(1)) == 1LL);
  assert(llround(V(1.)) == 1LL);
  assert(llround(V(1.f)) == 1LL);
}

__device__ void test_log1p() {
  static_assert((std::is_same<decltype(log1p((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(log1p((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1p((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(log1pf(0)), float>::value), "");
  static_assert((std::is_same<decltype(log1p(Ambiguous())), Ambiguous>::value), "");
  assert(log1p(V(0)) == 0);
  assert(log1p(V(0.)) == 0);
  assert(log1p(V(0.f)) == 0);
}

__device__ void test_log2() {
  static_assert((std::is_same<decltype(log2((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(log2((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(log2f(0)), float>::value), "");
  static_assert((std::is_same<decltype(log2(Ambiguous())), Ambiguous>::value), "");
  assert(log2(V(1)) == 0);
  assert(log2(V(1.)) == 0);
  assert(log2(V(1.f)) == 0);
}

__device__ void test_logb() {
  static_assert((std::is_same<decltype(logb((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(logb((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(logb((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(logbf(0)), float>::value), "");
  static_assert((std::is_same<decltype(logb(Ambiguous())), Ambiguous>::value), "");
  assert(logb(V(1)) == 0);
  assert(logb(V(1.)) == 0);
  assert(logb(V(1.f)) == 0);
}

__device__ void test_lrint() {
  static_assert((std::is_same<decltype(lrint((float)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((bool)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((unsigned short)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((int)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((unsigned int)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((unsigned long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((long long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((unsigned long long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint((double)0)), long>::value), "");
  static_assert((std::is_same<decltype(lrintf(0)), long>::value), "");
  static_assert((std::is_same<decltype(lrint(Ambiguous())), Ambiguous>::value), "");
  assert(lrint(V(1)) == 1L);
  assert(lrint(V(1.)) == 1L);
#if CUDA_VERSION > 7050
  assert(lrint(V(1.f)) == 1L);
#endif
}

__device__ void test_lround() {
  static_assert((std::is_same<decltype(lround((float)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((bool)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((unsigned short)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((int)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((unsigned int)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((unsigned long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((long long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((unsigned long long)0)), long>::value), "");
  static_assert((std::is_same<decltype(lround((double)0)), long>::value), "");
  static_assert((std::is_same<decltype(lroundf(0)), long>::value), "");
  static_assert((std::is_same<decltype(lround(Ambiguous())), Ambiguous>::value), "");
  assert(lround(V(1)) == 1L);
  assert(lround(V(1.)) == 1L);
  assert(lround(V(1.f)) == 1L);
}

__device__ void test_nan() {
  static_assert((std::is_same<decltype(nan("")), double>::value), "");
  static_assert((std::is_same<decltype(nanf("")), float>::value), "");
}

__device__ void test_nearbyint() {
  static_assert((std::is_same<decltype(nearbyint((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(nearbyint((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyint((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(nearbyintf(0)), float>::value), "");
  static_assert((std::is_same<decltype(nearbyint(Ambiguous())), Ambiguous>::value), "");
  assert(nearbyint(V(1)) == 1);
  assert(nearbyint(V(1.)) == 1);
  assert(nearbyint(V(1.f)) == 1);
  // There are more checks in test_rint(). rint and nearbyint behave the same
  // way on the GPU, so we only test them in one place.
}

__device__ void test_nextafter() {
  static_assert((std::is_same<decltype(nextafter((float)0, (float)0)), float>::value), "");
  static_assert((std::is_same<decltype(nextafter((bool)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter((unsigned short)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter((float)0, (unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter((double)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter((int)0, (long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter((int)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter((double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter((float)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafterf(0,0)), float>::value), "");
  static_assert((std::is_same<decltype(nextafter((int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(nextafter(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  //assert(nextafter(0,1) == hexfloat<double>(0x1, 0, -1074));
  // Invoke all our overloads. Even though we don't check the exact result
  // (this is pretty annoying to do for this function), we make sure to *use*
  // the results so that these function calls can't be DCE'ed.
  assert(nextafter(V(0), 1) != 0);
  assert(nextafter(V(0), 1.) != 0);
  assert(nextafter(V(0), 1.f) != 0);
  assert(nextafter(V(0.), 1) != 0);
  assert(nextafter(V(0.), 1.) != 0);
  assert(nextafter(V(0.), 1.f) != 0);
  assert(nextafter(V(0.f), 1) != 0);
  assert(nextafter(V(0.f), 1.) != 0);
  assert(nextafter(V(0.f), 1.f) != 0);
}

__device__ void test_remainder() {
  static_assert((std::is_same<decltype(remainder((float)0, (float)0)), float>::value), "");
  static_assert((std::is_same<decltype(remainder((bool)0, (float)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder((unsigned short)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder((float)0, (unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder((double)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder((int)0, (long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder((int)0, (unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder((double)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder((float)0, (double)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainderf(0,0)), float>::value), "");
  static_assert((std::is_same<decltype(remainder((int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(remainder(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::remainder(V(1.5), 1) == -.5);
  assert(std::remainder(V(1.5), 1.) == -.5);
  assert(std::remainder(V(1.5), 1.f) == -.5);
  assert(std::remainder(V(1.5f), 1) == -.5);
  assert(std::remainder(V(1.5f), 1.) == -.5);
  assert(std::remainder(V(1.5f), 1.f) == -.5);
  assert(std::remainder(V(2), 1) == 0);
  assert(std::remainder(V(2), 1.) == 0);
  assert(std::remainder(V(2), 1.f) == 0);
}

__device__ void test_remquo() {
  int ip;
  static_assert((std::is_same<decltype(remquo((float)0, (float)0, &ip)), float>::value), "");
  static_assert((std::is_same<decltype(remquo((bool)0, (float)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo((unsigned short)0, (double)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo((float)0, (unsigned int)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo((double)0, (long)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo((int)0, (long long)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo((int)0, (unsigned long long)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo((double)0, (double)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo((float)0, (double)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquof(0,0, &ip)), float>::value), "");
  static_assert((std::is_same<decltype(remquo((int)0, (int)0, &ip)), double>::value), "");
  static_assert((std::is_same<decltype(remquo(Ambiguous(), Ambiguous(), &ip)), Ambiguous>::value), "");
  assert(std::remquo(V(1), 1, &ip) == 0);
  assert(std::remquo(V(1), 1., &ip) == 0);
  assert(std::remquo(V(1), 1.f, &ip) == 0);
  assert(std::remquo(V(0.5), 1, &ip) == 0.5);
  assert(std::remquo(V(0.5), 1., &ip) == 0.5);
  assert(std::remquo(V(0.5), 1.f, &ip) == 0.5);
  assert(std::remquo(V(0.5f), 1, &ip) == 0.5);
  assert(std::remquo(V(0.5f), 1., &ip) == 0.5);
  assert(std::remquo(V(0.5f), 1.f, &ip) == 0.5);
}

__device__ void test_rint_nearbyint() {
  static_assert((std::is_same<decltype(rint((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(rint((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(rint((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(rintf(0)), float>::value), "");
  static_assert((std::is_same<decltype(rint(Ambiguous())), Ambiguous>::value), "");

  // Verify that rint/nearbyint produce identical correct results
  auto check = [](double input, double fpresult) {
    // FP rint()/nearbyint must match the expected result.
    assert(rint(V(float(input))) == float(fpresult));
    assert(nearbyint(V(float(input))) == float(fpresult));
    assert(rint(V(input)) == fpresult);
    assert(nearbyint(V(input)) == fpresult);
    // for integral types, std::rint(input) == std::rint(double(input))
    int iinput = input;
    assert(std::rint(V(iinput)) == std::rint(double(V(iinput))));
    assert(std::nearbyint(V(iinput)) == std::nearbyint(double(V(iinput))));
  };
  // Whole values round to themselves and do not change sign.
  check(0.0, 0.0);
  check(-0.0, -0.0);
  check(1.0, 1.0);
  check(-1.0, -1.0);
  // Half-way values round towards nearest even number.
  check(2.5, 2.0);
  check(-2.5, -2.0);
  check(3.5, 4.0);
  check(-3.5, -4.0);
  // Everything else is rounded towards nearest integer.
  check(2.1, 2.0);
  check(-2.1, -2.0);
  check(2.7, 3.0);
  check(-2.7, -3.0);
  check(3.9, 4.0);
  check(-3.9, -4.0);
}

__device__ void test_round() {
  static_assert((std::is_same<decltype(round((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(round((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(round((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(roundf(0)), float>::value), "");
  static_assert((std::is_same<decltype(round(Ambiguous())), Ambiguous>::value), "");
  assert(round(V(1)) == 1);
  assert(round(V(1.)) == 1);
  assert(round(V(1.f)) == 1);
}

__device__ void test_scalbln() {
  static_assert((std::is_same<decltype(scalbln((float)0, (long)0)), float>::value), "");
  static_assert((std::is_same<decltype(scalbln((bool)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((unsigned short)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((int)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((unsigned int)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((long)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((unsigned long)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((long long)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((unsigned long long)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbln((double)0, (long)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalblnf(0, (long)0)), float>::value), "");
  static_assert((std::is_same<decltype(scalbln(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::scalbln(V(1), 1) == 2);
  assert(std::scalbln(V(1), 1.) == 2);
  assert(std::scalbln(V(1), 1.f) == 2);
  assert(std::scalbln(V(1.), 1) == 2);
  assert(std::scalbln(V(1.), 1.) == 2);
  assert(std::scalbln(V(1.), 1.f) == 2);
  assert(std::scalbln(V(1.f), 1) == 2);
  assert(std::scalbln(V(1.f), 1.) == 2);
  assert(std::scalbln(V(1.f), 1.f) == 2);
}

__device__ void test_scalbn() {
  static_assert((std::is_same<decltype(scalbn((float)0, (int)0)), float>::value), "");
  static_assert((std::is_same<decltype(scalbn((bool)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((unsigned short)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((unsigned int)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((long)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((unsigned long)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((long long)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((unsigned long long)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbn((double)0, (int)0)), double>::value), "");
  static_assert((std::is_same<decltype(scalbnf(0, (int)0)), float>::value), "");
  static_assert((std::is_same<decltype(scalbn(Ambiguous(), Ambiguous())), Ambiguous>::value), "");
  assert(std::scalbn(V(1), 1) == 2);
  assert(std::scalbn(V(1), 1.) == 2);
  assert(std::scalbn(V(1), 1.f) == 2);
  assert(std::scalbn(V(1.), 1) == 2);
  assert(std::scalbn(V(1.), 1.) == 2);
  assert(std::scalbn(V(1.), 1.f) == 2);
  assert(std::scalbn(V(1.f), 1) == 2);
  assert(std::scalbn(V(1.f), 1.) == 2);
  assert(std::scalbn(V(1.f), 1.f) == 2);
}

__device__ void test_tgamma() {
  static_assert((std::is_same<decltype(tgamma((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(tgamma((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgamma((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(tgammaf(0)), float>::value), "");
  static_assert((std::is_same<decltype(tgamma(Ambiguous())), Ambiguous>::value), "");
  assert(tgamma(V(1)) == 1);
  assert(tgamma(V(1.)) == 1);
  assert(tgamma(V(1.f)) == 1);
}

__device__ void test_trunc() {
  static_assert((std::is_same<decltype(trunc((float)0)), float>::value), "");
  static_assert((std::is_same<decltype(trunc((bool)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((unsigned short)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((int)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((unsigned int)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((long)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((unsigned long)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((unsigned long long)0)), double>::value), "");
  static_assert((std::is_same<decltype(trunc((double)0)), double>::value), "");
  static_assert((std::is_same<decltype(truncf(0)), float>::value), "");
  static_assert((std::is_same<decltype(trunc(Ambiguous())), Ambiguous>::value), "");
  assert(trunc(V(1)) == 1);
  assert(trunc(V(1.)) == 1);
  assert(trunc(V(1.f)) == 1);
}

__global__ void tests() {
  test_abs();
  test_acos();
  test_asin();
  test_atan();
  test_atan2();
  test_ceil();
  test_cos();
  test_cosh();
  test_exp();
  test_fabs();
  test_floor();
  test_fmod();
  test_frexp();
  test_ldexp();
  test_log();
  test_log10();
  test_modf();
  test_pow();
  test_sin();
  test_sinh();
  test_sqrt();
  test_tan();
  test_tanh();
  test_signbit();
  test_fpclassify();
  test_isfinite();
  test_isnormal();
  test_isgreater();
  test_isgreaterequal();
  test_isinf();
  test_isless();
  test_islessequal();
  test_islessgreater();
  test_isnan();
  test_isunordered();
  test_acosh();
  test_asinh();
  test_atanh();
  test_cbrt();
  test_copysign();
  test_erf();
  test_erfc();
  test_exp2();
  test_expm1();
  test_fdim();
  test_fma();
  test_fmax();
  test_fmin();
  test_hypot();
  test_ilogb();
  test_lgamma();
  test_llrint();
  test_llround();
  test_log1p();
  test_log2();
  test_logb();
  test_lrint();
  test_lround();
  test_nan();
  test_nearbyint();
  test_nextafter();
  test_remainder();
  test_remquo();
  test_rint_nearbyint();
  test_round();
  test_scalbln();
  test_scalbn();
  test_tgamma();
  test_trunc();
}

int main() {
  tests<<<1,1>>>();
  cudaError_t err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    printf("CUDA error %d\n", (int)err);
    return 1;
  }
  printf("Success!\n");
  return 0;
}

#else

#include <stdio.h>

// No C++11; test is a nop.
int main() {
  printf("Success!\n");
  return 0;
}

#endif // __cplusplus < 201103L